/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex. I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too old, it will block
**                  until the current transaction is finished, and then
**                  start a new one.  Usually, your transaction will get
**                  joined in with previous ones for speed.
**
** journal_join -- same as journal_begin, but won't block on the current
**                 transaction regardless of age.  Don't ever call this.
**                 Ever.  There are only two places it should be called
**                 from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk and then
**                       marks them BH_JDirty.  Puts the buffer head into
**                       the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended
**                      and commit blocks are sent to disk.  Forces commit
**                      blocks to disk for all backgrounded commits that
**                      have been around too long.
**                      -- Note, if you call this as an immediate flush
**                      from within kupdate, it will ignore the immediate
**                      flag
*/

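/*
 * A minimal usage sketch of the API described above (illustrative, not
 * part of this file; the signatures are those declared in reiserfs_fs.h
 * of this era):
 *
 *	struct reiserfs_transaction_handle th;
 *	int err;
 *
 *	err = journal_begin(&th, sb, jcount);
 *	if (err)
 *		return err;
 *	reiserfs_prepare_for_journal(sb, bh, 1);
 *	... modify bh->b_data ...
 *	journal_mark_dirty(&th, sb, bh);
 *	err = journal_end(&th, sb, jcount);
 */
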
#include <linux/time.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/uaccess.h>

#include <asm/system.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
				j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
				j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
				   structs at 4k */
#define BUFNR 64		/* read ahead */
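
/*
 * Sanity arithmetic for the define above (an aside; the 24 bytes are
 * the non-realblock fields of the desc and commit structs as laid out
 * in reiserfs_fs.h): (4096 - 24) / 4 = 1018 block numbers fit in a 4k
 * journal block.
 */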

/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2		/* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY 2
#define LIST_COMMIT_PENDING 4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL 1		/* flush commit and real blocks */
#define COMMIT_NOW 2		/* end and commit this transaction */
#define WAIT 4			/* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *,
			  struct super_block *, unsigned long nblocks,
			  int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb,
			      unsigned long nblocks, int join);

static void init_journal_hash(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because
** I can't allow refile_buffer to make schedule happen after I've freed a
** block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}

static void disable_barrier(struct super_block *s)
{
	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
	printk("reiserfs: disabling flush barriers on %s\n",
	       reiserfs_bdevname(s));
}

static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *p_s_sb)
{
	struct reiserfs_bitmap_node *bn;
	static int id;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn) {
		return NULL;
	}
	bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	bn->id = id++;
	INIT_LIST_HEAD(&bn->list);
	return bn;
}

static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
      repeat:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, p_s_sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(p_s_sb);
	if (!bn) {
		yield();
		goto repeat;
	}
	return bn;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(p_s_sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			break;	/* this is ok, we'll try again when more are needed */
		}
	}
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb,
				  b_blocknr_t block,
				  struct reiserfs_list_bitmap *jb)
{
	unsigned int bmap_nr = block / (p_s_sb->s_blocksize << 3);
	unsigned int bit_nr = block % (p_s_sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}

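/*
 * Worked example for the index math above (illustrative): with a 4k
 * blocksize there are s_blocksize << 3 = 32768 bits per bitmap node,
 * so block 100000 lands in bmap_nr 3 at bit_nr 1696
 * (100000 - 3 * 32768 = 1696).
 */
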
static void cleanup_bitmap_list(struct super_block *p_s_sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < reiserfs_bmap_count(p_s_sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(p_s_sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(p_s_sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
				   struct reiserfs_list_bitmap *jb_array,
				   unsigned int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vmalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(p_s_sb, "clm-2000", "unable to "
					 "allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
		memset(jb->bitmaps, 0, mem);
	}
	if (failed) {
		free_list_bitmaps(p_s_sb, jb_array);
		return -1;
	}
	return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(p_s_sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure if flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}

static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
	if (current->lock_depth < 0) {
		reiserfs_panic(sb, "%s called without kernel lock held",
			       caller);
	}
#else
	;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}

/*
** this actually means 'can this block be reallocated yet?'.  If you set
** search_all, a block can only be allocated if it is not in the current
** transaction, was not freed by the current transaction, and has no chance
** of ever being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not
** in the current transaction.  Since deleting a block removes it from the
** current transaction, this case should never happen.  If you don't set
** search_all, make sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return
** that through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
			unsigned int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(p_s_sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will
	** be logged before use.  if we crash before the transaction that
	** freed it commits, this transaction won't have committed either,
	** and the block will never be written
	*/
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       p_s_sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}

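/*
 * A hedged sketch of the allocator side (the real caller lives in
 * fs/reiserfs/bitmap.c; the loop below is illustrative only): scan
 * candidate bits, letting next_zero_bit skip the scan past rejected
 * blocks instead of retesting every bit.
 *
 *	b_blocknr_t next;
 *	while (bit < end) {
 *		if (reiserfs_in_journal(s, bmap_nr, bit, 1, &next)) {
 *			bit = next;	(rejected; retry at the hint)
 *			continue;
 *		}
 *		break;			(bit is safe to reallocate)
 *	}
 */
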
/* insert cn into table
*/
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}

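/*
 * Note on chain order (derived from the code above, not new behavior):
 * new cnodes are pushed at the head of the bucket, so hprev points from
 * older entries toward newer ones.  That is why find_newer_jl_for_cn()
 * below walks cn->hprev to find a more recent transaction that logged
 * the same block.
 */
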
/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb)
{
	PROC_INFO_INC(p_s_sb, journal.lock_journal);
	mutex_lock(&SB_JOURNAL(p_s_sb)->j_mutex);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb)
{
	mutex_unlock(&SB_JOURNAL(p_s_sb)->j_mutex);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "trans id %u, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}

/*
** this used to be much more involved, and I'm keeping it just in case
** things get ugly again.  it gets called by flush_commit_list, and cleans
** up any data stored about blocks freed during a transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
					   struct reiserfs_journal_list *jl)
{

	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(p_s_sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

static int journal_list_still_alive(struct super_block *s,
				    unsigned int trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}

/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	if (!page->mapping && trylock_page(page)) {
		page_cache_get(page);
		put_bh(bh);
		if (!page->mapping)
			try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);
	} else {
		put_bh(bh);
	}
}

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL, "clm-2084",
				 "pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);

	unlock_buffer(bh);
	release_buffer_page(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	return submit_bh(WRITE_BARRIER, bh);
}

static void check_barrier_completion(struct super_block *s,
				     struct buffer_head *bh)
{
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		disable_barrier(s);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}
}

#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}

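/*
 * Typical call pattern (as used by write_ordered_buffers() later in
 * this file): batch dirty buffers and let add_to_chunk() submit a full
 * batch, dropping the caller's spinlock around the actual IO:
 *
 *	struct buffer_chunk chunk;
 *	chunk.nr = 0;
 *	...
 *	add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
 *	...
 *	if (chunk.nr)
 *		write_ordered_chunk(&chunk);
 */
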
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			if (!buffer_dirty(bh)) {
				list_move(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/* in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_move(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/* ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a valid
		 * journal head from an older transaction.  If someone else sets
		 * our buffer dirty after we write it in the first loop, and
		 * then someone truncates the page away, nobody will ever write
		 * the buffer.  We're safe if we write the page one last time
		 * after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(WRITE, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}

static int flush_older_commits(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal_list *first_jl;
	struct list_head *entry;
	unsigned int trans_id = jl->j_trans_id;
	unsigned int other_trans_id;
	unsigned int first_trans_id;

      find_first:
	/*
	 * first we walk backwards to find the oldest uncommitted transaction
	 */
	first_jl = jl;
	entry = jl->j_list.prev;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		if (entry == &journal->j_journal_list ||
		    atomic_read(&other_jl->j_older_commits_done))
			break;

		first_jl = other_jl;
		entry = other_jl->j_list.prev;
	}

	/* if we didn't find any older uncommitted transactions, return now */
	if (first_jl == jl) {
		return 0;
	}

	first_trans_id = first_jl->j_trans_id;

	entry = &first_jl->j_list;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		other_trans_id = other_jl->j_trans_id;

		if (other_trans_id < trans_id) {
			if (atomic_read(&other_jl->j_commit_left) != 0) {
				flush_commit_list(s, other_jl, 0);

				/* list we were called with is gone, return */
				if (!journal_list_still_alive(s, trans_id))
					return 1;

				/* the one we just flushed is gone, this means all
				 * older lists are also gone, so first_jl is no longer
				 * valid either.  Go back to the beginning.
				 */
				if (!journal_list_still_alive
				    (s, other_trans_id)) {
					goto find_first;
				}
			}
			entry = entry->next;
			if (entry == &journal->j_journal_list)
				return 0;
		} else {
			return 0;
		}
	}
	return 0;
}

static int reiserfs_async_progress_wait(struct super_block *s)
{
	DEFINE_WAIT(wait);
	struct reiserfs_journal *j = SB_JOURNAL(s);
	if (atomic_read(&j->j_async_throttle))
		congestion_wait(WRITE, HZ / 10);
	return 0;
}

/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before
** transaction 1).  Before the commit block can be written, every other
** log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	b_blocknr_t bn;
	struct buffer_head *tbh = NULL;
	unsigned int trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int barrier = 0;
	int retval = 0;
	int write_len;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	get_fs_excl();

	/* before we can put our commit blocks on disk, we have to make
	** sure everyone older than us is on disk too
	*/
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/* list disappeared during flush_older_commits.  return */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	mutex_lock(&jl->j_commit_mutex);
	if (!journal_list_still_alive(s, trans_id)) {
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&(jl->j_commit_left)) <= 0) {
		if (flushall) {
			atomic_set(&(jl->j_older_commits_done), 1);
		}
		mutex_unlock(&jl->j_commit_mutex);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		int ret;
		unlock_kernel();
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk.  Try to write at least 256
	 * log blocks.  later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0; i < write_len; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh))
				ll_rw_block(WRITE, 1, &tbh);
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	/* We're skipping the commit if there's an error */
	if (retval || reiserfs_is_journal_aborted(journal))
		barrier = 0;

	/* wait on everything written so far before writing the commit
	 * if we are in barrier mode, send the commit down now
	 */
	barrier = reiserfs_barrier_flush(s);
	if (barrier) {
		int ret;
		lock_buffer(jl->j_commit_bh);
		ret = submit_barrier_buffer(jl->j_commit_bh);
		if (ret == -EOPNOTSUPP) {
			set_buffer_uptodate(jl->j_commit_bh);
			disable_barrier(s);
			barrier = 0;
		}
	}
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		wait_on_buffer(tbh);
		/* since we're using ll_rw_blk above, it might have skipped
		 * over a locked buffer.  Double check here
		 */
		if (buffer_dirty(tbh))	/* redundant, sync_dirty_buffer() checks */
			sync_dirty_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601",
					 "buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	if (!barrier) {
		/* If there was a write error in the journal - we can't commit
		 * this transaction - it will be invalid and, if successful,
		 * will just end up propagating the write error out to
		 * the file system. */
		if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
			if (buffer_dirty(jl->j_commit_bh))
				BUG();
			mark_buffer_dirty(jl->j_commit_bh);
			sync_dirty_buffer(jl->j_commit_bh);
		}
	} else
		wait_on_buffer(jl->j_commit_bh);

	check_barrier_completion(s, jl->j_commit_bh);

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615", "buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow
	** blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	mutex_unlock(&jl->j_commit_mutex);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __func__);
	put_fs_excl();
	return retval;
}

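/*
 * Commit ordering at a glance (a summary of the function above, not new
 * behavior): for transactions T1 older than T2, writes must reach the
 * disk as
 *
 *	T1 log blocks, T1 commit block, T2 log blocks, T2 commit block
 *
 * which is why flush_commit_list() flushes older commits first, waits
 * on its own log blocks, and only then writes jl->j_commit_bh.
 */
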
/*
** flush_journal_list frequently needs to find a newer transaction for a
** given block.  This does that, or returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}

static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
		    atomic_read(&cn->jlist->j_commit_left) != 0)
			return 0;
		cn = cn->hprev;
	}
	return 1;
}

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them
** from the journal list for this transaction.  Aside from freeing the
** cnode, this also allows the block to be reallocated for data blocks
** if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	** to lock for each call to remove_journal_hash?
	*/
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(p_s_sb, "reiserfs-2201",
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(p_s_sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(p_s_sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the
** header block, write it to the header block.  once this is done, I can
** safely say the log area for this transaction won't ever be replayed,
** and I can start releasing blocks in this transaction for reuse as data
** blocks.  called by flush_journal_list, before it calls
** remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb,
					unsigned long offset,
					unsigned int trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			wait_on_buffer((journal->j_header_bh));
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(p_s_sb, "journal-699",
						 "buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		if (reiserfs_barrier_flush(p_s_sb)) {
			int ret;
			lock_buffer(journal->j_header_bh);
			ret = submit_barrier_buffer(journal->j_header_bh);
			if (ret == -EOPNOTSUPP) {
				set_buffer_uptodate(journal->j_header_bh);
				disable_barrier(p_s_sb);
				goto sync;
			}
			wait_on_buffer(journal->j_header_bh);
			check_barrier_completion(p_s_sb, journal->j_header_bh);
		} else {
	      sync:
			set_buffer_dirty(journal->j_header_bh);
			sync_dirty_buffer(journal->j_header_bh);
		}
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(p_s_sb, "journal-837",
					 "IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

static int update_journal_header_block(struct super_block *p_s_sb,
				       unsigned long offset,
				       unsigned int trans_id)
{
	return _update_journal_header_block(p_s_sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	unsigned int trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(p_s_sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s, "clm-2048", "called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		mutex_lock(&journal->j_flush_mutex);
	} else if (mutex_trylock(&journal->j_flush_mutex)) {
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s,
			       "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	get_fs_excl();

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	** the commit lists of any older transactions
	*/
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	** or wait on a more recent transaction, or just ignore it
	*/
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s,
			       "journal-844: panic journal list is flushing, wcount is not 0\n");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit.  Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		** don't clear BH_JDirty_wait if we aren't the one writing this
		** block to disk
		*/
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			** we are working with it
			*/
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		** sure they are committed, and don't try writing it to disk
		*/
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		** the block got freed in a future transaction
		*/
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		** locked while it works, so we should never see a buffer here that
		** is not marked JDirty_wait
		*/
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s, "journal-813",
					 "BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer transaction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s, "clm-2082",
					 "Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __func__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s, "journal-945",
						 "saved_bh->b_count < 0");
			}
		}
	}
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1011: cn->bh is NULL\n");
				}
				wait_on_buffer(cn->bh);
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1012: cn->bh is NULL\n");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s, "journal-949",
							 "buffer write failed");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				** check, otherwise we race against our flushpage routine
				*/
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* drop one ref for us */
				put_bh(cn->bh);
				/* drop one ref for journal_mark_dirty */
				release_buffer_page(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __func__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	** real blocks from all older transactions to disk.  This is because
	** once the header block is updated, this transaction will not be
	** replayed after a crash
	*/
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	** transaction, we must make sure it can never be replayed
	**
	** since we are only called from do_journal_end, we know for sure there
	** are no allocations going on while we are flushing journal lists.  So,
	** we only need to update the journal header block for the last list
	** being flushed
	*/
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __func__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		mutex_unlock(&journal->j_flush_mutex);
	put_fs_excl();
	return err;
}

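/*
 * Recovery-ordering summary (restating the comments above): a list is
 * flushed as 1) commit the list, 2) write the real blocks of this and
 * all older lists home, 3) only then advance the journal header past
 * this transaction.  Doing 3) before 2) could let a crash skip replay
 * of a transaction whose real blocks never reached their home
 * locations.
 */
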
static int test_transaction(struct super_block *s,
			    struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;

	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
		return 1;

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		** skip it
		*/
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && !newer_jl_done(cn))
			return 0;
	      next:
		cn = cn->next;
		cond_resched();
	}
	return 0;
}

static int write_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		** skip it
		*/
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		** buffer.  Only the most recent transaction with a buffer in
		** it is allowed to send that buffer to disk
		*/
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}

1758static int kupdate_transactions(struct super_block *s,
1759 struct reiserfs_journal_list *jl,
1760 struct reiserfs_journal_list **next_jl,
600ed416 1761 unsigned int *next_trans_id,
bd4c625c
LT
1762 int num_blocks, int num_trans)
1763{
1764 int ret = 0;
1765 int written = 0;
1766 int transactions_flushed = 0;
600ed416 1767 unsigned int orig_trans_id = jl->j_trans_id;
bd4c625c
LT
1768 struct buffer_chunk chunk;
1769 struct list_head *entry;
1770 struct reiserfs_journal *journal = SB_JOURNAL(s);
1771 chunk.nr = 0;
1772
afe70259 1773 mutex_lock(&journal->j_flush_mutex);
bd4c625c
LT
1774 if (!journal_list_still_alive(s, orig_trans_id)) {
1775 goto done;
1776 }
1777
afe70259 1778 /* we've got j_flush_mutex held, nobody is going to delete any
bd4c625c
LT
1779 * of these lists out from underneath us
1780 */
1781 while ((num_trans && transactions_flushed < num_trans) ||
1782 (!num_trans && written < num_blocks)) {
1783
1784 if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
1785 atomic_read(&jl->j_commit_left)
1786 || !(jl->j_state & LIST_DIRTY)) {
1787 del_from_work_list(s, jl);
1788 break;
1789 }
1790 ret = write_one_transaction(s, jl, &chunk);
1791
1792 if (ret < 0)
1793 goto done;
1794 transactions_flushed++;
1795 written += ret;
1796 entry = jl->j_list.next;
1797
1798 /* did we wrap? */
1799 if (entry == &journal->j_journal_list) {
1800 break;
1801 }
1802 jl = JOURNAL_LIST_ENTRY(entry);
1803
1804 /* don't bother with older transactions */
1805 if (jl->j_trans_id <= orig_trans_id)
1806 break;
1807 }
1808 if (chunk.nr) {
1809 write_chunk(&chunk);
1810 }
1811
1812 done:
afe70259 1813 mutex_unlock(&journal->j_flush_mutex);
1814 return ret;
1815}
1816
 1817/* for O_SYNC and fsync heavy applications, they tend to use up
 1818** all the journal list slots with tiny transactions. These
 1819** trigger lots and lots of calls to update the header block, which
 1820** adds seeks and slows things down.
 1821**
 1822** This function tries to clear out a large chunk of the journal lists
 1823** at once, which makes everything faster since only the newest journal
 1824** list updates the header block
 1825*/
1826static int flush_used_journal_lists(struct super_block *s,
1827 struct reiserfs_journal_list *jl)
1828{
1829 unsigned long len = 0;
1830 unsigned long cur_len;
1831 int ret;
1832 int i;
1833 int limit = 256;
1834 struct reiserfs_journal_list *tjl;
1835 struct reiserfs_journal_list *flush_jl;
600ed416 1836 unsigned int trans_id;
bd4c625c
LT
1837 struct reiserfs_journal *journal = SB_JOURNAL(s);
1838
1839 flush_jl = tjl = jl;
1840
1841 /* in data logging mode, try harder to flush a lot of blocks */
1842 if (reiserfs_data_log(s))
1843 limit = 1024;
1844 /* flush for 256 transactions or limit blocks, whichever comes first */
1845 for (i = 0; i < 256 && len < limit; i++) {
1846 if (atomic_read(&tjl->j_commit_left) ||
1847 tjl->j_trans_id < jl->j_trans_id) {
1848 break;
1849 }
1850 cur_len = atomic_read(&tjl->j_nonzerolen);
1851 if (cur_len > 0) {
1852 tjl->j_state &= ~LIST_TOUCHED;
1853 }
1854 len += cur_len;
1855 flush_jl = tjl;
1856 if (tjl->j_list.next == &journal->j_journal_list)
1857 break;
1858 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1859 }
1860 /* try to find a group of blocks we can flush across all the
1861 ** transactions, but only bother if we've actually spanned
1862 ** across multiple lists
1863 */
1864 if (flush_jl != jl) {
1865 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1866 }
1867 flush_journal_list(s, flush_jl, 1);
1868 return 0;
1869}
1870
1871/*
 1872** removes any nodes in table with the same block and dev as bh.
 1873** only touches the hnext and hprev pointers.
1874*/
1875void remove_journal_hash(struct super_block *sb,
1876 struct reiserfs_journal_cnode **table,
1877 struct reiserfs_journal_list *jl,
1878 unsigned long block, int remove_freed)
1879{
1880 struct reiserfs_journal_cnode *cur;
1881 struct reiserfs_journal_cnode **head;
1882
1883 head = &(journal_hash(table, sb, block));
1884 if (!head) {
1885 return;
1886 }
1887 cur = *head;
1888 while (cur) {
1889 if (cur->blocknr == block && cur->sb == sb
1890 && (jl == NULL || jl == cur->jlist)
1891 && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1892 if (cur->hnext) {
1893 cur->hnext->hprev = cur->hprev;
1894 }
1895 if (cur->hprev) {
1896 cur->hprev->hnext = cur->hnext;
1897 } else {
1898 *head = cur->hnext;
1899 }
1900 cur->blocknr = 0;
1901 cur->sb = NULL;
1902 cur->state = 0;
1903 if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
1904 atomic_dec(&(cur->jlist->j_nonzerolen));
1905 cur->bh = NULL;
1906 cur->jlist = NULL;
1907 }
1908 cur = cur->hnext;
1909 }
1910}
1911
1912static void free_journal_ram(struct super_block *p_s_sb)
1913{
1914 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
d739b42b 1915 kfree(journal->j_current_jl);
1916 journal->j_num_lists--;
1917
1918 vfree(journal->j_cnode_free_orig);
1919 free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
1920 free_bitmap_nodes(p_s_sb); /* must be after free_list_bitmaps */
1921 if (journal->j_header_bh) {
1922 brelse(journal->j_header_bh);
1923 }
1924 /* j_header_bh is on the journal dev, make sure not to release the journal
1925 * dev until we brelse j_header_bh
1926 */
1927 release_journal_dev(p_s_sb, journal);
1928 vfree(journal);
1929}
1930
1931/*
1932** call on unmount. Only set error to 1 if you haven't made your way out
1933** of read_super() yet. Any other caller must keep error at 0.
1934*/
1935static int do_journal_release(struct reiserfs_transaction_handle *th,
1936 struct super_block *p_s_sb, int error)
1937{
1938 struct reiserfs_transaction_handle myth;
1939 int flushed = 0;
1940 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1941
1942 /* we only want to flush out transactions if we were called with error == 0
1943 */
1944 if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
1945 /* end the current trans */
1946 BUG_ON(!th->t_trans_id);
1947 do_journal_end(th, p_s_sb, 10, FLUSH_ALL);
1948
1949 /* make sure something gets logged to force our way into the flush code */
1950 if (!journal_join(&myth, p_s_sb, 1)) {
1951 reiserfs_prepare_for_journal(p_s_sb,
1952 SB_BUFFER_WITH_SB(p_s_sb),
1953 1);
1954 journal_mark_dirty(&myth, p_s_sb,
1955 SB_BUFFER_WITH_SB(p_s_sb));
1956 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1957 flushed = 1;
1958 }
1959 }
1960
1961 /* this also catches errors during the do_journal_end above */
1962 if (!error && reiserfs_is_journal_aborted(journal)) {
1963 memset(&myth, 0, sizeof(myth));
1964 if (!journal_join_abort(&myth, p_s_sb, 1)) {
1965 reiserfs_prepare_for_journal(p_s_sb,
1966 SB_BUFFER_WITH_SB(p_s_sb),
1967 1);
1968 journal_mark_dirty(&myth, p_s_sb,
1969 SB_BUFFER_WITH_SB(p_s_sb));
1970 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1971 }
1972 }
1973
1974 reiserfs_mounted_fs_count--;
1975 /* wait for all commits to finish */
1976 cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
1977 flush_workqueue(commit_wq);
1978 if (!reiserfs_mounted_fs_count) {
1979 destroy_workqueue(commit_wq);
1980 commit_wq = NULL;
1981 }
1982
1983 free_journal_ram(p_s_sb);
1984
1985 return 0;
1986}
1987
1988/*
1989** call on unmount. flush all journal trans, release all alloc'd ram
1990*/
1991int journal_release(struct reiserfs_transaction_handle *th,
1992 struct super_block *p_s_sb)
1993{
1994 return do_journal_release(th, p_s_sb, 0);
1da177e4 1995}
bd4c625c 1996
1997/*
1998** only call from an error condition inside reiserfs_read_super!
1999*/
2000int journal_release_error(struct reiserfs_transaction_handle *th,
2001 struct super_block *p_s_sb)
2002{
2003 return do_journal_release(th, p_s_sb, 1);
2004}
2005
2006/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
2007static int journal_compare_desc_commit(struct super_block *p_s_sb,
2008 struct reiserfs_journal_desc *desc,
2009 struct reiserfs_journal_commit *commit)
2010{
2011 if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
2012 get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
2013 get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
2014 get_commit_trans_len(commit) <= 0) {
2015 return 1;
2016 }
2017 return 0;
1da177e4 2018}
bd4c625c 2019
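/* Editor's note: a minimal sketch (not part of the original file) of the
** wrap-around arithmetic used by the comparison above and the replay code
** below. A transaction occupies trans_len + 2 journal blocks (desc + data +
** commit), and offsets wrap modulo the on-disk journal size. The helper
** name is hypothetical.
*/
static inline unsigned long commit_blocknr_for(struct super_block *sb,
					       unsigned long desc_offset,
					       int trans_len)
{
	/* the commit block sits trans_len + 1 blocks past the desc block */
	return SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	       ((desc_offset + trans_len + 1) % SB_ONDISK_JOURNAL_SIZE(sb));
}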
2020/* returns 0 if it did not find a description block
2021** returns -1 if it found a corrupt commit block
2022** returns 1 if both desc and commit were valid
2023*/
2024static int journal_transaction_is_valid(struct super_block *p_s_sb,
2025 struct buffer_head *d_bh,
600ed416 2026 unsigned int *oldest_invalid_trans_id,
2027 unsigned long *newest_mount_id)
2028{
2029 struct reiserfs_journal_desc *desc;
2030 struct reiserfs_journal_commit *commit;
2031 struct buffer_head *c_bh;
2032 unsigned long offset;
2033
2034 if (!d_bh)
2035 return 0;
2036
2037 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2038 if (get_desc_trans_len(desc) > 0
2039 && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
2040 if (oldest_invalid_trans_id && *oldest_invalid_trans_id
2041 && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
2042 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2043 "journal-986: transaction "
2044 "is valid returning because trans_id %d is greater than "
2045 "oldest_invalid %lu",
2046 get_desc_trans_id(desc),
2047 *oldest_invalid_trans_id);
2048 return 0;
2049 }
2050 if (newest_mount_id
2051 && *newest_mount_id > get_desc_mount_id(desc)) {
2052 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2053 "journal-1087: transaction "
2054 "is valid returning because mount_id %d is less than "
2055 "newest_mount_id %lu",
2056 get_desc_mount_id(desc),
2057 *newest_mount_id);
2058 return -1;
2059 }
2060 if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
2061 reiserfs_warning(p_s_sb, "journal-2018",
2062 "Bad transaction length %d "
2063 "encountered, ignoring transaction",
2064 get_desc_trans_len(desc));
2065 return -1;
2066 }
2067 offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2068
2069 /* ok, we have a journal description block, lets see if the transaction was valid */
2070 c_bh =
2071 journal_bread(p_s_sb,
2072 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2073 ((offset + get_desc_trans_len(desc) +
2074 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2075 if (!c_bh)
2076 return 0;
2077 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2078 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2079 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2080 "journal_transaction_is_valid, commit offset %ld had bad "
2081 "time %d or length %d",
2082 c_bh->b_blocknr -
2083 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2084 get_commit_trans_id(commit),
2085 get_commit_trans_len(commit));
2086 brelse(c_bh);
2087 if (oldest_invalid_trans_id) {
2088 *oldest_invalid_trans_id =
2089 get_desc_trans_id(desc);
2090 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2091 "journal-1004: "
2092 "transaction_is_valid setting oldest invalid trans_id "
2093 "to %d",
2094 get_desc_trans_id(desc));
2095 }
2096 return -1;
2097 }
2098 brelse(c_bh);
2099 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2100 "journal-1006: found valid "
2101 "transaction start offset %llu, len %d id %d",
2102 d_bh->b_blocknr -
2103 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2104 get_desc_trans_len(desc),
2105 get_desc_trans_id(desc));
2106 return 1;
2107 } else {
2108 return 0;
2109 }
2110}
2111
2112static void brelse_array(struct buffer_head **heads, int num)
2113{
2114 int i;
2115 for (i = 0; i < num; i++) {
2116 brelse(heads[i]);
2117 }
2118}
2119
2120/*
 2121** given the start, and values for the oldest acceptable transactions,
 2122** this either reads in and replays a transaction, or returns because the
 2123** transaction is invalid, or too old.
2124*/
2125static int journal_read_transaction(struct super_block *p_s_sb,
2126 unsigned long cur_dblock,
2127 unsigned long oldest_start,
600ed416 2128 unsigned int oldest_trans_id,
2129 unsigned long newest_mount_id)
2130{
2131 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2132 struct reiserfs_journal_desc *desc;
2133 struct reiserfs_journal_commit *commit;
600ed416 2134 unsigned int trans_id = 0;
2135 struct buffer_head *c_bh;
2136 struct buffer_head *d_bh;
2137 struct buffer_head **log_blocks = NULL;
2138 struct buffer_head **real_blocks = NULL;
600ed416 2139 unsigned int trans_offset;
2140 int i;
2141 int trans_half;
2142
2143 d_bh = journal_bread(p_s_sb, cur_dblock);
2144 if (!d_bh)
2145 return 1;
2146 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2147 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2148 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
2149 "journal_read_transaction, offset %llu, len %d mount_id %d",
2150 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2151 get_desc_trans_len(desc), get_desc_mount_id(desc));
2152 if (get_desc_trans_id(desc) < oldest_trans_id) {
2153 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
2154 "journal_read_trans skipping because %lu is too old",
2155 cur_dblock -
2156 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2157 brelse(d_bh);
2158 return 1;
2159 }
2160 if (get_desc_mount_id(desc) != newest_mount_id) {
2161 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
2162 "journal_read_trans skipping because %d is != "
2163 "newest_mount_id %lu", get_desc_mount_id(desc),
2164 newest_mount_id);
2165 brelse(d_bh);
2166 return 1;
2167 }
2168 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2169 ((trans_offset + get_desc_trans_len(desc) + 1) %
2170 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2171 if (!c_bh) {
2172 brelse(d_bh);
2173 return 1;
2174 }
2175 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2176 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2177 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2178 "journal_read_transaction, "
2179 "commit offset %llu had bad time %d or length %d",
2180 c_bh->b_blocknr -
2181 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2182 get_commit_trans_id(commit),
2183 get_commit_trans_len(commit));
2184 brelse(c_bh);
2185 brelse(d_bh);
2186 return 1;
2187 }
2188 trans_id = get_desc_trans_id(desc);
2189 /* now we know we've got a good transaction, and it was inside the valid time ranges */
2190 log_blocks = kmalloc(get_desc_trans_len(desc) *
2191 sizeof(struct buffer_head *), GFP_NOFS);
2192 real_blocks = kmalloc(get_desc_trans_len(desc) *
2193 sizeof(struct buffer_head *), GFP_NOFS);
2194 if (!log_blocks || !real_blocks) {
2195 brelse(c_bh);
2196 brelse(d_bh);
2197 kfree(log_blocks);
2198 kfree(real_blocks);
2199 reiserfs_warning(p_s_sb, "journal-1169",
2200 "kmalloc failed, unable to mount FS");
2201 return -1;
2202 }
2203 /* get all the buffer heads */
2204 trans_half = journal_trans_half(p_s_sb->s_blocksize);
2205 for (i = 0; i < get_desc_trans_len(desc); i++) {
2206 log_blocks[i] =
2207 journal_getblk(p_s_sb,
2208 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2209 (trans_offset + 1 +
2210 i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2211 if (i < trans_half) {
2212 real_blocks[i] =
2213 sb_getblk(p_s_sb,
2214 le32_to_cpu(desc->j_realblock[i]));
2215 } else {
2216 real_blocks[i] =
2217 sb_getblk(p_s_sb,
2218 le32_to_cpu(commit->
2219 j_realblock[i - trans_half]));
2220 }
2221 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
2222 reiserfs_warning(p_s_sb, "journal-1207",
2223 "REPLAY FAILURE fsck required! "
2224 "Block to replay is outside of "
2225 "filesystem");
2226 goto abort_replay;
2227 }
2228 /* make sure we don't try to replay onto log or reserved area */
2229 if (is_block_in_log_or_reserved_area
2230 (p_s_sb, real_blocks[i]->b_blocknr)) {
2231 reiserfs_warning(p_s_sb, "journal-1204",
2232 "REPLAY FAILURE fsck required! "
2233 "Trying to replay onto a log block");
2234 abort_replay:
2235 brelse_array(log_blocks, i);
2236 brelse_array(real_blocks, i);
2237 brelse(c_bh);
2238 brelse(d_bh);
2239 kfree(log_blocks);
2240 kfree(real_blocks);
2241 return -1;
2242 }
2243 }
2244 /* read in the log blocks, memcpy to the corresponding real block */
2245 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2246 for (i = 0; i < get_desc_trans_len(desc); i++) {
2247 wait_on_buffer(log_blocks[i]);
2248 if (!buffer_uptodate(log_blocks[i])) {
2249 reiserfs_warning(p_s_sb, "journal-1212",
2250 "REPLAY FAILURE fsck required! "
2251 "buffer write failed");
2252 brelse_array(log_blocks + i,
2253 get_desc_trans_len(desc) - i);
2254 brelse_array(real_blocks, get_desc_trans_len(desc));
2255 brelse(c_bh);
2256 brelse(d_bh);
2257 kfree(log_blocks);
2258 kfree(real_blocks);
2259 return -1;
2260 }
2261 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2262 real_blocks[i]->b_size);
2263 set_buffer_uptodate(real_blocks[i]);
2264 brelse(log_blocks[i]);
2265 }
2266 /* flush out the real blocks */
2267 for (i = 0; i < get_desc_trans_len(desc); i++) {
2268 set_buffer_dirty(real_blocks[i]);
53778ffd 2269 ll_rw_block(SWRITE, 1, real_blocks + i);
2270 }
2271 for (i = 0; i < get_desc_trans_len(desc); i++) {
2272 wait_on_buffer(real_blocks[i]);
2273 if (!buffer_uptodate(real_blocks[i])) {
2274 reiserfs_warning(p_s_sb, "journal-1226",
2275 "REPLAY FAILURE, fsck required! "
2276 "buffer write failed");
2277 brelse_array(real_blocks + i,
2278 get_desc_trans_len(desc) - i);
2279 brelse(c_bh);
2280 brelse(d_bh);
2281 kfree(log_blocks);
2282 kfree(real_blocks);
2283 return -1;
2284 }
2285 brelse(real_blocks[i]);
2286 }
2287 cur_dblock =
2288 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2289 ((trans_offset + get_desc_trans_len(desc) +
2290 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2291 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2292 "journal-1095: setting journal " "start to offset %ld",
2293 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2294
2295 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2296 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2297 journal->j_last_flush_trans_id = trans_id;
2298 journal->j_trans_id = trans_id + 1;
2299 /* check for trans_id overflow */
2300 if (journal->j_trans_id == 0)
2301 journal->j_trans_id = 10;
2302 brelse(c_bh);
2303 brelse(d_bh);
2304 kfree(log_blocks);
2305 kfree(real_blocks);
bd4c625c 2306 return 0;
2307}
2308
 2309/* This function reads blocks starting from block up to max_block, of bufsize
 2310   size (but no more than BUFNR blocks at a time). This proved to improve
 2311   mounting speed, on self-rebuilding raid5 arrays at least.
2312 Right now it is only used from journal code. But later we might use it
2313 from other places.
2314 Note: Do not use journal_getblk/sb_getblk functions here! */
2315static struct buffer_head *reiserfs_breada(struct block_device *dev,
2316 b_blocknr_t block, int bufsize,
2317 b_blocknr_t max_block)
1da177e4 2318{
bd4c625c 2319 struct buffer_head *bhlist[BUFNR];
1da177e4 2320 unsigned int blocks = BUFNR;
bd4c625c 2321 struct buffer_head *bh;
1da177e4 2322 int i, j;
2323
2324 bh = __getblk(dev, block, bufsize);
2325 if (buffer_uptodate(bh))
2326 return (bh);
2327
2328 if (block + BUFNR > max_block) {
2329 blocks = max_block - block;
2330 }
2331 bhlist[0] = bh;
2332 j = 1;
2333 for (i = 1; i < blocks; i++) {
2334 bh = __getblk(dev, block + i, bufsize);
2335 if (buffer_uptodate(bh)) {
2336 brelse(bh);
1da177e4 2337 break;
2338 } else
2339 bhlist[j++] = bh;
1da177e4 2340 }
2341 ll_rw_block(READ, j, bhlist);
2342 for (i = 1; i < j; i++)
2343 brelse(bhlist[i]);
1da177e4 2344 bh = bhlist[0];
2345 wait_on_buffer(bh);
2346 if (buffer_uptodate(bh))
1da177e4 2347 return bh;
bd4c625c 2348 brelse(bh);
2349 return NULL;
2350}
2351
2352/*
2353** read and replay the log
 2354** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
 2355** transaction. This tests for that first, before scanning all the transactions in the log, which keeps normal mount times fast.
2356**
2357** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
2358**
2359** On exit, it sets things up so the first transaction will work correctly.
2360*/
2361static int journal_read(struct super_block *p_s_sb)
2362{
2363 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2364 struct reiserfs_journal_desc *desc;
2365 unsigned int oldest_trans_id = 0;
2366 unsigned int oldest_invalid_trans_id = 0;
2367 time_t start;
2368 unsigned long oldest_start = 0;
2369 unsigned long cur_dblock = 0;
2370 unsigned long newest_mount_id = 9;
2371 struct buffer_head *d_bh;
2372 struct reiserfs_journal_header *jh;
2373 int valid_journal_header = 0;
2374 int replay_count = 0;
2375 int continue_replay = 1;
2376 int ret;
2377 char b[BDEVNAME_SIZE];
2378
2379 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2380 reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
2381 bdevname(journal->j_dev_bd, b));
2382 start = get_seconds();
2383
2384 /* step 1, read in the journal header block. Check the transaction it says
2385 ** is the first unflushed, and if that transaction is not valid,
2386 ** replay is done
2387 */
2388 journal->j_header_bh = journal_bread(p_s_sb,
2389 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
2390 + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2391 if (!journal->j_header_bh) {
2392 return 1;
2393 }
2394 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
c499ec24 2395 if (le32_to_cpu(jh->j_first_unflushed_offset) <
2396 SB_ONDISK_JOURNAL_SIZE(p_s_sb)
2397 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2398 oldest_start =
2399 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2400 le32_to_cpu(jh->j_first_unflushed_offset);
2401 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2402 newest_mount_id = le32_to_cpu(jh->j_mount_id);
2403 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2404 "journal-1153: found in "
2405 "header: first_unflushed_offset %d, last_flushed_trans_id "
2406 "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2407 le32_to_cpu(jh->j_last_flush_trans_id));
2408 valid_journal_header = 1;
2409
2410 /* now, we try to read the first unflushed offset. If it is not valid,
2411 ** there is nothing more we can do, and it makes no sense to read
2412 ** through the whole log.
2413 */
2414 d_bh =
2415 journal_bread(p_s_sb,
2416 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2417 le32_to_cpu(jh->j_first_unflushed_offset));
2418 ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
2419 if (!ret) {
2420 continue_replay = 0;
2421 }
2422 brelse(d_bh);
2423 goto start_log_replay;
2424 }
2425
2426 if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
2427 reiserfs_warning(p_s_sb, "clm-2076",
2428 "device is readonly, unable to replay log");
2429 return -1;
2430 }
2431
2432 /* ok, there are transactions that need to be replayed. start with the first log block, find
2433 ** all the valid transactions, and pick out the oldest.
2434 */
2435 while (continue_replay
2436 && cur_dblock <
2437 (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2438 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
2439 /* Note that it is required for blocksize of primary fs device and journal
2440 device to be the same */
2441 d_bh =
2442 reiserfs_breada(journal->j_dev_bd, cur_dblock,
2443 p_s_sb->s_blocksize,
2444 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2445 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2446 ret =
2447 journal_transaction_is_valid(p_s_sb, d_bh,
2448 &oldest_invalid_trans_id,
2449 &newest_mount_id);
2450 if (ret == 1) {
2451 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2452 if (oldest_start == 0) { /* init all oldest_ values */
2453 oldest_trans_id = get_desc_trans_id(desc);
2454 oldest_start = d_bh->b_blocknr;
2455 newest_mount_id = get_desc_mount_id(desc);
2456 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2457 "journal-1179: Setting "
2458 "oldest_start to offset %llu, trans_id %lu",
2459 oldest_start -
2460 SB_ONDISK_JOURNAL_1st_BLOCK
2461 (p_s_sb), oldest_trans_id);
2462 } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2463 /* one we just read was older */
2464 oldest_trans_id = get_desc_trans_id(desc);
2465 oldest_start = d_bh->b_blocknr;
2466 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2467 "journal-1180: Resetting "
2468 "oldest_start to offset %lu, trans_id %lu",
2469 oldest_start -
2470 SB_ONDISK_JOURNAL_1st_BLOCK
2471 (p_s_sb), oldest_trans_id);
2472 }
2473 if (newest_mount_id < get_desc_mount_id(desc)) {
2474 newest_mount_id = get_desc_mount_id(desc);
2475 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2476 "journal-1299: Setting "
2477 "newest_mount_id to %d",
2478 get_desc_mount_id(desc));
2479 }
2480 cur_dblock += get_desc_trans_len(desc) + 2;
2481 } else {
2482 cur_dblock++;
2483 }
2484 brelse(d_bh);
2485 }
2486
2487 start_log_replay:
2488 cur_dblock = oldest_start;
2489 if (oldest_trans_id) {
2490 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2491 "journal-1206: Starting replay "
2492 "from offset %llu, trans_id %lu",
2493 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2494 oldest_trans_id);
2495
2496 }
2497 replay_count = 0;
2498 while (continue_replay && oldest_trans_id > 0) {
2499 ret =
2500 journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
2501 oldest_trans_id, newest_mount_id);
2502 if (ret < 0) {
2503 return ret;
2504 } else if (ret != 0) {
2505 break;
2506 }
2507 cur_dblock =
2508 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
2509 replay_count++;
2510 if (cur_dblock == oldest_start)
2511 break;
2512 }
2513
2514 if (oldest_trans_id == 0) {
2515 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2516 "journal-1225: No valid " "transactions found");
2517 }
2518 /* j_start does not get set correctly if we don't replay any transactions.
2519 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2520 ** copy the trans_id from the header
2521 */
2522 if (valid_journal_header && replay_count == 0) {
2523 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2524 journal->j_trans_id =
2525 le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2526 /* check for trans_id overflow */
2527 if (journal->j_trans_id == 0)
2528 journal->j_trans_id = 10;
2529 journal->j_last_flush_trans_id =
2530 le32_to_cpu(jh->j_last_flush_trans_id);
2531 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2532 } else {
2533 journal->j_mount_id = newest_mount_id + 1;
2534 }
1da177e4 2535 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2536 "newest_mount_id to %lu", journal->j_mount_id);
2537 journal->j_first_unflushed_offset = journal->j_start;
2538 if (replay_count > 0) {
2539 reiserfs_info(p_s_sb,
2540 "replayed %d transactions in %lu seconds\n",
2541 replay_count, get_seconds() - start);
2542 }
2543 if (!bdev_read_only(p_s_sb->s_bdev) &&
2544 _update_journal_header_block(p_s_sb, journal->j_start,
2545 journal->j_last_flush_trans_id)) {
2546 /* replay failed, caller must call free_journal_ram and abort
2547 ** the mount
2548 */
2549 return -1;
2550 }
2551 return 0;
2552}
2553
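/* Editor's note (hedged summary, not original code): journal_read above
** works in three phases -- 1) on a clean mount, trust the header's
** first_unflushed offset and skip the full scan; 2) otherwise scan every
** log block, tracking the oldest valid trans_id and the newest mount_id;
** 3) replay forward from the oldest transaction until one is invalid or
** too old, then rewrite the journal header so the next mount starts clean.
*/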
2554static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2555{
bd4c625c 2556 struct reiserfs_journal_list *jl;
2557 jl = kzalloc(sizeof(struct reiserfs_journal_list),
2558 GFP_NOFS | __GFP_NOFAIL);
2559 INIT_LIST_HEAD(&jl->j_list);
2560 INIT_LIST_HEAD(&jl->j_working_list);
2561 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2562 INIT_LIST_HEAD(&jl->j_bh_list);
90415dea 2563 mutex_init(&jl->j_commit_mutex);
2564 SB_JOURNAL(s)->j_num_lists++;
2565 get_journal_list(jl);
2566 return jl;
2567}
2568
2569static void journal_list_init(struct super_block *p_s_sb)
2570{
2571 SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
2572}
2573
2574static int release_journal_dev(struct super_block *super,
2575 struct reiserfs_journal *journal)
2576{
2577 int result;
2578
2579 result = 0;
2580
2581 if (journal->j_dev_bd != NULL) {
2582 if (journal->j_dev_bd->bd_dev != super->s_dev)
2583 bd_release(journal->j_dev_bd);
e5eb8caa 2584 result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
2585 journal->j_dev_bd = NULL;
2586 }
2587
2588 if (result != 0) {
2589 reiserfs_warning(super, "sh-457",
2590 "Cannot release journal device: %i", result);
2591 }
2592 return result;
2593}
2594
2595static int journal_init_dev(struct super_block *super,
2596 struct reiserfs_journal *journal,
2597 const char *jdev_name)
2598{
2599 int result;
2600 dev_t jdev;
aeb5d727 2601 fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
2602 char b[BDEVNAME_SIZE];
2603
2604 result = 0;
2605
bd4c625c 2606 journal->j_dev_bd = NULL;
2607 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2608 new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2609
2610 if (bdev_read_only(super->s_bdev))
bd4c625c 2611 blkdev_mode = FMODE_READ;
2612
2613 /* there is no "jdev" option and journal is on separate device */
bd4c625c 2614 if ((!jdev_name || !jdev_name[0])) {
1da177e4 2615 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
e5eb8caa 2616 journal->j_dev_mode = blkdev_mode;
2617 if (IS_ERR(journal->j_dev_bd)) {
2618 result = PTR_ERR(journal->j_dev_bd);
2619 journal->j_dev_bd = NULL;
45b03d5e 2620 reiserfs_warning(super, "sh-458",
2621 "cannot init journal device '%s': %i",
2622 __bdevname(jdev, b), result);
1da177e4 2623 return result;
2624 } else if (jdev != super->s_dev) {
2625 result = bd_claim(journal->j_dev_bd, journal);
2626 if (result) {
9a1c3542 2627 blkdev_put(journal->j_dev_bd, blkdev_mode);
2628 return result;
2629 }
2630
1da177e4 2631 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2632 }
2633
2634 return 0;
2635 }
2636
e5eb8caa 2637 journal->j_dev_mode = blkdev_mode;
30c40d2c 2638 journal->j_dev_bd = open_bdev_exclusive(jdev_name,
e5eb8caa 2639 blkdev_mode, journal);
2640 if (IS_ERR(journal->j_dev_bd)) {
2641 result = PTR_ERR(journal->j_dev_bd);
2642 journal->j_dev_bd = NULL;
2643 reiserfs_warning(super,
2644 "journal_init_dev: Cannot open '%s': %i",
2645 jdev_name, result);
86098fa0 2646 return result;
1da177e4 2647 }
2648
2649 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2650 reiserfs_info(super,
2651 "journal_init_dev: journal device: %s\n",
2652 bdevname(journal->j_dev_bd, b));
2653 return 0;
2654}
2655
2656/**
 2657 * When creating/tuning a file system the user can assign some
 2658 * journal params within boundaries which depend on the ratio
 2659 * blocksize/standard_blocksize.
 2660 *
 2661 * For blocks >= standard_blocksize the transaction size should
 2662 * be no less than JOURNAL_TRANS_MIN_DEFAULT, and no more
 2663 * than JOURNAL_TRANS_MAX_DEFAULT.
2664 *
2665 * For blocks < standard_blocksize these boundaries should be
2666 * decreased proportionally.
2667 */
2668#define REISERFS_STANDARD_BLKSIZE (4096)
2669
2670static int check_advise_trans_params(struct super_block *p_s_sb,
2671 struct reiserfs_journal *journal)
2672{
2673 if (journal->j_trans_max) {
2674 /* Non-default journal params.
2675 Do sanity check for them. */
2676 int ratio = 1;
2677 if (p_s_sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
2678 ratio = REISERFS_STANDARD_BLKSIZE / p_s_sb->s_blocksize;
2679
2680 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
2681 journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
2682 SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
2683 JOURNAL_MIN_RATIO) {
2684 reiserfs_warning(p_s_sb, "sh-462",
2685 "bad transaction max size (%u). "
2686 "FSCK?", journal->j_trans_max);
2687 return 1;
2688 }
2689 if (journal->j_max_batch != (journal->j_trans_max) *
2690 JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
2691 reiserfs_warning(p_s_sb, "sh-463",
2692 "bad transaction max batch (%u). "
2693 "FSCK?", journal->j_max_batch);
2694 return 1;
2695 }
2696 } else {
2697 /* Default journal params.
2698 The file system was created by old version
2699 of mkreiserfs, so some fields contain zeros,
2700 and we need to advise proper values for them */
2701 if (p_s_sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
2702 reiserfs_warning(p_s_sb, "sh-464", "bad blocksize (%u)",
2703 p_s_sb->s_blocksize);
2704 return 1;
2705 }
2706 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2707 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2708 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2709 }
2710 return 0;
2711}
2712
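/* Editor's note: a worked example (not in the original) of the check above,
** assuming the usual defaults of JOURNAL_TRANS_MAX_DEFAULT 1024 and
** JOURNAL_TRANS_MIN_DEFAULT 256. With a 1024-byte blocksize,
** ratio = 4096 / 1024 = 4, so a non-default j_trans_max must lie in
** [256 / 4, 1024 / 4] = [64, 256], and j_max_batch must equal
** j_trans_max * JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT.
*/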
2713/*
2714** must be called once on fs mount. calls journal_read for you
2715*/
2716int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2717 int old_format, unsigned int commit_max_age)
2718{
2719 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
2720 struct buffer_head *bhjh;
2721 struct reiserfs_super_block *rs;
2722 struct reiserfs_journal_header *jh;
2723 struct reiserfs_journal *journal;
2724 struct reiserfs_journal_list *jl;
2725 char b[BDEVNAME_SIZE];
2726
2727 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
2728 if (!journal) {
2729 reiserfs_warning(p_s_sb, "journal-1256",
2730 "unable to get memory for journal structure");
2731 return 1;
2732 }
2733 memset(journal, 0, sizeof(struct reiserfs_journal));
2734 INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2735 INIT_LIST_HEAD(&journal->j_prealloc_list);
2736 INIT_LIST_HEAD(&journal->j_working_list);
2737 INIT_LIST_HEAD(&journal->j_journal_list);
2738 journal->j_persistent_trans = 0;
2739 if (reiserfs_allocate_list_bitmaps(p_s_sb,
2740 journal->j_list_bitmap,
cb680c1b 2741 reiserfs_bmap_count(p_s_sb)))
2742 goto free_and_return;
2743 allocate_bitmap_nodes(p_s_sb);
2744
2745 /* reserved for journal area support */
2746 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2747 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2748 / p_s_sb->s_blocksize +
cb680c1b 2749 reiserfs_bmap_count(p_s_sb) +
2750 1 :
2751 REISERFS_DISK_OFFSET_IN_BYTES /
2752 p_s_sb->s_blocksize + 2);
2753
 2754	/* Sanity check to see if the standard journal fits within the first bitmap
 2755	   block (relevant for small blocksizes) */
2756 if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
2757 (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
2758 SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
2759 reiserfs_warning(p_s_sb, "journal-1393",
2760 "journal does not fit for area addressed "
2761 "by first of bitmap blocks. It starts at "
2762 "%u and its size is %u. Block size %ld",
2763 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
2764 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2765 p_s_sb->s_blocksize);
2766 goto free_and_return;
2767 }
2768
2769 if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
2770 reiserfs_warning(p_s_sb, "sh-462",
 2771				 "unable to initialize journal device");
2772 goto free_and_return;
2773 }
2774
2775 rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2776
2777 /* read journal header */
2778 bhjh = journal_bread(p_s_sb,
2779 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2780 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2781 if (!bhjh) {
2782 reiserfs_warning(p_s_sb, "sh-459",
2783 "unable to read journal header");
2784 goto free_and_return;
2785 }
2786 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2787
2788 /* make sure that journal matches to the super block */
2789 if (is_reiserfs_jr(rs)
2790 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2791 sb_jp_journal_magic(rs))) {
2792 reiserfs_warning(p_s_sb, "sh-460",
2793 "journal header magic %x (device %s) does "
2794 "not match to magic found in super block %x",
2795 jh->jh_journal.jp_journal_magic,
2796 bdevname(journal->j_dev_bd, b),
2797 sb_jp_journal_magic(rs));
2798 brelse(bhjh);
2799 goto free_and_return;
2800 }
2801
2802 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2803 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2804 journal->j_max_commit_age =
2805 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2806 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2807
2808 if (check_advise_trans_params(p_s_sb, journal) != 0)
2809 goto free_and_return;
2810 journal->j_default_max_commit_age = journal->j_max_commit_age;
2811
2812 if (commit_max_age != 0) {
2813 journal->j_max_commit_age = commit_max_age;
2814 journal->j_max_trans_age = commit_max_age;
2815 }
2816
2817 reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
2818 "journal first block %u, max trans len %u, max batch %u, "
2819 "max commit age %u, max trans age %u\n",
2820 bdevname(journal->j_dev_bd, b),
2821 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2822 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2823 journal->j_trans_max,
2824 journal->j_max_batch,
2825 journal->j_max_commit_age, journal->j_max_trans_age);
2826
2827 brelse(bhjh);
2828
2829 journal->j_list_bitmap_index = 0;
2830 journal_list_init(p_s_sb);
2831
2832 memset(journal->j_list_hash_table, 0,
2833 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2834
2835 INIT_LIST_HEAD(&journal->j_dirty_buffers);
2836 spin_lock_init(&journal->j_dirty_buffers_lock);
2837
2838 journal->j_start = 0;
2839 journal->j_len = 0;
2840 journal->j_len_alloc = 0;
2841 atomic_set(&(journal->j_wcount), 0);
2842 atomic_set(&(journal->j_async_throttle), 0);
2843 journal->j_bcount = 0;
2844 journal->j_trans_start_time = 0;
2845 journal->j_last = NULL;
2846 journal->j_first = NULL;
2847 init_waitqueue_head(&(journal->j_join_wait));
f68215c4 2848 mutex_init(&journal->j_mutex);
afe70259 2849 mutex_init(&journal->j_flush_mutex);
2850
2851 journal->j_trans_id = 10;
2852 journal->j_mount_id = 10;
2853 journal->j_state = 0;
2854 atomic_set(&(journal->j_jlock), 0);
2855 journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2856 journal->j_cnode_free_orig = journal->j_cnode_free_list;
2857 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2858 journal->j_cnode_used = 0;
2859 journal->j_must_wait = 0;
2860
576f6d79 2861 if (journal->j_cnode_free == 0) {
45b03d5e 2862 reiserfs_warning(p_s_sb, "journal-2004", "Journal cnode memory "
2863 "allocation failed (%ld bytes). Journal is "
2864 "too large for available memory. Usually "
2865 "this is due to a journal that is too large.",
2866 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2867 goto free_and_return;
2868 }
2869
2870 init_journal_hash(p_s_sb);
2871 jl = journal->j_current_jl;
2872 jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
2873 if (!jl->j_list_bitmap) {
2874 reiserfs_warning(p_s_sb, "journal-2005",
2875 "get_list_bitmap failed for journal list 0");
2876 goto free_and_return;
2877 }
2878 if (journal_read(p_s_sb) < 0) {
2879 reiserfs_warning(p_s_sb, "reiserfs-2006",
2880 "Replay Failure, unable to mount");
2881 goto free_and_return;
2882 }
2883
2884 reiserfs_mounted_fs_count++;
2885 if (reiserfs_mounted_fs_count <= 1)
2886 commit_wq = create_workqueue("reiserfs");
2887
2888 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2889 journal->j_work_sb = p_s_sb;
2890 return 0;
2891 free_and_return:
2892 free_journal_ram(p_s_sb);
2893 return 1;
2894}
2895
2896/*
2897** test for a polite end of the current transaction. Used by file_write, and should
2898** be used by delete to make sure they don't write more than can fit inside a single
2899** transaction
2900*/
2901int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2902 int new_alloc)
2903{
2904 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2905 time_t now = get_seconds();
2906 /* cannot restart while nested */
2907 BUG_ON(!th->t_trans_id);
2908 if (th->t_refcount > 1)
2909 return 0;
2910 if (journal->j_must_wait > 0 ||
2911 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2912 atomic_read(&(journal->j_jlock)) ||
2913 (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2914 journal->j_cnode_free < (journal->j_trans_max * 3)) {
2915 return 1;
2916 }
2917 /* protected by the BKL here */
2918 journal->j_len_alloc += new_alloc;
2919 th->t_blocks_allocated += new_alloc ;
bd4c625c 2920 return 0;
2921}
2922
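/* Editor's note: a hedged usage sketch (not from this file) of how a caller
** such as file_write is expected to use journal_transaction_should_end:
** when it returns 1, politely end the transaction and begin a new one
** before logging more blocks. Error handling is minimal; the helper name
** is hypothetical.
*/
static int example_restart_if_needed(struct reiserfs_transaction_handle *th,
				     struct super_block *sb, int new_alloc)
{
	int err = 0;
	if (journal_transaction_should_end(th, new_alloc)) {
		/* close the current transaction ... */
		err = journal_end(th, sb, th->t_blocks_allocated);
		if (!err)
			/* ... and start a fresh one for the new blocks */
			err = journal_begin(th, sb, new_alloc);
	}
	return err;
}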
2923/* this must be called inside a transaction, and requires the
2924** kernel_lock to be held
2925*/
2926void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2927{
2928 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2929 BUG_ON(!th->t_trans_id);
2930 journal->j_must_wait = 1;
2931 set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2932 return;
2933}
2934
2935/* this must be called without a transaction started, and does not
2936** require BKL
2937*/
2938void reiserfs_allow_writes(struct super_block *s)
2939{
2940 struct reiserfs_journal *journal = SB_JOURNAL(s);
2941 clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2942 wake_up(&journal->j_join_wait);
2943}
2944
2945/* this must be called without a transaction started, and does not
2946** require BKL
2947*/
2948void reiserfs_wait_on_write_block(struct super_block *s)
2949{
2950 struct reiserfs_journal *journal = SB_JOURNAL(s);
2951 wait_event(journal->j_join_wait,
2952 !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2953}
2954
2955static void queue_log_writer(struct super_block *s)
2956{
2957 wait_queue_t wait;
2958 struct reiserfs_journal *journal = SB_JOURNAL(s);
2959 set_bit(J_WRITERS_QUEUED, &journal->j_state);
2960
2961 /*
2962 * we don't want to use wait_event here because
2963 * we only want to wait once.
2964 */
2965 init_waitqueue_entry(&wait, current);
2966 add_wait_queue(&journal->j_join_wait, &wait);
1da177e4 2967 set_current_state(TASK_UNINTERRUPTIBLE);
2968 if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
2969 schedule();
5ab2f7e0 2970 __set_current_state(TASK_RUNNING);
2971 remove_wait_queue(&journal->j_join_wait, &wait);
2972}
2973
2974static void wake_queued_writers(struct super_block *s)
2975{
2976 struct reiserfs_journal *journal = SB_JOURNAL(s);
2977 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2978 wake_up(&journal->j_join_wait);
2979}
2980
600ed416 2981static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
2982{
2983 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2984 unsigned long bcount = journal->j_bcount;
2985 while (1) {
041e0e3b 2986 schedule_timeout_uninterruptible(1);
2987 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2988 while ((atomic_read(&journal->j_wcount) > 0 ||
2989 atomic_read(&journal->j_jlock)) &&
2990 journal->j_trans_id == trans_id) {
2991 queue_log_writer(sb);
2992 }
2993 if (journal->j_trans_id != trans_id)
2994 break;
2995 if (bcount == journal->j_bcount)
2996 break;
2997 bcount = journal->j_bcount;
1da177e4 2998 }
2999}
3000
3001/* join == true if you must join an existing transaction.
3002** join == false if you can deal with waiting for others to finish
3003**
3004** this will block until the transaction is joinable. send the number of blocks you
3005** expect to use in nblocks.
3006*/
3007static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
3008 struct super_block *p_s_sb, unsigned long nblocks,
3009 int join)
3010{
3011 time_t now = get_seconds();
600ed416 3012 unsigned int old_trans_id;
3013 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3014 struct reiserfs_transaction_handle myth;
3015 int sched_count = 0;
3016 int retval;
3017
3018 reiserfs_check_lock_depth(p_s_sb, "journal_begin");
14a61442 3019 BUG_ON(nblocks > journal->j_trans_max);
3020
3021 PROC_INFO_INC(p_s_sb, journal.journal_being);
3022 /* set here for journal_join */
3023 th->t_refcount = 1;
3024 th->t_super = p_s_sb;
3025
3026 relock:
3027 lock_journal(p_s_sb);
3028 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
3029 unlock_journal(p_s_sb);
3030 retval = journal->j_errno;
3031 goto out_fail;
3032 }
3033 journal->j_bcount++;
3034
3035 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
3036 unlock_journal(p_s_sb);
3037 reiserfs_wait_on_write_block(p_s_sb);
3038 PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
3039 goto relock;
3040 }
3041 now = get_seconds();
3042
3043 /* if there is no room in the journal OR
 3044	 ** if this transaction is too old and we weren't called joinable, wait for it to finish before beginning.
 3045	 ** we don't sleep if there aren't other writers
3046 */
3047
3048 if ((!join && journal->j_must_wait > 0) ||
3049 (!join
3050 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
3051 || (!join && atomic_read(&journal->j_wcount) > 0
3052 && journal->j_trans_start_time > 0
3053 && (now - journal->j_trans_start_time) >
3054 journal->j_max_trans_age) || (!join
3055 && atomic_read(&journal->j_jlock))
3056 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
3057
3058 old_trans_id = journal->j_trans_id;
3059 unlock_journal(p_s_sb); /* allow others to finish this transaction */
3060
3061 if (!join && (journal->j_len_alloc + nblocks + 2) >=
3062 journal->j_max_batch &&
3063 ((journal->j_len + nblocks + 2) * 100) <
3064 (journal->j_len_alloc * 75)) {
3065 if (atomic_read(&journal->j_wcount) > 10) {
3066 sched_count++;
3067 queue_log_writer(p_s_sb);
3068 goto relock;
3069 }
3070 }
3071 /* don't mess with joining the transaction if all we have to do is
3072 * wait for someone else to do a commit
3073 */
3074 if (atomic_read(&journal->j_jlock)) {
3075 while (journal->j_trans_id == old_trans_id &&
3076 atomic_read(&journal->j_jlock)) {
3077 queue_log_writer(p_s_sb);
3078 }
3079 goto relock;
3080 }
3081 retval = journal_join(&myth, p_s_sb, 1);
3082 if (retval)
3083 goto out_fail;
3084
3085 /* someone might have ended the transaction while we joined */
3086 if (old_trans_id != journal->j_trans_id) {
3087 retval = do_journal_end(&myth, p_s_sb, 1, 0);
3088 } else {
3089 retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
3090 }
3091
3092 if (retval)
3093 goto out_fail;
3094
3095 PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
3096 goto relock;
3097 }
3098 /* we are the first writer, set trans_id */
3099 if (journal->j_trans_start_time == 0) {
3100 journal->j_trans_start_time = get_seconds();
3101 }
3102 atomic_inc(&(journal->j_wcount));
3103 journal->j_len_alloc += nblocks;
3104 th->t_blocks_logged = 0;
3105 th->t_blocks_allocated = nblocks;
3106 th->t_trans_id = journal->j_trans_id;
3107 unlock_journal(p_s_sb);
3108 INIT_LIST_HEAD(&th->t_list);
3109 get_fs_excl();
3110 return 0;
3111
3112 out_fail:
3113 memset(th, 0, sizeof(*th));
3114 /* Re-set th->t_super, so we can properly keep track of how many
3115 * persistent transactions there are. We need to do this so if this
3116 * call is part of a failed restart_transaction, we can free it later */
3117 th->t_super = p_s_sb;
3118 return retval;
3119}
3120
3121struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3122 super_block
3123 *s,
3124 int nblocks)
3125{
3126 int ret;
3127 struct reiserfs_transaction_handle *th;
3128
 3129	/* if we're nesting into an existing transaction, it will be
3130 ** persistent on its own
3131 */
3132 if (reiserfs_transaction_running(s)) {
3133 th = current->journal_info;
3134 th->t_refcount++;
3135 BUG_ON(th->t_refcount < 2);
3136
3137 return th;
3138 }
d739b42b 3139 th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3140 if (!th)
3141 return NULL;
3142 ret = journal_begin(th, s, nblocks);
3143 if (ret) {
d739b42b 3144 kfree(th);
3145 return NULL;
3146 }
3147
3148 SB_JOURNAL(s)->j_persistent_trans++;
3149 return th;
3150}
3151
3152int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3153{
3154 struct super_block *s = th->t_super;
3155 int ret = 0;
3156 if (th->t_trans_id)
3157 ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3158 else
3159 ret = -EIO;
3160 if (th->t_refcount == 0) {
3161 SB_JOURNAL(s)->j_persistent_trans--;
d739b42b 3162 kfree(th);
3163 }
3164 return ret;
3165}
3166
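/* Editor's note: a hedged usage sketch (not in the original) pairing the
** persistent-transaction helpers above: code that must carry a transaction
** across function boundaries allocates a handle, uses it, then ends it.
** The function name is hypothetical.
*/
static int example_persistent_use(struct super_block *s)
{
	struct reiserfs_transaction_handle *th;

	th = reiserfs_persistent_transaction(s, 1);
	if (!th)
		return -ENOMEM;
	/* ... journal_mark_dirty(th, s, some_bh) as needed ... */
	return reiserfs_end_persistent_transaction(th);
}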
3167static int journal_join(struct reiserfs_transaction_handle *th,
3168 struct super_block *p_s_sb, unsigned long nblocks)
3169{
3170 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3171
3172 /* this keeps do_journal_end from NULLing out the current->journal_info
3173 ** pointer
3174 */
3175 th->t_handle_save = cur_th;
14a61442 3176 BUG_ON(cur_th && cur_th->t_refcount > 1);
3177 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
3178}
3179
3180int journal_join_abort(struct reiserfs_transaction_handle *th,
3181 struct super_block *p_s_sb, unsigned long nblocks)
3182{
3183 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3184
3185 /* this keeps do_journal_end from NULLing out the current->journal_info
3186 ** pointer
3187 */
3188 th->t_handle_save = cur_th;
14a61442 3189 BUG_ON(cur_th && cur_th->t_refcount > 1);
3190 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
3191}
3192
3193int journal_begin(struct reiserfs_transaction_handle *th,
3194 struct super_block *p_s_sb, unsigned long nblocks)
3195{
3196 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3197 int ret;
3198
3199 th->t_handle_save = NULL;
3200 if (cur_th) {
3201 /* we are nesting into the current transaction */
3202 if (cur_th->t_super == p_s_sb) {
3203 BUG_ON(!cur_th->t_refcount);
3204 cur_th->t_refcount++;
3205 memcpy(th, cur_th, sizeof(*th));
3206 if (th->t_refcount <= 1)
3207 reiserfs_warning(p_s_sb, "reiserfs-2005",
3208 "BAD: refcount <= 1, but "
3209 "journal_info != 0");
3210 return 0;
3211 } else {
3212 /* we've ended up with a handle from a different filesystem.
3213 ** save it and restore on journal_end. This should never
3214 ** really happen...
3215 */
3216 reiserfs_warning(p_s_sb, "clm-2100",
 3217				 "nesting into a different FS");
3218 th->t_handle_save = current->journal_info;
3219 current->journal_info = th;
3220 }
1da177e4 3221 } else {
3222 current->journal_info = th;
3223 }
3224 ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
14a61442 3225 BUG_ON(current->journal_info != th);
1da177e4 3226
3227 /* I guess this boils down to being the reciprocal of clm-2100 above.
3228 * If do_journal_begin_r fails, we need to put it back, since journal_end
3229 * won't be called to do it. */
3230 if (ret)
3231 current->journal_info = th->t_handle_save;
3232 else
3233 BUG_ON(!th->t_refcount);
1da177e4 3234
bd4c625c 3235 return ret;
3236}
3237
3238/*
 3239** puts bh into the current transaction. If it was already there, it removes the
 3240** old pointers from the hash and puts new ones in (to make sure replay happens in the right order).
 3241**
 3242** if it was dirty, cleans it and files it onto the clean list. I can't let it be dirty again until the
 3243** transaction is committed.
 3244**
 3245** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
3246*/
3247int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3248 struct super_block *p_s_sb, struct buffer_head *bh)
3249{
3250 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3251 struct reiserfs_journal_cnode *cn = NULL;
3252 int count_already_incd = 0;
3253 int prepared = 0;
3254 BUG_ON(!th->t_trans_id);
3255
3256 PROC_INFO_INC(p_s_sb, journal.mark_dirty);
3257 if (th->t_trans_id != journal->j_trans_id) {
3258 reiserfs_panic(th->t_super,
3259 "journal-1577: handle trans id %ld != current trans id %ld\n",
3260 th->t_trans_id, journal->j_trans_id);
3261 }
3262
3263 p_s_sb->s_dirt = 1;
3264
3265 prepared = test_clear_buffer_journal_prepared(bh);
3266 clear_buffer_journal_restore_dirty(bh);
3267 /* already in this transaction, we are done */
3268 if (buffer_journaled(bh)) {
3269 PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
3270 return 0;
3271 }
3272
3273 /* this must be turned into a panic instead of a warning. We can't allow
3274 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3275 ** could get to disk too early. NOT GOOD.
3276 */
3277 if (!prepared || buffer_dirty(bh)) {
3278 reiserfs_warning(p_s_sb, "journal-1777",
3279 "buffer %llu bad state "
3280 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3281 (unsigned long long)bh->b_blocknr,
3282 prepared ? ' ' : '!',
3283 buffer_locked(bh) ? ' ' : '!',
3284 buffer_dirty(bh) ? ' ' : '!',
3285 buffer_journal_dirty(bh) ? ' ' : '!');
3286 }
3287
3288 if (atomic_read(&(journal->j_wcount)) <= 0) {
3289 reiserfs_warning(p_s_sb, "journal-1409",
3290 "returning because j_wcount was %d",
3291 atomic_read(&(journal->j_wcount)));
3292 return 1;
3293 }
3294 /* this error means I've screwed up, and we've overflowed the transaction.
3295 ** Nothing can be done here, except make the FS readonly or panic.
3296 */
3297 if (journal->j_len >= journal->j_trans_max) {
3298 reiserfs_panic(th->t_super,
3299 "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
3300 journal->j_len);
3301 }
3302
3303 if (buffer_journal_dirty(bh)) {
3304 count_already_incd = 1;
3305 PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
3306 clear_buffer_journal_dirty(bh);
3307 }
3308
3309 if (journal->j_len > journal->j_len_alloc) {
3310 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3311 }
3312
3313 set_buffer_journaled(bh);
3314
3315 /* now put this guy on the end */
3316 if (!cn) {
3317 cn = get_cnode(p_s_sb);
3318 if (!cn) {
3319 reiserfs_panic(p_s_sb, "get_cnode failed!\n");
3320 }
3321
3322 if (th->t_blocks_logged == th->t_blocks_allocated) {
3323 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3324 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3325 }
3326 th->t_blocks_logged++;
3327 journal->j_len++;
3328
3329 cn->bh = bh;
3330 cn->blocknr = bh->b_blocknr;
3331 cn->sb = p_s_sb;
3332 cn->jlist = NULL;
3333 insert_journal_hash(journal->j_hash_table, cn);
3334 if (!count_already_incd) {
3335 get_bh(bh);
3336 }
3337 }
3338 cn->next = NULL;
3339 cn->prev = journal->j_last;
3340 cn->bh = bh;
3341 if (journal->j_last) {
3342 journal->j_last->next = cn;
3343 journal->j_last = cn;
3344 } else {
3345 journal->j_first = cn;
3346 journal->j_last = cn;
3347 }
3348 return 0;
3349}
3350
3351int journal_end(struct reiserfs_transaction_handle *th,
3352 struct super_block *p_s_sb, unsigned long nblocks)
3353{
3354 if (!current->journal_info && th->t_refcount > 1)
3355 reiserfs_warning(p_s_sb, "REISER-NESTING",
3356 "th NULL, refcount %d", th->t_refcount);
3357
3358 if (!th->t_trans_id) {
3359 WARN_ON(1);
3360 return -EIO;
3361 }
3362
3363 th->t_refcount--;
3364 if (th->t_refcount > 0) {
3365 struct reiserfs_transaction_handle *cur_th =
3366 current->journal_info;
3367
3368 /* we aren't allowed to close a nested transaction on a different
3369 ** filesystem from the one in the task struct
3370 */
14a61442 3371 BUG_ON(cur_th->t_super != th->t_super);
3372
3373 if (th != cur_th) {
3374 memcpy(current->journal_info, th, sizeof(*th));
3375 th->t_trans_id = 0;
3376 }
3377 return 0;
3378 } else {
3379 return do_journal_end(th, p_s_sb, nblocks, 0);
3380 }
3381}
3382
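/* Editor's note: a minimal, hedged sketch (not part of this file) of the
** basic transaction protocol implemented above: begin, prepare and mark
** each changed buffer dirty in the transaction, then end. A real caller
** checks errors and holds the appropriate locks; the function name is
** hypothetical.
*/
static int example_log_one_buffer(struct super_block *sb,
				  struct buffer_head *bh)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, 1);	/* expect to log one block */
	if (err)
		return err;
	reiserfs_prepare_for_journal(sb, bh, 1);	/* lock out other writers */
	journal_mark_dirty(&th, sb, bh);	/* add bh to this transaction */
	return journal_end(&th, sb, 1);
}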
 3383/* removes a block from the current transaction, releasing (brelse) and decrementing any counters.
 3384** also files the removed buffer directly onto the clean list
 3385**
 3386** called by journal_mark_freed when a block has been deleted
 3387**
 3388** returns 1 if it cleaned and released the buffer, 0 otherwise
3389*/
3390static int remove_from_transaction(struct super_block *p_s_sb,
3391 b_blocknr_t blocknr, int already_cleaned)
3392{
3393 struct buffer_head *bh;
3394 struct reiserfs_journal_cnode *cn;
3395 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3396 int ret = 0;
3397
3398 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3399 if (!cn || !cn->bh) {
3400 return ret;
3401 }
3402 bh = cn->bh;
3403 if (cn->prev) {
3404 cn->prev->next = cn->next;
3405 }
3406 if (cn->next) {
3407 cn->next->prev = cn->prev;
3408 }
3409 if (cn == journal->j_first) {
3410 journal->j_first = cn->next;
3411 }
3412 if (cn == journal->j_last) {
3413 journal->j_last = cn->prev;
3414 }
3415 if (bh)
3416 remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
3417 bh->b_blocknr, 0);
3418 clear_buffer_journaled(bh); /* don't log this one */
3419
3420 if (!already_cleaned) {
3421 clear_buffer_journal_dirty(bh);
3422 clear_buffer_dirty(bh);
3423 clear_buffer_journal_test(bh);
3424 put_bh(bh);
3425 if (atomic_read(&(bh->b_count)) < 0) {
3426 reiserfs_warning(p_s_sb, "journal-1752",
3427 "b_count < 0");
3428 }
3429 ret = 1;
3430 }
3431 journal->j_len--;
3432 journal->j_len_alloc--;
3433 free_cnode(p_s_sb, cn);
3434 return ret;
3435}
3436
3437/*
 3438** for any cnode in a journal list, it can only be dirtied if all the
0779bf2d 3439** transactions that include it are committed to disk.
3440** this checks through each transaction, and returns 1 if you are allowed to dirty,
3441** and 0 if you aren't
3442**
3443** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3444** blocks for a given transaction on disk
3445**
3446*/
3447static int can_dirty(struct reiserfs_journal_cnode *cn)
3448{
3449 struct super_block *sb = cn->sb;
3450 b_blocknr_t blocknr = cn->blocknr;
3451 struct reiserfs_journal_cnode *cur = cn->hprev;
3452 int can_dirty = 1;
3453
3454 /* first test hprev. These are all newer than cn, so any node here
3455 ** with the same block number and dev means this node can't be sent
3456 ** to disk right now.
3457 */
3458 while (cur && can_dirty) {
3459 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3460 cur->blocknr == blocknr) {
3461 can_dirty = 0;
3462 }
3463 cur = cur->hprev;
3464 }
3465 /* then test hnext. These are all older than cn. As long as they
3466 ** are committed to the log, it is safe to write cn to disk
3467 */
3468 cur = cn->hnext;
3469 while (cur && can_dirty) {
3470 if (cur->jlist && cur->jlist->j_len > 0 &&
3471 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3472 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3473 can_dirty = 0;
3474 }
3475 cur = cur->hnext;
3476 }
3477 return can_dirty;
1da177e4
LT
3478}

/* syncs the commit blocks, but does not force the real buffers to disk;
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
		     struct super_block *p_s_sb, unsigned long nblocks)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	BUG_ON(!th->t_trans_id);
	/* syncing while nested would be very, very bad */
	BUG_ON(th->t_refcount > 1);
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
					     1);
		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
	}
	return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
}

/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(struct work_struct *work)
{
	struct reiserfs_journal *journal =
	    container_of(work, struct reiserfs_journal, j_work.work);
	struct super_block *p_s_sb = journal->j_work_sb;
	struct reiserfs_journal_list *jl;
	struct list_head *entry;

	lock_kernel();
	if (!list_empty(&journal->j_journal_list)) {
		/* last entry is the youngest, commit it and you get everything */
		entry = journal->j_journal_list.prev;
		jl = JOURNAL_LIST_ENTRY(entry);
		flush_commit_list(p_s_sb, jl, 1);
	}
	unlock_kernel();
}

/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *p_s_sb)
{
	time_t now;
	struct reiserfs_transaction_handle th;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	now = get_seconds();
	/* safety check so we don't flush while we are replaying the log during
	 * mount
	 */
	if (list_empty(&journal->j_journal_list)) {
		return 0;
	}

	/* check the current transaction. If there are no writers, and it is
	 * too old, finish it, and force the commit blocks to disk
	 */
	if (atomic_read(&journal->j_wcount) <= 0 &&
	    journal->j_trans_start_time > 0 &&
	    journal->j_len > 0 &&
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		if (!journal_join(&th, p_s_sb, 1)) {
			reiserfs_prepare_for_journal(p_s_sb,
						     SB_BUFFER_WITH_SB(p_s_sb),
						     1);
			journal_mark_dirty(&th, p_s_sb,
					   SB_BUFFER_WITH_SB(p_s_sb));

			/* we're only being called from kreiserfsd, it makes no sense to do
			** an async commit so that kreiserfsd can do it later
			*/
			do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
		}
	}
	return p_s_sb->s_dirt;
}

/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done. By the time it wakes up, the transaction it was called with has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th,
			     struct super_block *p_s_sb, unsigned long nblocks,
			     int flags)
{
	time_t now;
	int flush = flags & FLUSH_ALL;
	int commit_now = flags & COMMIT_NOW;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	BUG_ON(!th->t_trans_id);

	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super,
			       "journal-1577: handle trans id %ld != current trans id %ld\n",
			       th->t_trans_id, journal->j_trans_id);
	}

	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed. unmounting might not call begin */
		atomic_dec(&(journal->j_wcount));
	}

	/* BUG, deal with the case where j_len is 0, but blocks freed earlier
	** in this transaction still need to be released. That will be dealt
	** with by the next transaction that actually writes something, but
	** should be taken care of in this trans
	*/
	BUG_ON(journal->j_len == 0);

	/* if wcount > 0, and we are called with flush or commit_now,
	** we wait on j_join_wait. We will wake up when the last writer has
	** finished the transaction, and started it on its way to the disk.
	** Then, we flush the commit or journal list, and just return 0
	** because the rest of journal end was already done for this transaction.
	*/
	if (atomic_read(&(journal->j_wcount)) > 0) {
		if (flush || commit_now) {
			unsigned trans_id;

			jl = journal->j_current_jl;
			trans_id = jl->j_trans_id;
			if (wait_on_commit)
				jl->j_state |= LIST_COMMIT_PENDING;
			atomic_set(&(journal->j_jlock), 1);
			if (flush) {
				journal->j_next_full_flush = 1;
			}
			unlock_journal(p_s_sb);

			/* sleep while the current transaction is still j_jlocked */
			while (journal->j_trans_id == trans_id) {
				if (atomic_read(&journal->j_jlock)) {
					queue_log_writer(p_s_sb);
				} else {
					lock_journal(p_s_sb);
					if (journal->j_trans_id == trans_id) {
						atomic_set(&(journal->j_jlock),
							   1);
					}
					unlock_journal(p_s_sb);
				}
			}
			BUG_ON(journal->j_trans_id == trans_id);

			if (commit_now
			    && journal_list_still_alive(p_s_sb, trans_id)
			    && wait_on_commit) {
				flush_commit_list(p_s_sb, jl, 1);
			}
			return 0;
		}
		unlock_journal(p_s_sb);
		return 0;
	}

	/* deal with old transactions where we are the last writers */
	now = get_seconds();
	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		commit_now = 1;
		journal->j_next_async_flush = 1;
	}
	/* don't batch when someone is waiting on j_join_wait */
	/* don't batch when syncing the commit or flushing the whole trans */
	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
	    && journal->j_len_alloc < journal->j_max_batch
	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
		journal->j_bcount++;
		unlock_journal(p_s_sb);
		return 0;
	}

	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
		reiserfs_panic(p_s_sb,
			       "journal-003: journal_end: j_start (%ld) is too high\n",
			       journal->j_start);
	}
	return 1;
}

/*
** Does all the work that makes deleting blocks safe.
** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
** the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th,
		       struct super_block *p_s_sb, b_blocknr_t blocknr)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn = NULL;
	struct buffer_head *bh = NULL;
	struct reiserfs_list_bitmap *jb = NULL;
	int cleaned = 0;
	BUG_ON(!th->t_trans_id);

	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
	if (cn && cn->bh) {
		bh = cn->bh;
		get_bh(bh);
	}
	/* if it is journal new, we just remove it from this transaction */
	if (bh && buffer_journal_new(bh)) {
		clear_buffer_journal_new(bh);
		clear_prepared_bits(bh);
		reiserfs_clean_and_file_buffer(bh);
		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
	} else {
		/* set the bit for this block in the journal bitmap for this transaction */
		jb = journal->j_current_jl->j_list_bitmap;
		if (!jb) {
			reiserfs_panic(p_s_sb,
				       "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
		}
		set_bit_in_list_bitmap(p_s_sb, blocknr, jb);

		/* Note, the entire while loop is not allowed to schedule. */

		if (bh) {
			clear_prepared_bits(bh);
			reiserfs_clean_and_file_buffer(bh);
		}
		cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);

		/* find all older transactions with this block, make sure they don't try to write it out */
		cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
					  blocknr);
		while (cn) {
			if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
				set_bit(BLOCK_FREED, &cn->state);
				if (cn->bh) {
					if (!cleaned) {
						/* remove_from_transaction will brelse the buffer if it was
						** in the current trans
						*/
						clear_buffer_journal_dirty(cn->bh);
						clear_buffer_dirty(cn->bh);
						clear_buffer_journal_test(cn->bh);
						cleaned = 1;
						put_bh(cn->bh);
						if (atomic_read(&(cn->bh->b_count)) < 0) {
							reiserfs_warning(p_s_sb,
									 "journal-2138",
									 "cn->bh->b_count < 0");
						}
					}
					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
						atomic_dec(&(cn->jlist->j_nonzerolen));
					}
					cn->bh = NULL;
				}
			}
			cn = cn->hnext;
		}
	}

	if (bh)
		release_buffer_page(bh);	/* get_hash grabs the buffer */
	return 0;
}
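
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * delete path freeing one metadata block inside its own transaction.
 * Only the journal calls are the real API; the wrapper is made up.
 */
#if 0 /* example only */
static int example_free_block(struct super_block *p_s_sb, b_blocknr_t blocknr)
{
	struct reiserfs_transaction_handle th;
	int ret;

	ret = journal_begin(&th, p_s_sb, 1);
	if (ret)
		return ret;
	/* keeps older transactions from writing the block out, and the
	 * journal bitmap keeps it from being reallocated too early */
	journal_mark_freed(&th, p_s_sb, blocknr);
	return journal_end(&th, p_s_sb, 1);
}
#endif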

void reiserfs_update_inode_transaction(struct inode *inode)
{
	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
	REISERFS_I(inode)->i_jl = journal->j_current_jl;
	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
}

/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
			     struct reiserfs_journal_list *jl)
{
	struct reiserfs_transaction_handle th;
	struct super_block *sb = inode->i_sb;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	/* is it from the current transaction, or from an unknown transaction? */
	if (id == journal->j_trans_id) {
		jl = journal->j_current_jl;
		/* try to let other writers come in and grow this transaction */
		let_transaction_grow(sb, id);
		if (journal->j_trans_id != id) {
			goto flush_commit_only;
		}

		ret = journal_begin(&th, sb, 1);
		if (ret)
			return ret;

		/* someone might have ended this transaction while we joined */
		if (journal->j_trans_id != id) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
			ret = journal_end(&th, sb, 1);
			goto flush_commit_only;
		}

		ret = journal_end_sync(&th, sb, 1);
		if (!ret)
			ret = 1;

	} else {
		/* this gets tricky, we have to make sure the journal list in
		 * the inode still exists. We know the list is still around
		 * if we've got a larger transaction id than the oldest list
		 */
	      flush_commit_only:
		if (journal_list_still_alive(inode->i_sb, id)) {
			/*
			 * we only set ret to 1 when we know for sure
			 * the barrier hasn't been started yet on the commit
			 * block.
			 */
			if (atomic_read(&jl->j_commit_left) > 1)
				ret = 1;
			flush_commit_list(sb, jl, 1);
			if (journal->j_errno)
				ret = journal->j_errno;
		}
	}
	/* otherwise the list is gone, and long since committed */
	return ret;
}

int reiserfs_commit_for_inode(struct inode *inode)
{
	unsigned int id = REISERFS_I(inode)->i_trans_id;
	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

	/* for the whole inode, assume unset id means it was
	 * changed in the current transaction. More conservative
	 */
	if (!id || !jl) {
		reiserfs_update_inode_transaction(inode);
		id = REISERFS_I(inode)->i_trans_id;
		/* jl will be updated in __commit_trans_jl */
	}

	return __commit_trans_jl(inode, id, jl);
}
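
/*
 * Illustrative sketch (not part of the original source): how an
 * fsync-style caller might use reiserfs_commit_for_inode. The wrapper
 * is hypothetical; __commit_trans_jl returns <0 on error, 0 if nothing
 * needed committing, and 1 if a commit and barrier were done.
 */
#if 0 /* example only */
static int example_sync_inode_metadata(struct inode *inode)
{
	int ret = reiserfs_commit_for_inode(inode);

	return ret < 0 ? ret : 0;
}
#endif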

void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
				      struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	PROC_INFO_INC(p_s_sb, journal.restore_prepared);
	if (!bh) {
		return;
	}
	if (test_clear_buffer_journal_restore_dirty(bh) &&
	    buffer_journal_dirty(bh)) {
		struct reiserfs_journal_cnode *cn;
		cn = get_journal_hash_dev(p_s_sb,
					  journal->j_list_hash_table,
					  bh->b_blocknr);
		if (cn && can_dirty(cn)) {
			set_buffer_journal_test(bh);
			mark_buffer_dirty(bh);
		}
	}
	clear_buffer_journal_prepared(bh);
}

extern struct tree_balance *cur_tb;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it. So, we must:
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(p_s_sb, journal.prepare);

	if (!trylock_buffer(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}
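
/*
 * Illustrative sketch (not part of the original source): the
 * prepare/restore pairing. If a caller prepares a buffer and then
 * decides not to log it, reiserfs_restore_prepared_buffer puts the
 * dirty state back. The wrapper and the "do_it" decision are made up.
 */
#if 0 /* example only */
static void example_maybe_modify(struct super_block *p_s_sb,
				 struct buffer_head *bh, int do_it)
{
	if (!reiserfs_prepare_for_journal(p_s_sb, bh, 0))
		return;	/* buffer was locked and we chose not to wait */
	if (do_it) {
		/* ... modify bh->b_data, then journal_mark_dirty() ... */
	} else {
		/* changed our mind: undo the prepare */
		reiserfs_restore_prepared_buffer(p_s_sb, bh);
	}
}
#endif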

static void flush_old_journal_lists(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *jl;
	struct list_head *entry;
	time_t now = get_seconds();

	while (!list_empty(&journal->j_journal_list)) {
		entry = journal->j_journal_list.next;
		jl = JOURNAL_LIST_ENTRY(entry);
		/* this check should always be run, to send old lists to disk */
		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
		    atomic_read(&jl->j_commit_left) == 0 &&
		    test_transaction(s, jl)) {
			flush_used_journal_lists(s, jl);
		} else {
			break;
		}
	}
}

/*
** long and ugly. If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up. Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th,
			  struct super_block *p_s_sb, unsigned long nblocks,
			  int flags)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int old_start;
	int i;
	int flush;
	int wait_on_commit;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned int commit_trans_id;
	int trans_half;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);

	/* protect flush_older_commits from making mistakes if the
	   transaction ID counter overflows. */
	if (th->t_trans_id == ~0U)
		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
	flush = flags & FLUSH_ALL;
	wait_on_commit = flags & WAIT;

	put_fs_excl();
	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(p_s_sb, "journal end");
	if (journal->j_len == 0) {
		reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
					     1);
		journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
	}

	lock_journal(p_s_sb);
	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/* check_journal_end locks the journal, and unlocks if it does not return 1
	** it tells us if we should continue with the journal_end, or just return
	*/
	if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
		p_s_sb->s_dirt = 1;
		wake_queued_writers(p_s_sb);
		reiserfs_async_progress_wait(p_s_sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}

	/*
	** j_must_wait means we have to flush the log blocks, and the real blocks for
	** this transaction
	*/
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/* quota ops might need to nest, setup the journal_info pointer for them
	 * and raise the refcount so that it is > 0. */
	current->journal_info = th;
	th->t_refcount++;
	reiserfs_discard_all_prealloc(th);	/* it should not add new blocks
						 * to the transaction */
	th->t_refcount--;
	current->journal_info = th->t_handle_save;
#endif

	/* setup description block */
	d_bh =
	    journal_getblk(p_s_sb,
			   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			   journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
	c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/* we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	mutex_lock(&jl->j_commit_mutex);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;

	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
	** for each real block, add it to the journal list hash,
	** copy into real block index array in the commit or desc block
	*/
	trans_half = journal_trans_half(p_s_sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(p_s_sb);
			if (!jl_cn) {
				reiserfs_panic(p_s_sb,
					       "journal-1676, get_cnode returned NULL\n");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/* make sure the block we are trying to log is not a block
			   of journal or reserved area */

			if (is_block_in_log_or_reserved_area
			    (p_s_sb, cn->bh->b_blocknr)) {
				reiserfs_panic(p_s_sb,
					       "journal-2332: Trying to log block %lu, which is a log block\n",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = p_s_sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		} else {
			i--;
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/* special check in case all buffers in the journal were marked for not logging */
	BUG_ON(journal->j_len == 0);

	/* we're about to dirty all the log blocks, mark the description block
	 * dirty now too. Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;	/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area. dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh =
			    journal_getblk(p_s_sb,
					   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
					   ((cur_write_start +
					     jindex) %
					    SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/* JDirty cleared sometime during transaction. don't log this one */
			reiserfs_warning(p_s_sb, "journal-2048",
					 "BAD, buffer in journal hash, "
					 "but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(p_s_sb, cn);
		cn = next;
		cond_resched();
	}

	/* we are done with both the c_bh and d_bh, but
	** c_bh must be written after all other commit blocks,
	** so we dirty/brelse c_bh in flush_commit_list, with commit_left <= 1.
	*/

	journal->j_current_jl = alloc_journal_list(p_s_sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
	atomic_set(&(journal->j_wcount), 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	/* check for trans_id overflow */
	if (++journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(p_s_sb);

	// make sure reiserfs_add_jh sees the new current_jl before we
	// write out the tails
	smp_mb();

	/* tail conversion targets have to hit the disk before we end the
	 * transaction. Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed
	 * and clean; if we crash before the later transaction commits, the
	 * data block is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		unlock_kernel();
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_tail_bh_list));
	mutex_unlock(&jl->j_commit_mutex);

	/* honor the flush wishes from the caller, simple commits can
	** be done outside the journal lock, they are done below
	**
	** if we don't flush the commit list right now, we put it into
	** the work queue so the people waiting on the async progress work
	** queue don't wait for this proc to flush journal lists and such.
	*/
	if (flush) {
		flush_commit_list(p_s_sb, jl, 1);
		flush_journal_list(p_s_sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);

	/* if the next transaction has any chance of wrapping, flush
	** transactions that might get overwritten. If any journal lists are very
	** old flush them as well.
	*/
      first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
				/* if we don't cross into the next transaction and we don't
				 * wrap, there is no way we can overlap any later transactions
				 * break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(p_s_sb, temp_jl);
				goto first_jl;
			} else {
				/* we don't overlap anything from our start to the end of the
				 * log, and our wrapped portion doesn't overlap anything at
				 * the start of the log. We can break
				 */
				break;
			}
		}
	}
	flush_old_journal_lists(p_s_sb);

	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(p_s_sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(p_s_sb,
			       "journal-1996: do_journal_end, could not get a list bitmap\n");
	}

	atomic_set(&(journal->j_jlock), 0);
	unlock_journal(p_s_sb);
	/* wake up anybody waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&(journal->j_join_wait));

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(p_s_sb, commit_trans_id)) {
		flush_commit_list(p_s_sb, jl, 1);
	}
      out:
	reiserfs_check_lock_depth(p_s_sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = p_s_sb;

	return journal->j_errno;
}
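
/*
 * Illustrative sketch (not part of the original source): the circular-log
 * arithmetic used throughout do_journal_end. A transaction with j_len
 * real blocks occupies j_len + 2 log blocks (desc + data + commit), all
 * addressed modulo the on-disk journal size. The helper is made up.
 */
#if 0 /* example only */
static b_blocknr_t example_log_block(struct super_block *p_s_sb,
				     unsigned long start, unsigned long offset)
{
	/* offset 0 is the desc block, 1..j_len are data blocks, and
	 * j_len + 1 is the commit block */
	return SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
	    ((start + offset) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
}
#endif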

static void __reiserfs_journal_abort_hard(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
	       reiserfs_bdevname(sb));

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
}

static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	__reiserfs_journal_abort_hard(sb);
}

void reiserfs_journal_abort(struct super_block *sb, int errno)
{
	__reiserfs_journal_abort_soft(sb, errno);
}