/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit.... The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                      -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/

#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/time.h>
#include <asm/semaphore.h>

#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
				   structs at 4k */
#define BUFNR 64		/* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2		/* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *,
			  struct super_block *, unsigned long nblocks,
			  int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb,
			      unsigned long nblocks, int join);

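/* wipe the hash table that tracks cnodes in the current transaction;
** journal_mark_dirty adds buffer heads here (see the overview above)
*/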
static void init_journal_hash(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}

static void disable_barrier(struct super_block *s)
{
	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
	printk("reiserfs: disabling flush barriers on %s\n",
	       reiserfs_bdevname(s));
}

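/* each bitmap node shadows one on-disk bitmap block (s_blocksize bytes of
** bits).  A journal list uses them, via struct reiserfs_list_bitmap, to
** record which blocks were freed in its transaction, so reiserfs_in_journal
** can refuse to reallocate a block until the list that freed it is flushed.
*/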
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *p_s_sb)
{
	struct reiserfs_bitmap_node *bn;
	static int id;

	bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS,
			      p_s_sb);
	if (!bn) {
		return NULL;
	}
	bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb);
	if (!bn->data) {
		reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
		return NULL;
	}
	bn->id = id++;
	memset(bn->data, 0, p_s_sb->s_blocksize);
	INIT_LIST_HEAD(&bn->list);
	return bn;
}

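/* grab a zeroed bitmap node, preferring the journal's free list.  If the
** free list is empty and a fresh allocation fails, yield and retry;
** callers rely on eventually getting a node back.
*/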
static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
      repeat:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, p_s_sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(p_s_sb);
	if (!bn) {
		yield();
		goto repeat;
	}
	return bn;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
		reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(p_s_sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			break;	// this is ok, we'll try again when more are needed
		}
	}
}

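/* set the bit for a single block in jb's list bitmap, allocating the
** covering bitmap node on first use.  bmap_nr selects the node, bit_nr the
** offset within its s_blocksize * 8 bits.
*/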
static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
				  struct reiserfs_list_bitmap *jb)
{
	int bmap_nr = block / (p_s_sb->s_blocksize << 3);
	int bit_nr = block % (p_s_sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}

static void cleanup_bitmap_list(struct super_block *p_s_sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(p_s_sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(p_s_sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
		reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
				   struct reiserfs_list_bitmap *jb_array,
				   int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vmalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(p_s_sb,
					 "clm-2000, unable to allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
		memset(jb->bitmaps, 0, mem);
	}
	if (failed) {
		free_list_bitmaps(p_s_sb, jb_array);
		return -1;
	}
	return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(p_s_sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure it was flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}

static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
	if (current->lock_depth < 0) {
		reiserfs_panic(sb, "%s called without kernel lock held",
			       caller);
	}
#else
	;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}

/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
			int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(p_s_sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
	 ** if we crash before the transaction that freed it commits,  this transaction won't
	 ** have committed either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       p_s_sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}

/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}

/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb)
{
	PROC_INFO_INC(p_s_sb, journal.lock_journal);
	down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb)
{
	up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "trans id %lu, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
					   struct reiserfs_journal_list *jl)
{

	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(p_s_sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

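/* lists are flushed and freed in order, oldest first.  If the oldest list
** still queued has a trans_id no bigger than the one we're asking about,
** our list can't have been freed yet.  Used after blocking operations to
** detect whether a list we held disappeared underneath us.
*/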
static int journal_list_still_alive(struct super_block *s,
				    unsigned long trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL,
				 "clm-2084: pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	return submit_bh(WRITE_BARRIER, bh);
}

static void check_barrier_completion(struct super_block *s,
				     struct buffer_head *bh)
{
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		disable_barrier(s);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}
}

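/* a buffer_chunk batches up to CHUNK_SIZE buffer heads so they can be
** submitted for IO in one pass instead of one submit_bh at a time
*/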
#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

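/* queue bh into the chunk.  When the chunk fills up, drop the (optional)
** spinlock, flush the whole batch through fn, then retake the lock.
** Returns 1 if the chunk was flushed.
*/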
static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	if (chunk->nr >= CHUNK_SIZE)
		BUG();
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}

static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}

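/* attach a reiserfs_jh to bh (via b_private) and put it on the current
** journal list's ordered or tail list.  If the buffer already has a jh,
** it is simply moved onto the current list.
*/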
static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		if (bh->b_private)
			BUG();
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

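/* write out one journal list's ordered-data buffers: dirty buffers are
** submitted in chunks, buffers another thread is already writing are
** waited on, and at the end every buffer is waited on again so we can
** return -EIO if any of them failed.
*/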
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (test_set_buffer_locked(bh)) {
			if (!buffer_dirty(bh)) {
				list_del_init(&jh->list);
				list_add(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		if (buffer_dirty(bh)) {
			list_del_init(&jh->list);
			list_add(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}

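/* make sure every transaction older than jl has its commit blocks on disk.
** Returns 1 if jl itself disappeared while we were flushing, 0 otherwise.
*/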
static int flush_older_commits(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal_list *first_jl;
	struct list_head *entry;
	unsigned long trans_id = jl->j_trans_id;
	unsigned long other_trans_id;
	unsigned long first_trans_id;

      find_first:
	/*
	 * first we walk backwards to find the oldest uncommitted transaction
	 */
	first_jl = jl;
	entry = jl->j_list.prev;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		if (entry == &journal->j_journal_list ||
		    atomic_read(&other_jl->j_older_commits_done))
			break;

		first_jl = other_jl;
		entry = other_jl->j_list.prev;
	}

	/* if we didn't find any older uncommitted transactions, return now */
	if (first_jl == jl) {
		return 0;
	}

	first_trans_id = first_jl->j_trans_id;

	entry = &first_jl->j_list;
	while (1) {
		other_jl = JOURNAL_LIST_ENTRY(entry);
		other_trans_id = other_jl->j_trans_id;

		if (other_trans_id < trans_id) {
			if (atomic_read(&other_jl->j_commit_left) != 0) {
				flush_commit_list(s, other_jl, 0);

				/* list we were called with is gone, return */
				if (!journal_list_still_alive(s, trans_id))
					return 1;

				/* the one we just flushed is gone, this means all
				 * older lists are also gone, so first_jl is no longer
				 * valid either.  Go back to the beginning.
				 */
				if (!journal_list_still_alive
				    (s, other_trans_id)) {
					goto find_first;
				}
			}
			entry = entry->next;
			if (entry == &journal->j_journal_list)
				return 0;
		} else {
			return 0;
		}
	}
	return 0;
}
int reiserfs_async_progress_wait(struct super_block *s)
{
	DEFINE_WAIT(wait);
	struct reiserfs_journal *j = SB_JOURNAL(s);
	if (atomic_read(&j->j_async_throttle))
		blk_congestion_wait(WRITE, HZ / 10);
	return 0;
}

/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	int bn;
	struct buffer_head *tbh = NULL;
	unsigned long trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int barrier = 0;
	int retval = 0;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	get_fs_excl();

	/* before we can put our commit blocks on disk, we have to make sure everyone older than
	 ** us is on disk too
	 */
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/* list disappeared during flush_older_commits.  return */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	down(&jl->j_commit_lock);
	if (!journal_list_still_alive(s, trans_id)) {
		up(&jl->j_commit_lock);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&(jl->j_commit_left)) <= 0) {
		if (flushall) {
			atomic_set(&(jl->j_older_commits_done), 1);
		}
		up(&jl->j_commit_lock);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		unlock_kernel();
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_bh_list);
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk
	 */
	atomic_inc(&journal->j_async_throttle);
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (buffer_dirty(tbh))	/* redundant, ll_rw_block() checks */
			ll_rw_block(SWRITE, 1, &tbh);
		put_bh(tbh);
	}
	atomic_dec(&journal->j_async_throttle);

	/* We're skipping the commit if there's an error */
	if (retval || reiserfs_is_journal_aborted(journal))
		barrier = 0;

	/* wait on everything written so far before writing the commit
	 * if we are in barrier mode, send the commit down now
	 */
	barrier = reiserfs_barrier_flush(s);
	if (barrier) {
		int ret;
		lock_buffer(jl->j_commit_bh);
		ret = submit_barrier_buffer(jl->j_commit_bh);
		if (ret == -EOPNOTSUPP) {
			set_buffer_uptodate(jl->j_commit_bh);
			disable_barrier(s);
			barrier = 0;
		}
	}
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		wait_on_buffer(tbh);
		// since we're using ll_rw_blk above, it might have skipped over
		// a locked buffer.  Double check here
		//
		if (buffer_dirty(tbh))	/* redundant, sync_dirty_buffer() checks */
			sync_dirty_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601, buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	if (!barrier) {
		/* If there was a write error in the journal - we can't commit
		 * this transaction - it will be invalid and, if successful,
		 * will just end up propagating the write error out to
		 * the file system. */
		if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
			if (buffer_dirty(jl->j_commit_bh))
				BUG();
			mark_buffer_dirty(jl->j_commit_bh);
			sync_dirty_buffer(jl->j_commit_bh);
		}
	} else
		wait_on_buffer(jl->j_commit_bh);

	check_barrier_completion(s, jl->j_commit_bh);

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615: buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	up(&jl->j_commit_lock);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __FUNCTION__);
	put_fs_excl();
	return retval;
}

/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(p_s_sb,
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(p_s_sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(p_s_sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb,
					unsigned long offset,
					unsigned long trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			wait_on_buffer((journal->j_header_bh));
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(p_s_sb,
						 "journal-699: buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		if (reiserfs_barrier_flush(p_s_sb)) {
			int ret;
			lock_buffer(journal->j_header_bh);
			ret = submit_barrier_buffer(journal->j_header_bh);
			if (ret == -EOPNOTSUPP) {
				set_buffer_uptodate(journal->j_header_bh);
				disable_barrier(p_s_sb);
				goto sync;
			}
			wait_on_buffer(journal->j_header_bh);
			check_barrier_completion(p_s_sb, journal->j_header_bh);
		} else {
		      sync:
			set_buffer_dirty(journal->j_header_bh);
			sync_dirty_buffer(journal->j_header_bh);
		}
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(p_s_sb,
					 "journal-837: IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

static int update_journal_header_block(struct super_block *p_s_sb,
				       unsigned long offset,
				       unsigned long trans_id)
{
	return _update_journal_header_block(p_s_sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	unsigned long trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(p_s_sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s,
				 "clm-2048: flush_journal_list called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		down(&journal->j_flush_sem);
	} else if (!down_trylock(&journal->j_flush_sem)) {
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s,
			       "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	get_fs_excl();

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any older transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s,
			       "journal-844: panic journal list is flushing, wcount is not 0\n");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit.  Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing this
		 ** block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		 ** locked while it works, so we should never see a buffer here that
		 ** is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s,
					 "journal-813: BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer transaction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s,
					 "clm-2082: Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __FUNCTION__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s,
						 "journal-945: saved_bh->b_count < 0");
			}
		}
	}
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1011: cn->bh is NULL\n");
				}
				wait_on_buffer(cn->bh);
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1012: cn->bh is NULL\n");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s,
							 "journal-949: buffer write failed\n");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				 ** check, otherwise we race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* undo the inc from journal_mark_dirty */
				put_bh(cn->bh);
				brelse(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __FUNCTION__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
	 ** are no allocations going on while we are flushing journal lists.  So,
	 ** we only need to update the journal header block for the last list
	 ** being flushed
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __FUNCTION__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		up(&journal->j_flush_sem);
	put_fs_excl();
	return err;
}

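/* queue all the writable dirty blocks of one transaction into chunk.
** Returns the number of buffers queued; used by kupdate_transactions to
** push metadata out in the background.
*/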
static int write_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		 ** buffer.  Only the most recent transaction with a buffer in
		 ** it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}

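/* background writeback: starting at jl, push the real blocks of dirty,
** fully committed transactions to disk until either num_trans transactions
** or num_blocks blocks have been written, whichever limit was asked for.
*/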
static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned long *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned long orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	down(&journal->j_flush_sem);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_sem held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left)
		    || !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	up(&journal->j_flush_sem);
	return ret;
}

/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned long trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}

/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)	/* anybody who clears the cur->bh will also dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}

static void free_journal_ram(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	reiserfs_kfree(journal->j_current_jl,
		       sizeof(struct reiserfs_journal_list), p_s_sb);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
	free_bitmap_nodes(p_s_sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the journal
	 * dev until we brelse j_header_bh
	 */
	release_journal_dev(p_s_sb, journal);
	vfree(journal);
}

1838/*
1839** call on unmount. Only set error to 1 if you haven't made your way out
1840** of read_super() yet. Any other caller must keep error at 0.
1841*/
bd4c625c
LT
1842static int do_journal_release(struct reiserfs_transaction_handle *th,
1843 struct super_block *p_s_sb, int error)
1844{
1845 struct reiserfs_transaction_handle myth;
1846 int flushed = 0;
1847 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1848
1849 /* we only want to flush out transactions if we were called with error == 0
1850 */
1851 if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
1852 /* end the current trans */
1853 BUG_ON(!th->t_trans_id);
1854 do_journal_end(th, p_s_sb, 10, FLUSH_ALL);
1855
1856 /* make sure something gets logged to force our way into the flush code */
1857 if (!journal_join(&myth, p_s_sb, 1)) {
1858 reiserfs_prepare_for_journal(p_s_sb,
1859 SB_BUFFER_WITH_SB(p_s_sb),
1860 1);
1861 journal_mark_dirty(&myth, p_s_sb,
1862 SB_BUFFER_WITH_SB(p_s_sb));
1863 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1864 flushed = 1;
1865 }
1866 }
1867
1868 /* this also catches errors during the do_journal_end above */
1869 if (!error && reiserfs_is_journal_aborted(journal)) {
1870 memset(&myth, 0, sizeof(myth));
1871 if (!journal_join_abort(&myth, p_s_sb, 1)) {
1872 reiserfs_prepare_for_journal(p_s_sb,
1873 SB_BUFFER_WITH_SB(p_s_sb),
1874 1);
1875 journal_mark_dirty(&myth, p_s_sb,
1876 SB_BUFFER_WITH_SB(p_s_sb));
1877 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1878 }
1879 }
1880
1881 reiserfs_mounted_fs_count--;
1882 /* wait for all commits to finish */
1883 cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
1884 flush_workqueue(commit_wq);
1885 if (!reiserfs_mounted_fs_count) {
1886 destroy_workqueue(commit_wq);
1887 commit_wq = NULL;
1888 }
1889
1890 free_journal_ram(p_s_sb);
1891
1892 return 0;
1893}
1894
1895/*
1896** call on unmount. flush all journal trans, release all alloc'd ram
1897*/
1898int journal_release(struct reiserfs_transaction_handle *th,
1899 struct super_block *p_s_sb)
1900{
1901 return do_journal_release(th, p_s_sb, 0);
1902}
1903
1904/*
1905** only call from an error condition inside reiserfs_read_super!
1906*/
1907int journal_release_error(struct reiserfs_transaction_handle *th,
1908 struct super_block *p_s_sb)
1909{
1910 return do_journal_release(th, p_s_sb, 1);
1911}
1912
1913/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
1914static int journal_compare_desc_commit(struct super_block *p_s_sb,
1915 struct reiserfs_journal_desc *desc,
1916 struct reiserfs_journal_commit *commit)
1917{
1918 if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
1919 get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
1920 get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
1921 get_commit_trans_len(commit) <= 0) {
1922 return 1;
1923 }
1924 return 0;
1925}
1926
1927/* returns 0 if it did not find a description block
1928** returns -1 if it found a corrupt commit block
1929** returns 1 if both desc and commit were valid
1930*/
1931static int journal_transaction_is_valid(struct super_block *p_s_sb,
1932 struct buffer_head *d_bh,
1933 unsigned long *oldest_invalid_trans_id,
1934 unsigned long *newest_mount_id)
1935{
1936 struct reiserfs_journal_desc *desc;
1937 struct reiserfs_journal_commit *commit;
1938 struct buffer_head *c_bh;
1939 unsigned long offset;
1940
1941 if (!d_bh)
1942 return 0;
1943
1944 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
1945 if (get_desc_trans_len(desc) > 0
1946 && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
1947 if (oldest_invalid_trans_id && *oldest_invalid_trans_id
1948 && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
1949 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1950 "journal-986: transaction "
1951 "is valid returning because trans_id %d is greater than "
1952 "oldest_invalid %lu",
1953 get_desc_trans_id(desc),
1954 *oldest_invalid_trans_id);
1955 return 0;
1956 }
1957 if (newest_mount_id
1958 && *newest_mount_id > get_desc_mount_id(desc)) {
1959 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1960 "journal-1087: transaction "
1961 "is valid returning because mount_id %d is less than "
1962 "newest_mount_id %lu",
1963 get_desc_mount_id(desc),
1964 *newest_mount_id);
1965 return -1;
1966 }
1967 if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
1968 reiserfs_warning(p_s_sb,
1969 "journal-2018: Bad transaction length %d encountered, ignoring transaction",
1970 get_desc_trans_len(desc));
1971 return -1;
1972 }
1973 offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
1974
1975 /* ok, we have a journal description block, lets see if the transaction was valid */
1976 c_bh =
1977 journal_bread(p_s_sb,
1978 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
1979 ((offset + get_desc_trans_len(desc) +
1980 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
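		/* the commit block sits right behind the transaction: desc at
		 * "offset", then trans_len data blocks, then the commit block,
		 * all modulo the on-disk journal size because the log is
		 * circular. E.g. (hypothetical numbers) in an 8192-block
		 * journal, a desc at offset 8190 with len 4 puts the commit
		 * block at (8190 + 4 + 1) % 8192 = 3.
		 */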
1981 if (!c_bh)
1982 return 0;
1983 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
1984 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
1985 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1986 "journal_transaction_is_valid, commit offset %ld had bad "
1987 "time %d or length %d",
1988 c_bh->b_blocknr -
1989 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1990 get_commit_trans_id(commit),
1991 get_commit_trans_len(commit));
1992 brelse(c_bh);
1993 if (oldest_invalid_trans_id) {
1994 *oldest_invalid_trans_id =
1995 get_desc_trans_id(desc);
1996 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1997 "journal-1004: "
1998 "transaction_is_valid setting oldest invalid trans_id "
1999 "to %d",
2000 get_desc_trans_id(desc));
2001 }
2002 return -1;
2003 }
2004 brelse(c_bh);
2005 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2006 "journal-1006: found valid "
2007 "transaction start offset %llu, len %d id %d",
2008 d_bh->b_blocknr -
2009 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2010 get_desc_trans_len(desc),
2011 get_desc_trans_id(desc));
2012 return 1;
2013 } else {
2014 return 0;
2015 }
2016}
2017
2018static void brelse_array(struct buffer_head **heads, int num)
2019{
2020 int i;
2021 for (i = 0; i < num; i++) {
2022 brelse(heads[i]);
2023 }
2024}
2025
2026/*
2027** given the start, and values for the oldest acceptable transactions,
2028** this either reads in and replays a transaction, or returns because the transaction
2029** is invalid or too old.
2030*/
2031static int journal_read_transaction(struct super_block *p_s_sb,
2032 unsigned long cur_dblock,
2033 unsigned long oldest_start,
2034 unsigned long oldest_trans_id,
2035 unsigned long newest_mount_id)
2036{
2037 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2038 struct reiserfs_journal_desc *desc;
2039 struct reiserfs_journal_commit *commit;
2040 unsigned long trans_id = 0;
2041 struct buffer_head *c_bh;
2042 struct buffer_head *d_bh;
2043 struct buffer_head **log_blocks = NULL;
2044 struct buffer_head **real_blocks = NULL;
2045 unsigned long trans_offset;
2046 int i;
2047 int trans_half;
2048
2049 d_bh = journal_bread(p_s_sb, cur_dblock);
2050 if (!d_bh)
2051 return 1;
2052 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2053 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2054 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
2055 "journal_read_transaction, offset %llu, len %d mount_id %d",
2056 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2057 get_desc_trans_len(desc), get_desc_mount_id(desc));
2058 if (get_desc_trans_id(desc) < oldest_trans_id) {
2059 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
2060 "journal_read_trans skipping because %lu is too old",
2061 cur_dblock -
2062 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2063 brelse(d_bh);
2064 return 1;
2065 }
2066 if (get_desc_mount_id(desc) != newest_mount_id) {
2067 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
2068 "journal_read_trans skipping because %d is != "
2069 "newest_mount_id %lu", get_desc_mount_id(desc),
2070 newest_mount_id);
2071 brelse(d_bh);
2072 return 1;
2073 }
2074 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2075 ((trans_offset + get_desc_trans_len(desc) + 1) %
2076 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2077 if (!c_bh) {
2078 brelse(d_bh);
2079 return 1;
2080 }
2081 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2082 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2083 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2084 "journal_read_transaction, "
2085 "commit offset %llu had bad time %d or length %d",
2086 c_bh->b_blocknr -
2087 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2088 get_commit_trans_id(commit),
2089 get_commit_trans_len(commit));
2090 brelse(c_bh);
2091 brelse(d_bh);
2092 return 1;
2093 }
2094 trans_id = get_desc_trans_id(desc);
2095 /* now we know we've got a good transaction, and it was inside the valid time ranges */
2096 log_blocks =
2097 reiserfs_kmalloc(get_desc_trans_len(desc) *
2098 sizeof(struct buffer_head *), GFP_NOFS, p_s_sb);
2099 real_blocks =
2100 reiserfs_kmalloc(get_desc_trans_len(desc) *
2101 sizeof(struct buffer_head *), GFP_NOFS, p_s_sb);
2102 if (!log_blocks || !real_blocks) {
2103 brelse(c_bh);
2104 brelse(d_bh);
2105 reiserfs_kfree(log_blocks,
2106 get_desc_trans_len(desc) *
2107 sizeof(struct buffer_head *), p_s_sb);
2108 reiserfs_kfree(real_blocks,
2109 get_desc_trans_len(desc) *
2110 sizeof(struct buffer_head *), p_s_sb);
2111 reiserfs_warning(p_s_sb,
2112 "journal-1169: kmalloc failed, unable to mount FS");
2113 return -1;
2114 }
2115 /* get all the buffer heads */
2116 trans_half = journal_trans_half(p_s_sb->s_blocksize);
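	/* a transaction's home block numbers are split across its bookends:
	 * the first trans_half of them live in desc->j_realblock and the
	 * rest in commit->j_realblock, trans_half being roughly how many
	 * 32-bit block numbers fit in one journal block after the header
	 * fields.
	 */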
2117 for (i = 0; i < get_desc_trans_len(desc); i++) {
2118 log_blocks[i] =
2119 journal_getblk(p_s_sb,
2120 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2121 (trans_offset + 1 +
2122 i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2123 if (i < trans_half) {
2124 real_blocks[i] =
2125 sb_getblk(p_s_sb,
2126 le32_to_cpu(desc->j_realblock[i]));
2127 } else {
2128 real_blocks[i] =
2129 sb_getblk(p_s_sb,
2130 le32_to_cpu(commit->
2131 j_realblock[i - trans_half]));
2132 }
2133 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
2134 reiserfs_warning(p_s_sb,
2135 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
2136 goto abort_replay;
2137 }
2138 /* make sure we don't try to replay onto log or reserved area */
2139 if (is_block_in_log_or_reserved_area
2140 (p_s_sb, real_blocks[i]->b_blocknr)) {
2141 reiserfs_warning(p_s_sb,
2142 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
2143 abort_replay:
2144 brelse_array(log_blocks, i);
2145 brelse_array(real_blocks, i);
2146 brelse(c_bh);
2147 brelse(d_bh);
2148 reiserfs_kfree(log_blocks,
2149 get_desc_trans_len(desc) *
2150 sizeof(struct buffer_head *), p_s_sb);
2151 reiserfs_kfree(real_blocks,
2152 get_desc_trans_len(desc) *
2153 sizeof(struct buffer_head *), p_s_sb);
2154 return -1;
2155 }
2156 }
2157 /* read in the log blocks, memcpy to the corresponding real block */
2158 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2159 for (i = 0; i < get_desc_trans_len(desc); i++) {
2160 wait_on_buffer(log_blocks[i]);
2161 if (!buffer_uptodate(log_blocks[i])) {
2162 reiserfs_warning(p_s_sb,
2163 "journal-1212: REPLAY FAILURE fsck required! buffer read failed");
2164 brelse_array(log_blocks + i,
2165 get_desc_trans_len(desc) - i);
2166 brelse_array(real_blocks, get_desc_trans_len(desc));
2167 brelse(c_bh);
2168 brelse(d_bh);
2169 reiserfs_kfree(log_blocks,
2170 get_desc_trans_len(desc) *
2171 sizeof(struct buffer_head *), p_s_sb);
2172 reiserfs_kfree(real_blocks,
2173 get_desc_trans_len(desc) *
2174 sizeof(struct buffer_head *), p_s_sb);
2175 return -1;
2176 }
2177 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2178 real_blocks[i]->b_size);
2179 set_buffer_uptodate(real_blocks[i]);
2180 brelse(log_blocks[i]);
2181 }
2182 /* flush out the real blocks */
2183 for (i = 0; i < get_desc_trans_len(desc); i++) {
2184 set_buffer_dirty(real_blocks[i]);
2185 ll_rw_block(SWRITE, 1, real_blocks + i);
2186 }
2187 for (i = 0; i < get_desc_trans_len(desc); i++) {
2188 wait_on_buffer(real_blocks[i]);
2189 if (!buffer_uptodate(real_blocks[i])) {
2190 reiserfs_warning(p_s_sb,
2191 "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
2192 brelse_array(real_blocks + i,
2193 get_desc_trans_len(desc) - i);
2194 brelse(c_bh);
2195 brelse(d_bh);
2196 reiserfs_kfree(log_blocks,
2197 get_desc_trans_len(desc) *
2198 sizeof(struct buffer_head *), p_s_sb);
2199 reiserfs_kfree(real_blocks,
2200 get_desc_trans_len(desc) *
2201 sizeof(struct buffer_head *), p_s_sb);
2202 return -1;
2203 }
2204 brelse(real_blocks[i]);
2205 }
2206 cur_dblock =
2207 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2208 ((trans_offset + get_desc_trans_len(desc) +
2209 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2210 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2211 "journal-1095: setting journal " "start to offset %ld",
2212 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2213
2214 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2215 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2216 journal->j_last_flush_trans_id = trans_id;
2217 journal->j_trans_id = trans_id + 1;
2218 brelse(c_bh);
2219 brelse(d_bh);
2220 reiserfs_kfree(log_blocks,
2221 le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *),
2222 p_s_sb);
2223 reiserfs_kfree(real_blocks,
2224 le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *),
2225 p_s_sb);
2226 return 0;
2227}
2228
2229/* This function reads blocks starting from block up to max_block, of bufsize
2230 size (but no more than BUFNR blocks at a time). This proved to improve
2231 mounting speed on self-rebuilding raid5 arrays at least.
2232 Right now it is only used from journal code. But later we might use it
2233 from other places.
2234 Note: Do not use journal_getblk/sb_getblk functions here! */
2235static struct buffer_head *reiserfs_breada(struct block_device *dev, int block,
2236 int bufsize, unsigned int max_block)
2237{
2238 struct buffer_head *bhlist[BUFNR];
2239 unsigned int blocks = BUFNR;
2240 struct buffer_head *bh;
2241 int i, j;
2242
2243 bh = __getblk(dev, block, bufsize);
2244 if (buffer_uptodate(bh))
2245 return (bh);
2246
2247 if (block + BUFNR > max_block) {
2248 blocks = max_block - block;
2249 }
2250 bhlist[0] = bh;
2251 j = 1;
2252 for (i = 1; i < blocks; i++) {
2253 bh = __getblk(dev, block + i, bufsize);
2254 if (buffer_uptodate(bh)) {
2255 brelse(bh);
2256 break;
2257 } else
2258 bhlist[j++] = bh;
2259 }
2260 ll_rw_block(READ, j, bhlist);
2261 for (i = 1; i < j; i++)
2262 brelse(bhlist[i]);
2263 bh = bhlist[0];
2264 wait_on_buffer(bh);
2265 if (buffer_uptodate(bh))
2266 return bh;
2267 brelse(bh);
2268 return NULL;
2269}
2270
2271/*
2272** read and replay the log
2273** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
2274** transaction. This tests for that before scanning all the transactions in the log, which keeps normal mount times fast.
2275**
2276** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
2277**
2278** On exit, it sets things up so the first transaction will work correctly.
2279*/
2280static int journal_read(struct super_block *p_s_sb)
2281{
2282 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2283 struct reiserfs_journal_desc *desc;
2284 unsigned long oldest_trans_id = 0;
2285 unsigned long oldest_invalid_trans_id = 0;
2286 time_t start;
2287 unsigned long oldest_start = 0;
2288 unsigned long cur_dblock = 0;
2289 unsigned long newest_mount_id = 9;
2290 struct buffer_head *d_bh;
2291 struct reiserfs_journal_header *jh;
2292 int valid_journal_header = 0;
2293 int replay_count = 0;
2294 int continue_replay = 1;
2295 int ret;
2296 char b[BDEVNAME_SIZE];
2297
2298 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2299 reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
2300 bdevname(journal->j_dev_bd, b));
2301 start = get_seconds();
2302
2303 /* step 1, read in the journal header block. Check the transaction it says
2304 ** is the first unflushed, and if that transaction is not valid,
2305 ** replay is done
2306 */
2307 journal->j_header_bh = journal_bread(p_s_sb,
2308 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
2309 + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2310 if (!journal->j_header_bh) {
2311 return 1;
2312 }
2313 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2314 if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
2315 le32_to_cpu(jh->j_first_unflushed_offset) <
2316 SB_ONDISK_JOURNAL_SIZE(p_s_sb)
2317 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2318 oldest_start =
2319 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2320 le32_to_cpu(jh->j_first_unflushed_offset);
2321 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2322 newest_mount_id = le32_to_cpu(jh->j_mount_id);
2323 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2324 "journal-1153: found in "
2325 "header: first_unflushed_offset %d, last_flushed_trans_id "
2326 "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2327 le32_to_cpu(jh->j_last_flush_trans_id));
2328 valid_journal_header = 1;
2329
2330 /* now, we try to read the first unflushed offset. If it is not valid,
2331 ** there is nothing more we can do, and it makes no sense to read
2332 ** through the whole log.
2333 */
2334 d_bh =
2335 journal_bread(p_s_sb,
2336 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2337 le32_to_cpu(jh->j_first_unflushed_offset));
2338 ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
2339 if (!ret) {
2340 continue_replay = 0;
2341 }
2342 brelse(d_bh);
2343 goto start_log_replay;
2344 }
2345
2346 if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
2347 reiserfs_warning(p_s_sb,
2348 "clm-2076: device is readonly, unable to replay log");
2349 return -1;
2350 }
2351
2352 /* ok, there are transactions that need to be replayed. start with the first log block, find
2353 ** all the valid transactions, and pick out the oldest.
2354 */
2355 while (continue_replay
2356 && cur_dblock <
2357 (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2358 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
2359 /* Note that it is required for blocksize of primary fs device and journal
2360 device to be the same */
2361 d_bh =
2362 reiserfs_breada(journal->j_dev_bd, cur_dblock,
2363 p_s_sb->s_blocksize,
2364 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2365 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2366 ret =
2367 journal_transaction_is_valid(p_s_sb, d_bh,
2368 &oldest_invalid_trans_id,
2369 &newest_mount_id);
2370 if (ret == 1) {
2371 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2372 if (oldest_start == 0) { /* init all oldest_ values */
2373 oldest_trans_id = get_desc_trans_id(desc);
2374 oldest_start = d_bh->b_blocknr;
2375 newest_mount_id = get_desc_mount_id(desc);
2376 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2377 "journal-1179: Setting "
2378 "oldest_start to offset %llu, trans_id %lu",
2379 oldest_start -
2380 SB_ONDISK_JOURNAL_1st_BLOCK
2381 (p_s_sb), oldest_trans_id);
2382 } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2383 /* one we just read was older */
2384 oldest_trans_id = get_desc_trans_id(desc);
2385 oldest_start = d_bh->b_blocknr;
2386 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2387 "journal-1180: Resetting "
2388 "oldest_start to offset %lu, trans_id %lu",
2389 oldest_start -
2390 SB_ONDISK_JOURNAL_1st_BLOCK
2391 (p_s_sb), oldest_trans_id);
2392 }
2393 if (newest_mount_id < get_desc_mount_id(desc)) {
2394 newest_mount_id = get_desc_mount_id(desc);
2395 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2396 "journal-1299: Setting "
2397 "newest_mount_id to %d",
2398 get_desc_mount_id(desc));
2399 }
2400 cur_dblock += get_desc_trans_len(desc) + 2;
2401 } else {
2402 cur_dblock++;
2403 }
2404 brelse(d_bh);
2405 }
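	/* at this point (when the header was unusable) oldest_start and
	 * oldest_trans_id identify the oldest valid transaction of the
	 * newest mount; the loop below replays forward from there, one
	 * transaction at a time, until journal_read_transaction() rejects
	 * one.
	 */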
2406
2407 start_log_replay:
2408 cur_dblock = oldest_start;
2409 if (oldest_trans_id) {
2410 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2411 "journal-1206: Starting replay "
2412 "from offset %llu, trans_id %lu",
2413 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2414 oldest_trans_id);
2415
2416 }
2417 replay_count = 0;
2418 while (continue_replay && oldest_trans_id > 0) {
2419 ret =
2420 journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
2421 oldest_trans_id, newest_mount_id);
2422 if (ret < 0) {
2423 return ret;
2424 } else if (ret != 0) {
2425 break;
2426 }
2427 cur_dblock =
2428 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
2429 replay_count++;
2430 if (cur_dblock == oldest_start)
2431 break;
2432 }
2433
2434 if (oldest_trans_id == 0) {
2435 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2436 "journal-1225: No valid " "transactions found");
2437 }
2438 /* j_start does not get set correctly if we don't replay any transactions.
2439 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2440 ** copy the trans_id from the header
2441 */
2442 if (valid_journal_header && replay_count == 0) {
2443 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2444 journal->j_trans_id =
2445 le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2446 journal->j_last_flush_trans_id =
2447 le32_to_cpu(jh->j_last_flush_trans_id);
2448 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2449 } else {
2450 journal->j_mount_id = newest_mount_id + 1;
2451 }
2452 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2453 "newest_mount_id to %lu", journal->j_mount_id);
2454 journal->j_first_unflushed_offset = journal->j_start;
2455 if (replay_count > 0) {
2456 reiserfs_info(p_s_sb,
2457 "replayed %d transactions in %lu seconds\n",
2458 replay_count, get_seconds() - start);
2459 }
2460 if (!bdev_read_only(p_s_sb->s_bdev) &&
2461 _update_journal_header_block(p_s_sb, journal->j_start,
2462 journal->j_last_flush_trans_id)) {
2463 /* replay failed, caller must call free_journal_ram and abort
2464 ** the mount
2465 */
2466 return -1;
2467 }
2468 return 0;
2469}
2470
2471static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2472{
2473 struct reiserfs_journal_list *jl;
2474 retry:
2475 jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS,
2476 s);
2477 if (!jl) {
2478 yield();
2479 goto retry;
2480 }
2481 memset(jl, 0, sizeof(*jl));
2482 INIT_LIST_HEAD(&jl->j_list);
2483 INIT_LIST_HEAD(&jl->j_working_list);
2484 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2485 INIT_LIST_HEAD(&jl->j_bh_list);
2486 sema_init(&jl->j_commit_lock, 1);
2487 SB_JOURNAL(s)->j_num_lists++;
2488 get_journal_list(jl);
2489 return jl;
2490}
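/* note that the retry/yield loop above means alloc_journal_list() never
** returns NULL; callers rely on always getting a list, so under memory
** pressure we simply give up the CPU and retry the allocation.
*/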
2491
2492static void journal_list_init(struct super_block *p_s_sb)
2493{
2494 SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
2495}
2496
2497static int release_journal_dev(struct super_block *super,
2498 struct reiserfs_journal *journal)
2499{
2500 int result;
2501
2502 result = 0;
2503
2504 if (journal->j_dev_file != NULL) {
2505 result = filp_close(journal->j_dev_file, NULL);
2506 journal->j_dev_file = NULL;
2507 journal->j_dev_bd = NULL;
2508 } else if (journal->j_dev_bd != NULL) {
2509 result = blkdev_put(journal->j_dev_bd);
2510 journal->j_dev_bd = NULL;
2511 }
2512
2513 if (result != 0) {
2514 reiserfs_warning(super,
2515 "sh-457: release_journal_dev: Cannot release journal device: %i",
2516 result);
2517 }
2518 return result;
2519}
2520
2521static int journal_init_dev(struct super_block *super,
2522 struct reiserfs_journal *journal,
2523 const char *jdev_name)
2524{
2525 int result;
2526 dev_t jdev;
2527 int blkdev_mode = FMODE_READ | FMODE_WRITE;
2528 char b[BDEVNAME_SIZE];
2529
2530 result = 0;
2531
2532 journal->j_dev_bd = NULL;
2533 journal->j_dev_file = NULL;
2534 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2535 new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2536
2537 if (bdev_read_only(super->s_bdev))
2538 blkdev_mode = FMODE_READ;
2539
2540 /* there is no "jdev" option and journal is on separate device */
2541 if ((!jdev_name || !jdev_name[0])) {
2542 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2543 if (IS_ERR(journal->j_dev_bd)) {
2544 result = PTR_ERR(journal->j_dev_bd);
2545 journal->j_dev_bd = NULL;
2546 reiserfs_warning(super, "sh-458: journal_init_dev: "
2547 "cannot init journal device '%s': %i",
2548 __bdevname(jdev, b), result);
2549 return result;
2550 } else if (jdev != super->s_dev)
2551 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2552 return 0;
2553 }
2554
2555 journal->j_dev_file = filp_open(jdev_name, 0, 0);
2556 if (!IS_ERR(journal->j_dev_file)) {
2557 struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
2558 if (!S_ISBLK(jdev_inode->i_mode)) {
2559 reiserfs_warning(super, "journal_init_dev: '%s' is "
2560 "not a block device", jdev_name);
2561 result = -ENOTBLK;
2562 release_journal_dev(super, journal);
2563 } else {
2564 /* ok */
2565 journal->j_dev_bd = I_BDEV(jdev_inode);
2566 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2567 reiserfs_info(super,
2568 "journal_init_dev: journal device: %s\n",
2569 bdevname(journal->j_dev_bd, b));
2570 }
2571 } else {
2572 result = PTR_ERR(journal->j_dev_file);
2573 journal->j_dev_file = NULL;
2574 reiserfs_warning(super,
2575 "journal_init_dev: Cannot open '%s': %i",
2576 jdev_name, result);
2577 }
2578 return result;
2579}
2580
2581/*
2582** must be called once on fs mount. calls journal_read for you
2583*/
2584int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2585 int old_format, unsigned int commit_max_age)
2586{
2587 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
2588 struct buffer_head *bhjh;
2589 struct reiserfs_super_block *rs;
2590 struct reiserfs_journal_header *jh;
2591 struct reiserfs_journal *journal;
2592 struct reiserfs_journal_list *jl;
2593 char b[BDEVNAME_SIZE];
2594
2595 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
2596 if (!journal) {
2597 reiserfs_warning(p_s_sb,
2598 "journal-1256: unable to get memory for journal structure");
2599 return 1;
2600 }
2601 memset(journal, 0, sizeof(struct reiserfs_journal));
2602 INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2603 INIT_LIST_HEAD(&journal->j_prealloc_list);
2604 INIT_LIST_HEAD(&journal->j_working_list);
2605 INIT_LIST_HEAD(&journal->j_journal_list);
2606 journal->j_persistent_trans = 0;
2607 if (reiserfs_allocate_list_bitmaps(p_s_sb,
2608 journal->j_list_bitmap,
2609 SB_BMAP_NR(p_s_sb)))
2610 goto free_and_return;
2611 allocate_bitmap_nodes(p_s_sb);
2612
2613 /* reserved for journal area support */
2614 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2615 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2616 / p_s_sb->s_blocksize +
2617 SB_BMAP_NR(p_s_sb) +
2618 1 :
2619 REISERFS_DISK_OFFSET_IN_BYTES /
2620 p_s_sb->s_blocksize + 2);
2621
2622 /* Sanity check to see if the standard journal fits within the first bitmap
2623 (relevant for small blocksizes) */
2624 if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
2625 (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
2626 SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
2627 reiserfs_warning(p_s_sb,
2628 "journal-1393: journal does not fit into the area "
2629 "addressed by the first bitmap block. It starts at "
2630 "%u and its size is %u. Block size %ld",
2631 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
2632 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2633 p_s_sb->s_blocksize);
2634 goto free_and_return;
2635 }
2636
2637 if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
2638 reiserfs_warning(p_s_sb,
2639 "sh-462: unable to initialize journal device");
2640 goto free_and_return;
2641 }
2642
2643 rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2644
2645 /* read journal header */
2646 bhjh = journal_bread(p_s_sb,
2647 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2648 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2649 if (!bhjh) {
2650 reiserfs_warning(p_s_sb,
2651 "sh-459: unable to read journal header");
2652 goto free_and_return;
2653 }
2654 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2655
2656 /* make sure that the journal matches the super block */
2657 if (is_reiserfs_jr(rs)
2658 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2659 sb_jp_journal_magic(rs))) {
2660 reiserfs_warning(p_s_sb,
2661 "sh-460: journal header magic %x "
2662 "(device %s) does not match to magic found in super "
2663 "block %x", jh->jh_journal.jp_journal_magic,
2664 bdevname(journal->j_dev_bd, b),
2665 sb_jp_journal_magic(rs));
2666 brelse(bhjh);
2667 goto free_and_return;
2668 }
2669
2670 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2671 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2672 journal->j_max_commit_age =
2673 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2674 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2675
2676 if (journal->j_trans_max) {
2677 /* make sure these parameters are sane; fix them up if they are not */
2678 __u32 initial = journal->j_trans_max;
2679 __u32 ratio = 1;
2680
2681 if (p_s_sb->s_blocksize < 4096)
2682 ratio = 4096 / p_s_sb->s_blocksize;
2683
2684 if (SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
2685 JOURNAL_MIN_RATIO)
2686 journal->j_trans_max =
2687 SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
2688 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
2689 journal->j_trans_max =
2690 JOURNAL_TRANS_MAX_DEFAULT / ratio;
2691 if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
2692 journal->j_trans_max =
2693 JOURNAL_TRANS_MIN_DEFAULT / ratio;
2694
2695 if (journal->j_trans_max != initial)
2696 reiserfs_warning(p_s_sb,
2697 "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
2698 initial, journal->j_trans_max);
2699
2700 journal->j_max_batch = journal->j_trans_max *
2701 JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
2702 }
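	/* worked example (hypothetical numbers): with a 1024 byte blocksize,
	 * ratio is 4096 / 1024 = 4, so an on-disk j_trans_max gets clamped
	 * into [JOURNAL_TRANS_MIN_DEFAULT / 4, JOURNAL_TRANS_MAX_DEFAULT / 4]
	 * (and to at most journal size / JOURNAL_MIN_RATIO), after which
	 * j_max_batch is rescaled to the same proportion of the new maximum.
	 */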
2703
2704 if (!journal->j_trans_max) {
2705 /* the file system was created by an old version of mkreiserfs,
2706 so this field contains a zero value */
2707 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2708 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2709 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2710
2711 /* for blocksize >= 4096, the max transaction size is 1024. For blocksize < 4096,
2712 the trans max size is decreased proportionally */
2713 if (p_s_sb->s_blocksize < 4096) {
2714 journal->j_trans_max /= (4096 / p_s_sb->s_blocksize);
2715 journal->j_max_batch = (journal->j_trans_max) * 9 / 10;
2716 }
2717 }
2718
2719 journal->j_default_max_commit_age = journal->j_max_commit_age;
2720
2721 if (commit_max_age != 0) {
2722 journal->j_max_commit_age = commit_max_age;
2723 journal->j_max_trans_age = commit_max_age;
2724 }
2725
2726 reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
2727 "journal first block %u, max trans len %u, max batch %u, "
2728 "max commit age %u, max trans age %u\n",
2729 bdevname(journal->j_dev_bd, b),
2730 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2731 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2732 journal->j_trans_max,
2733 journal->j_max_batch,
2734 journal->j_max_commit_age, journal->j_max_trans_age);
2735
2736 brelse(bhjh);
2737
2738 journal->j_list_bitmap_index = 0;
2739 journal_list_init(p_s_sb);
2740
2741 memset(journal->j_list_hash_table, 0,
2742 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2743
2744 INIT_LIST_HEAD(&journal->j_dirty_buffers);
2745 spin_lock_init(&journal->j_dirty_buffers_lock);
2746
2747 journal->j_start = 0;
2748 journal->j_len = 0;
2749 journal->j_len_alloc = 0;
2750 atomic_set(&(journal->j_wcount), 0);
2751 atomic_set(&(journal->j_async_throttle), 0);
2752 journal->j_bcount = 0;
2753 journal->j_trans_start_time = 0;
2754 journal->j_last = NULL;
2755 journal->j_first = NULL;
2756 init_waitqueue_head(&(journal->j_join_wait));
2757 sema_init(&journal->j_lock, 1);
2758 sema_init(&journal->j_flush_sem, 1);
2759
2760 journal->j_trans_id = 10;
2761 journal->j_mount_id = 10;
2762 journal->j_state = 0;
2763 atomic_set(&(journal->j_jlock), 0);
2764 journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2765 journal->j_cnode_free_orig = journal->j_cnode_free_list;
2766 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2767 journal->j_cnode_used = 0;
2768 journal->j_must_wait = 0;
2769
2770 if (journal->j_cnode_free == 0) {
2771 reiserfs_warning(p_s_sb, "journal-2004: Journal cnode memory "
2772 "allocation failed (%ld bytes). Journal is "
2773 "too large for available memory. Usually "
2774 "this means the configured journal is too large.",
2775 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2776 goto free_and_return;
2777 }
2778
2779 init_journal_hash(p_s_sb);
2780 jl = journal->j_current_jl;
2781 jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
2782 if (!jl->j_list_bitmap) {
2783 reiserfs_warning(p_s_sb,
2784 "journal-2005, get_list_bitmap failed for journal list 0");
2785 goto free_and_return;
2786 }
2787 if (journal_read(p_s_sb) < 0) {
2788 reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
2789 goto free_and_return;
2790 }
2791
2792 reiserfs_mounted_fs_count++;
2793 if (reiserfs_mounted_fs_count <= 1)
2794 commit_wq = create_workqueue("reiserfs");
2795
2796 INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
2797 return 0;
2798 free_and_return:
2799 free_journal_ram(p_s_sb);
2800 return 1;
2801}
2802
2803/*
2804** test for a polite end of the current transaction. Used by file_write, and should
2805** be used by delete to make sure they don't write more than can fit inside a single
2806** transaction
2807*/
2808int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2809 int new_alloc)
2810{
2811 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2812 time_t now = get_seconds();
2813 /* cannot restart while nested */
2814 BUG_ON(!th->t_trans_id);
2815 if (th->t_refcount > 1)
2816 return 0;
2817 if (journal->j_must_wait > 0 ||
2818 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2819 atomic_read(&(journal->j_jlock)) ||
2820 (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2821 journal->j_cnode_free < (journal->j_trans_max * 3)) {
2822 return 1;
2823 }
2824 return 0;
2825}
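/* usage sketch (assumed caller pattern, not taken from this file): a long
** running writer checks this between units of work, e.g.
**
**	if (journal_transaction_should_end(th, blocks_for_next_step)) {
**		... journal_end() the handle and journal_begin() a new
**		one before touching more blocks ...
**	}
**
** so that a single handle never outgrows j_max_batch or j_max_trans_age.
*/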
2826
2827/* this must be called inside a transaction, and requires the
2828** kernel_lock to be held
2829*/
2830void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2831{
2832 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2833 BUG_ON(!th->t_trans_id);
2834 journal->j_must_wait = 1;
2835 set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2836 return;
2837}
2838
2839/* this must be called without a transaction started, and does not
2840** require BKL
2841*/
2842void reiserfs_allow_writes(struct super_block *s)
2843{
2844 struct reiserfs_journal *journal = SB_JOURNAL(s);
2845 clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2846 wake_up(&journal->j_join_wait);
2847}
2848
2849/* this must be called without a transaction started, and does not
2850** require BKL
2851*/
2852void reiserfs_wait_on_write_block(struct super_block *s)
2853{
2854 struct reiserfs_journal *journal = SB_JOURNAL(s);
2855 wait_event(journal->j_join_wait,
2856 !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2857}
2858
2859static void queue_log_writer(struct super_block *s)
2860{
2861 wait_queue_t wait;
2862 struct reiserfs_journal *journal = SB_JOURNAL(s);
2863 set_bit(J_WRITERS_QUEUED, &journal->j_state);
2864
2865 /*
2866 * we don't want to use wait_event here because
2867 * we only want to wait once.
2868 */
2869 init_waitqueue_entry(&wait, current);
2870 add_wait_queue(&journal->j_join_wait, &wait);
2871 set_current_state(TASK_UNINTERRUPTIBLE);
2872 if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
2873 schedule();
2874 current->state = TASK_RUNNING;
2875 remove_wait_queue(&journal->j_join_wait, &wait);
2876}
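/* the open-coded wait above sleeps at most once. If wake_queued_writers()
** clears J_WRITERS_QUEUED between our test and schedule(), its wake_up()
** has already hit j_join_wait, where we are queued, so schedule() returns
** promptly; that single-shot behaviour is why wait_event() (which would
** re-check the condition and sleep again) is deliberately not used here.
*/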
2877
2878static void wake_queued_writers(struct super_block *s)
2879{
2880 struct reiserfs_journal *journal = SB_JOURNAL(s);
2881 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2882 wake_up(&journal->j_join_wait);
2883}
2884
2885static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
2886{
2887 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2888 unsigned long bcount = journal->j_bcount;
2889 while (1) {
2890 schedule_timeout_uninterruptible(1);
2891 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2892 while ((atomic_read(&journal->j_wcount) > 0 ||
2893 atomic_read(&journal->j_jlock)) &&
2894 journal->j_trans_id == trans_id) {
2895 queue_log_writer(sb);
2896 }
2897 if (journal->j_trans_id != trans_id)
2898 break;
2899 if (bcount == journal->j_bcount)
2900 break;
2901 bcount = journal->j_bcount;
2902 }
2903}
2904
2905/* join == true if you must join an existing transaction.
2906** join == false if you can deal with waiting for others to finish
2907**
2908** this will block until the transaction is joinable. send the number of blocks you
2909** expect to use in nblocks.
2910*/
2911static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
2912 struct super_block *p_s_sb, unsigned long nblocks,
2913 int join)
2914{
2915 time_t now = get_seconds();
2916 int old_trans_id;
2917 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2918 struct reiserfs_transaction_handle myth;
2919 int sched_count = 0;
2920 int retval;
2921
2922 reiserfs_check_lock_depth(p_s_sb, "journal_begin");
2923 if (nblocks > journal->j_trans_max)
2924 BUG();
2925
2926 PROC_INFO_INC(p_s_sb, journal.journal_being);
2927 /* set here for journal_join */
2928 th->t_refcount = 1;
2929 th->t_super = p_s_sb;
2930
2931 relock:
2932 lock_journal(p_s_sb);
2933 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
2934 unlock_journal(p_s_sb);
2935 retval = journal->j_errno;
2936 goto out_fail;
2937 }
2938 journal->j_bcount++;
2939
2940 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
2941 unlock_journal(p_s_sb);
2942 reiserfs_wait_on_write_block(p_s_sb);
2943 PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
2944 goto relock;
2945 }
2946 now = get_seconds();
2947
2948 /* if there is no room in the journal, OR if this transaction is too old
2949 ** and we weren't called joinable, wait for the current one to finish before beginning.
2950 ** we don't sleep if there aren't other writers
2951 */
2952
2953 if ((!join && journal->j_must_wait > 0) ||
2954 (!join
2955 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
2956 || (!join && atomic_read(&journal->j_wcount) > 0
2957 && journal->j_trans_start_time > 0
2958 && (now - journal->j_trans_start_time) >
2959 journal->j_max_trans_age) || (!join
2960 && atomic_read(&journal->j_jlock))
2961 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
2962
2963 old_trans_id = journal->j_trans_id;
2964 unlock_journal(p_s_sb); /* allow others to finish this transaction */
2965
2966 if (!join && (journal->j_len_alloc + nblocks + 2) >=
2967 journal->j_max_batch &&
2968 ((journal->j_len + nblocks + 2) * 100) <
2969 (journal->j_len_alloc * 75)) {
2970 if (atomic_read(&journal->j_wcount) > 10) {
2971 sched_count++;
2972 queue_log_writer(p_s_sb);
2973 goto relock;
2974 }
2975 }
2976 /* don't mess with joining the transaction if all we have to do is
2977 * wait for someone else to do a commit
2978 */
2979 if (atomic_read(&journal->j_jlock)) {
2980 while (journal->j_trans_id == old_trans_id &&
2981 atomic_read(&journal->j_jlock)) {
2982 queue_log_writer(p_s_sb);
2983 }
2984 goto relock;
2985 }
2986 retval = journal_join(&myth, p_s_sb, 1);
2987 if (retval)
2988 goto out_fail;
2989
2990 /* someone might have ended the transaction while we joined */
2991 if (old_trans_id != journal->j_trans_id) {
2992 retval = do_journal_end(&myth, p_s_sb, 1, 0);
2993 } else {
2994 retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
2995 }
2996
2997 if (retval)
2998 goto out_fail;
2999
3000 PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
3001 goto relock;
3002 }
3003 /* we are the first writer, set trans_id */
3004 if (journal->j_trans_start_time == 0) {
3005 journal->j_trans_start_time = get_seconds();
3006 }
3007 atomic_inc(&(journal->j_wcount));
3008 journal->j_len_alloc += nblocks;
3009 th->t_blocks_logged = 0;
3010 th->t_blocks_allocated = nblocks;
3011 th->t_trans_id = journal->j_trans_id;
3012 unlock_journal(p_s_sb);
3013 INIT_LIST_HEAD(&th->t_list);
3014 get_fs_excl();
3015 return 0;
3016
3017 out_fail:
3018 memset(th, 0, sizeof(*th));
3019 /* Re-set th->t_super, so we can properly keep track of how many
3020 * persistent transactions there are. We need to do this so if this
3021 * call is part of a failed restart_transaction, we can free it later */
3022 th->t_super = p_s_sb;
3023 return retval;
3024}
3025
3026struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3027 super_block
3028 *s,
3029 int nblocks)
3030{
3031 int ret;
3032 struct reiserfs_transaction_handle *th;
3033
3034 /* if we're nesting into an existing transaction, it will be
3035 ** persistent on its own
3036 */
3037 if (reiserfs_transaction_running(s)) {
3038 th = current->journal_info;
3039 th->t_refcount++;
3040 if (th->t_refcount < 2) {
3041 BUG();
3042 }
3043 return th;
3044 }
3045 th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle),
3046 GFP_NOFS, s);
3047 if (!th)
3048 return NULL;
3049 ret = journal_begin(th, s, nblocks);
3050 if (ret) {
3051 reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle),
3052 s);
3053 return NULL;
3054 }
3055
3056 SB_JOURNAL(s)->j_persistent_trans++;
3057 return th;
3058}
3059
3060int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3061{
3062 struct super_block *s = th->t_super;
3063 int ret = 0;
3064 if (th->t_trans_id)
3065 ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3066 else
3067 ret = -EIO;
3068 if (th->t_refcount == 0) {
3069 SB_JOURNAL(s)->j_persistent_trans--;
3070 reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle),
3071 s);
3072 }
3073 return ret;
3074}
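/* usage sketch (hedged; jbegin_count is illustrative): callers that must
** carry a handle across function boundaries can do
**
**	struct reiserfs_transaction_handle *th;
**	th = reiserfs_persistent_transaction(sb, jbegin_count);
**	if (!th)
**		return -ENOMEM;
**	... journal_mark_dirty(th, sb, bh) for each modified buffer ...
**	reiserfs_end_persistent_transaction(th);
**
** nesting is handled automatically: a nested call just bumps t_refcount,
** and the journal_end/kfree only happen when the count drops back to zero.
*/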
3075
3076static int journal_join(struct reiserfs_transaction_handle *th,
3077 struct super_block *p_s_sb, unsigned long nblocks)
3078{
3079 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3080
3081 /* this keeps do_journal_end from NULLing out the current->journal_info
3082 ** pointer
3083 */
3084 th->t_handle_save = cur_th;
3085 if (cur_th && cur_th->t_refcount > 1) {
3086 BUG();
3087 }
3088 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
3089}
3090
3091int journal_join_abort(struct reiserfs_transaction_handle *th,
3092 struct super_block *p_s_sb, unsigned long nblocks)
3093{
3094 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3095
3096 /* this keeps do_journal_end from NULLing out the current->journal_info
3097 ** pointer
3098 */
3099 th->t_handle_save = cur_th;
3100 if (cur_th && cur_th->t_refcount > 1) {
3101 BUG();
3102 }
3103 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
3104}
3105
3106int journal_begin(struct reiserfs_transaction_handle *th,
3107 struct super_block *p_s_sb, unsigned long nblocks)
3108{
3109 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3110 int ret;
3111
3112 th->t_handle_save = NULL;
3113 if (cur_th) {
3114 /* we are nesting into the current transaction */
3115 if (cur_th->t_super == p_s_sb) {
3116 BUG_ON(!cur_th->t_refcount);
3117 cur_th->t_refcount++;
3118 memcpy(th, cur_th, sizeof(*th));
3119 if (th->t_refcount <= 1)
3120 reiserfs_warning(p_s_sb,
3121 "BAD: refcount <= 1, but journal_info != 0");
3122 return 0;
3123 } else {
3124 /* we've ended up with a handle from a different filesystem.
3125 ** save it and restore on journal_end. This should never
3126 ** really happen...
3127 */
3128 reiserfs_warning(p_s_sb,
3129 "clm-2100: nesting into a different FS");
3130 th->t_handle_save = current->journal_info;
3131 current->journal_info = th;
3132 }
3133 } else {
3134 current->journal_info = th;
3135 }
3136 ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
3137 if (current->journal_info != th)
3138 BUG();
3139
3140 /* I guess this boils down to being the reciprocal of clm-2100 above.
3141 * If do_journal_begin_r fails, we need to put it back, since journal_end
3142 * won't be called to do it. */
3143 if (ret)
3144 current->journal_info = th->t_handle_save;
3145 else
3146 BUG_ON(!th->t_refcount);
3147
3148 return ret;
3149}
3150
3151/*
3152** puts bh into the current transaction. If it was already there, reorders removes the
3153** old pointers from the hash, and puts new ones in (to make sure replay happen in the right order).
3154**
3155** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
3156** transaction is committed.
3157**
3158** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
3159*/
3160int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3161 struct super_block *p_s_sb, struct buffer_head *bh)
3162{
3163 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3164 struct reiserfs_journal_cnode *cn = NULL;
3165 int count_already_incd = 0;
3166 int prepared = 0;
3167 BUG_ON(!th->t_trans_id);
3168
3169 PROC_INFO_INC(p_s_sb, journal.mark_dirty);
3170 if (th->t_trans_id != journal->j_trans_id) {
3171 reiserfs_panic(th->t_super,
3172 "journal-1577: handle trans id %ld != current trans id %ld\n",
3173 th->t_trans_id, journal->j_trans_id);
3174 }
3175
3176 p_s_sb->s_dirt = 1;
3177
3178 prepared = test_clear_buffer_journal_prepared(bh);
3179 clear_buffer_journal_restore_dirty(bh);
3180 /* already in this transaction, we are done */
3181 if (buffer_journaled(bh)) {
3182 PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
3183 return 0;
3184 }
3185
3186 /* this must be turned into a panic instead of a warning. We can't allow
3187 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3188 ** could get to disk too early. NOT GOOD.
3189 */
3190 if (!prepared || buffer_dirty(bh)) {
3191 reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
3192 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3193 (unsigned long long)bh->b_blocknr,
3194 prepared ? ' ' : '!',
3195 buffer_locked(bh) ? ' ' : '!',
3196 buffer_dirty(bh) ? ' ' : '!',
3197 buffer_journal_dirty(bh) ? ' ' : '!');
3198 }
3199
3200 if (atomic_read(&(journal->j_wcount)) <= 0) {
3201 reiserfs_warning(p_s_sb,
3202 "journal-1409: journal_mark_dirty returning because j_wcount was %d",
3203 atomic_read(&(journal->j_wcount)));
3204 return 1;
3205 }
3206 /* this error means I've screwed up, and we've overflowed the transaction.
3207 ** Nothing can be done here, except make the FS readonly or panic.
3208 */
3209 if (journal->j_len >= journal->j_trans_max) {
3210 reiserfs_panic(th->t_super,
3211 "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
3212 journal->j_len);
3213 }
3214
3215 if (buffer_journal_dirty(bh)) {
3216 count_already_incd = 1;
3217 PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
3218 clear_buffer_journal_dirty(bh);
3219 }
3220
3221 if (journal->j_len > journal->j_len_alloc) {
3222 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3223 }
3224
3225 set_buffer_journaled(bh);
3226
3227 /* now put this guy on the end */
3228 if (!cn) {
3229 cn = get_cnode(p_s_sb);
3230 if (!cn) {
3231 reiserfs_panic(p_s_sb, "get_cnode failed!\n");
3232 }
3233
3234 if (th->t_blocks_logged == th->t_blocks_allocated) {
3235 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3236 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3237 }
3238 th->t_blocks_logged++;
3239 journal->j_len++;
3240
3241 cn->bh = bh;
3242 cn->blocknr = bh->b_blocknr;
3243 cn->sb = p_s_sb;
3244 cn->jlist = NULL;
3245 insert_journal_hash(journal->j_hash_table, cn);
3246 if (!count_already_incd) {
3247 get_bh(bh);
3248 }
3249 }
3250 cn->next = NULL;
3251 cn->prev = journal->j_last;
3252 cn->bh = bh;
3253 if (journal->j_last) {
3254 journal->j_last->next = cn;
3255 journal->j_last = cn;
3256 } else {
3257 journal->j_first = cn;
3258 journal->j_last = cn;
3259 }
3260 return 0;
3261}
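/* note on the structures journal_mark_dirty maintains: each logged buffer's
** cnode lives in two places at once -- j_hash_table, keyed by block number
** so the block can be found again quickly, and the j_first/j_last doubly
** linked list, which remembers the order blocks were added so the
** transaction can be written out in that same order.
*/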
3262
3263int journal_end(struct reiserfs_transaction_handle *th,
3264 struct super_block *p_s_sb, unsigned long nblocks)
3265{
3266 if (!current->journal_info && th->t_refcount > 1)
3267 reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
3268 th->t_refcount);
3269
3270 if (!th->t_trans_id) {
3271 WARN_ON(1);
3272 return -EIO;
3273 }
3274
3275 th->t_refcount--;
3276 if (th->t_refcount > 0) {
3277 struct reiserfs_transaction_handle *cur_th =
3278 current->journal_info;
3279
3280 /* we aren't allowed to close a nested transaction on a different
3281 ** filesystem from the one in the task struct
3282 */
3283 if (cur_th->t_super != th->t_super)
3284 BUG();
3285
3286 if (th != cur_th) {
3287 memcpy(current->journal_info, th, sizeof(*th));
3288 th->t_trans_id = 0;
3289 }
3290 return 0;
3291 } else {
3292 return do_journal_end(th, p_s_sb, nblocks, 0);
3293 }
3294}
3295
3296/* removes from the current transaction, releasing (brelse) and decrementing any counters.
3297** also files the removed buffer directly onto the clean list
3298**
3299** called by journal_mark_freed when a block has been deleted
3300**
3301** returns 1 if it cleaned and released the buffer. 0 otherwise
3302*/
3303static int remove_from_transaction(struct super_block *p_s_sb,
3304 b_blocknr_t blocknr, int already_cleaned)
3305{
3306 struct buffer_head *bh;
3307 struct reiserfs_journal_cnode *cn;
3308 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3309 int ret = 0;
3310
3311 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3312 if (!cn || !cn->bh) {
3313 return ret;
3314 }
3315 bh = cn->bh;
3316 if (cn->prev) {
3317 cn->prev->next = cn->next;
3318 }
3319 if (cn->next) {
3320 cn->next->prev = cn->prev;
3321 }
3322 if (cn == journal->j_first) {
3323 journal->j_first = cn->next;
3324 }
3325 if (cn == journal->j_last) {
3326 journal->j_last = cn->prev;
3327 }
3328 if (bh)
3329 remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
3330 bh->b_blocknr, 0);
3331 clear_buffer_journaled(bh); /* don't log this one */
3332
3333 if (!already_cleaned) {
3334 clear_buffer_journal_dirty(bh);
3335 clear_buffer_dirty(bh);
3336 clear_buffer_journal_test(bh);
3337 put_bh(bh);
3338 if (atomic_read(&(bh->b_count)) < 0) {
3339 reiserfs_warning(p_s_sb,
3340 "journal-1752: remove from trans, b_count < 0");
3341 }
3342 ret = 1;
3343 }
3344 journal->j_len--;
3345 journal->j_len_alloc--;
3346 free_cnode(p_s_sb, cn);
3347 return ret;
3348}
3349
3350/*
3351** for any cnode in a journal list, it can only be dirtied if all the
3352** transactions that include it are committed to disk.
3353** this checks through each transaction, and returns 1 if you are allowed to dirty,
3354** and 0 if you aren't
3355**
3356** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3357** blocks for a given transaction on disk
3358**
3359*/
3360static int can_dirty(struct reiserfs_journal_cnode *cn)
3361{
3362 struct super_block *sb = cn->sb;
3363 b_blocknr_t blocknr = cn->blocknr;
3364 struct reiserfs_journal_cnode *cur = cn->hprev;
3365 int can_dirty = 1;
3366
3367 /* first test hprev. These are all newer than cn, so any node here
3368 ** with the same block number and dev means this node can't be sent
3369 ** to disk right now.
3370 */
3371 while (cur && can_dirty) {
3372 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3373 cur->blocknr == blocknr) {
3374 can_dirty = 0;
3375 }
3376 cur = cur->hprev;
3377 }
3378 /* then test hnext. These are all older than cn. As long as they
3379 ** are committed to the log, it is safe to write cn to disk
3380 */
3381 cur = cn->hnext;
3382 while (cur && can_dirty) {
3383 if (cur->jlist && cur->jlist->j_len > 0 &&
3384 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3385 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3386 can_dirty = 0;
3387 }
3388 cur = cur->hnext;
3389 }
3390 return can_dirty;
3391}
3392
3393/* syncs the commit blocks, but does not force the real buffers to disk
3394** will wait until the current transaction is done/committed before returning
3395*/
3396int journal_end_sync(struct reiserfs_transaction_handle *th,
3397 struct super_block *p_s_sb, unsigned long nblocks)
3398{
3399 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3400
3401 BUG_ON(!th->t_trans_id);
3402 /* syncing while nested is very, very bad */
3403 if (th->t_refcount > 1) {
3404 BUG();
3405 }
3406 if (journal->j_len == 0) {
3407 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3408 1);
3409 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3410 }
3411 return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
3412}
3413
3414/*
3415** writeback the pending async commits to disk
3416*/
3417static void flush_async_commits(void *p)
3418{
3419 struct super_block *p_s_sb = p;
3420 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3421 struct reiserfs_journal_list *jl;
3422 struct list_head *entry;
3423
3424 lock_kernel();
3425 if (!list_empty(&journal->j_journal_list)) {
3426 /* last entry is the youngest, commit it and you get everything */
3427 entry = journal->j_journal_list.prev;
3428 jl = JOURNAL_LIST_ENTRY(entry);
3429 flush_commit_list(p_s_sb, jl, 1);
3430 }
3431 unlock_kernel();
3432 /*
3433 * this is a little racy, but there's no harm in missing
3434 * the filemap_fdatawrite
3435 */
3436 if (!atomic_read(&journal->j_async_throttle)
3437 && !reiserfs_is_journal_aborted(journal)) {
3438 atomic_inc(&journal->j_async_throttle);
3439 filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
3440 atomic_dec(&journal->j_async_throttle);
3441 }
3442}
3443
3444/*
3445** flushes any old transactions to disk
3446** ends the current transaction if it is too old
3447*/
3448int reiserfs_flush_old_commits(struct super_block *p_s_sb)
3449{
3450 time_t now;
3451 struct reiserfs_transaction_handle th;
3452 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3453
3454 now = get_seconds();
3455 /* safety check so we don't flush while we are replaying the log during
3456 * mount
3457 */
3458 if (list_empty(&journal->j_journal_list)) {
3459 return 0;
3460 }
3461
3462 /* check the current transaction. If there are no writers, and it is
3463 * too old, finish it, and force the commit blocks to disk
3464 */
3465 if (atomic_read(&journal->j_wcount) <= 0 &&
3466 journal->j_trans_start_time > 0 &&
3467 journal->j_len > 0 &&
3468 (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3469 if (!journal_join(&th, p_s_sb, 1)) {
3470 reiserfs_prepare_for_journal(p_s_sb,
3471 SB_BUFFER_WITH_SB(p_s_sb),
3472 1);
3473 journal_mark_dirty(&th, p_s_sb,
3474 SB_BUFFER_WITH_SB(p_s_sb));
3475
3476 /* we're only being called from kreiserfsd, it makes no sense to do
3477 ** an async commit so that kreiserfsd can do it later
3478 */
3479 do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
3480 }
3481 }
3482 return p_s_sb->s_dirt;
3483}
3484
3485/*
3486** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3487**
3488** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3489 ** the writers are done. By the time it wakes up, the transaction it was called with has already ended, so it just
3490** flushes the commit list and returns 0.
3491**
3492** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
3493**
3494** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3495*/
3496static int check_journal_end(struct reiserfs_transaction_handle *th,
3497 struct super_block *p_s_sb, unsigned long nblocks,
3498 int flags)
3499{
3500
3501 time_t now;
3502 int flush = flags & FLUSH_ALL;
3503 int commit_now = flags & COMMIT_NOW;
3504 int wait_on_commit = flags & WAIT;
3505 struct reiserfs_journal_list *jl;
3506 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3507
3508 BUG_ON(!th->t_trans_id);
3509
3510 if (th->t_trans_id != journal->j_trans_id) {
3511 reiserfs_panic(th->t_super,
3512 "journal-1577: handle trans id %ld != current trans id %ld\n",
3513 th->t_trans_id, journal->j_trans_id);
3514 }
3515
3516 journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3517 if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
3518 atomic_dec(&(journal->j_wcount));
3519 }
3520
3521	/* BUG: deal with the case where j_len is 0 but previously freed blocks
3522	** still need to be released. The next transaction that actually writes
3523	** something will handle it, but it should be taken care of in this trans
3524	*/
3525 if (journal->j_len == 0) {
3526 BUG();
3527 }
3528	/* if wcount > 0, and we are called with flush or commit_now,
3529 ** we wait on j_join_wait. We will wake up when the last writer has
3530 ** finished the transaction, and started it on its way to the disk.
3531 ** Then, we flush the commit or journal list, and just return 0
3532 ** because the rest of journal end was already done for this transaction.
3533 */
3534 if (atomic_read(&(journal->j_wcount)) > 0) {
3535 if (flush || commit_now) {
3536 unsigned trans_id;
3537
3538 jl = journal->j_current_jl;
3539 trans_id = jl->j_trans_id;
3540 if (wait_on_commit)
3541 jl->j_state |= LIST_COMMIT_PENDING;
3542 atomic_set(&(journal->j_jlock), 1);
3543 if (flush) {
3544 journal->j_next_full_flush = 1;
3545 }
3546 unlock_journal(p_s_sb);
3547
3548 /* sleep while the current transaction is still j_jlocked */
3549 while (journal->j_trans_id == trans_id) {
3550 if (atomic_read(&journal->j_jlock)) {
3551 queue_log_writer(p_s_sb);
3552 } else {
3553 lock_journal(p_s_sb);
3554 if (journal->j_trans_id == trans_id) {
3555 atomic_set(&(journal->j_jlock),
3556 1);
3557 }
3558 unlock_journal(p_s_sb);
3559 }
3560 }
3561 if (journal->j_trans_id == trans_id) {
3562 BUG();
3563 }
3564 if (commit_now
3565 && journal_list_still_alive(p_s_sb, trans_id)
3566 && wait_on_commit) {
3567 flush_commit_list(p_s_sb, jl, 1);
3568 }
3569 return 0;
3570 }
3571 unlock_journal(p_s_sb);
3572 return 0;
3573 }
3574
3575 /* deal with old transactions where we are the last writers */
3576 now = get_seconds();
3577 if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3578 commit_now = 1;
3579 journal->j_next_async_flush = 1;
3580 }
3581 /* don't batch when someone is waiting on j_join_wait */
3582 /* don't batch when syncing the commit or flushing the whole trans */
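	/* Illustrative example (assuming the common defaults: trans_max 1024,
	** max_batch 900, max_trans_age 30s): a young 50-block transaction
	** with nobody waiting falls into the batch path below and returns 0,
	** so its blocks ride along with a later commit.
	*/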
3583 if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3584 && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3585 && journal->j_len_alloc < journal->j_max_batch
3586 && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3587 journal->j_bcount++;
3588 unlock_journal(p_s_sb);
3589 return 0;
3590 }
3591
3592 if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
3593 reiserfs_panic(p_s_sb,
3594 "journal-003: journal_end: j_start (%ld) is too high\n",
3595 journal->j_start);
3596 }
3597 return 1;
3598}
3599
3600/*
3601** Does all the work that makes deleting blocks safe.
3602** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
3603**
3604** otherwise:
3605** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
3606** before this transaction has finished.
3607**
3608** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
3609** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
3610** the block can't be reallocated yet.
3611**
3612** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3613*/
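/*
** Decision sketch (an illustrative summary of the two paths below):
**
**	if (buffer_journal_new(bh))
**		nothing was ever logged; just pull it out of the transaction
**	else
**		set the journal bitmap bit and mark every matching cnode
**		BLOCK_FREED so older lists won't write the stale copy out
*/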
3614int journal_mark_freed(struct reiserfs_transaction_handle *th,
3615 struct super_block *p_s_sb, b_blocknr_t blocknr)
3616{
3617 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3618 struct reiserfs_journal_cnode *cn = NULL;
3619 struct buffer_head *bh = NULL;
3620 struct reiserfs_list_bitmap *jb = NULL;
3621 int cleaned = 0;
3622 BUG_ON(!th->t_trans_id);
3623
3624 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3625 if (cn && cn->bh) {
3626 bh = cn->bh;
3627 get_bh(bh);
3628 }
3629 /* if it is journal new, we just remove it from this transaction */
3630 if (bh && buffer_journal_new(bh)) {
3631 clear_buffer_journal_new(bh);
3632 clear_prepared_bits(bh);
3633 reiserfs_clean_and_file_buffer(bh);
3634 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3635 } else {
3636 /* set the bit for this block in the journal bitmap for this transaction */
3637 jb = journal->j_current_jl->j_list_bitmap;
3638 if (!jb) {
3639 reiserfs_panic(p_s_sb,
3640 "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
3641 }
3642 set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
3643
3644 /* Note, the entire while loop is not allowed to schedule. */
3645
3646 if (bh) {
3647 clear_prepared_bits(bh);
3648 reiserfs_clean_and_file_buffer(bh);
3649 }
3650 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3651
3652 /* find all older transactions with this block, make sure they don't try to write it out */
3653 cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
3654 blocknr);
3655 while (cn) {
3656 if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
3657 set_bit(BLOCK_FREED, &cn->state);
3658 if (cn->bh) {
3659 if (!cleaned) {
3660 /* remove_from_transaction will brelse the buffer if it was
3661 ** in the current trans
3662 */
3663 clear_buffer_journal_dirty(cn->
3664 bh);
3665 clear_buffer_dirty(cn->bh);
3666 clear_buffer_journal_test(cn->
3667 bh);
3668 cleaned = 1;
3669 put_bh(cn->bh);
3670 if (atomic_read
3671 (&(cn->bh->b_count)) < 0) {
3672 reiserfs_warning(p_s_sb,
3673 "journal-2138: cn->bh->b_count < 0");
3674 }
3675 }
3676 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
3677 atomic_dec(&
3678 (cn->jlist->
3679 j_nonzerolen));
3680 }
3681 cn->bh = NULL;
3682 }
3683 }
3684 cn = cn->hnext;
3685 }
3686 }
3687
3688 if (bh) {
3689 put_bh(bh); /* get_hash grabs the buffer */
3690 if (atomic_read(&(bh->b_count)) < 0) {
3691 reiserfs_warning(p_s_sb,
3692 "journal-2165: bh->b_count < 0");
3693 }
3694 }
3695 return 0;
3696}
3697
3698void reiserfs_update_inode_transaction(struct inode *inode)
3699{
3700 struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3701 REISERFS_I(inode)->i_jl = journal->j_current_jl;
3702 REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3703}
3704
3705/*
3706 * returns -1 on error, 0 if no commits/barriers were done and 1
3707 * if a transaction was actually committed and the barrier was done
3708 */
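/*
** Illustrative use of the return contract (hypothetical caller, shown
** only to make the three cases concrete):
**
**	ret = __commit_trans_jl(inode, id, jl);
**	if (ret < 0)		... journal error, propagate ...
**	else if (ret == 0)	... nothing committed, no barrier done ...
**	else			... a commit and barrier actually happened ...
*/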
3709static int __commit_trans_jl(struct inode *inode, unsigned long id,
3710				 struct reiserfs_journal_list *jl)
3711{
3712 struct reiserfs_transaction_handle th;
3713 struct super_block *sb = inode->i_sb;
3714 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3715 int ret = 0;
3716
3717 /* is it from the current transaction, or from an unknown transaction? */
3718 if (id == journal->j_trans_id) {
3719 jl = journal->j_current_jl;
3720 /* try to let other writers come in and grow this transaction */
3721 let_transaction_grow(sb, id);
3722 if (journal->j_trans_id != id) {
3723 goto flush_commit_only;
3724 }
3725
3726 ret = journal_begin(&th, sb, 1);
3727 if (ret)
3728 return ret;
3729
3730 /* someone might have ended this transaction while we joined */
3731 if (journal->j_trans_id != id) {
3732 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3733 1);
3734 journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3735 ret = journal_end(&th, sb, 1);
3736 goto flush_commit_only;
3737 }
3738
3739 ret = journal_end_sync(&th, sb, 1);
3740 if (!ret)
3741 ret = 1;
3742
3743 } else {
3744 /* this gets tricky, we have to make sure the journal list in
3745 * the inode still exists. We know the list is still around
3746 * if we've got a larger transaction id than the oldest list
3747 */
3748 flush_commit_only:
3749 if (journal_list_still_alive(inode->i_sb, id)) {
3750 /*
3751 * we only set ret to 1 when we know for sure
3752 * the barrier hasn't been started yet on the commit
3753 * block.
3754 */
3755 if (atomic_read(&jl->j_commit_left) > 1)
3756 ret = 1;
3757 flush_commit_list(sb, jl, 1);
3758 if (journal->j_errno)
3759 ret = journal->j_errno;
3760 }
3761	}
3762 /* otherwise the list is gone, and long since committed */
3763 return ret;
3764}
3765
3766int reiserfs_commit_for_inode(struct inode *inode)
3767{
3768 unsigned long id = REISERFS_I(inode)->i_trans_id;
3769 struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
3770
3771 /* for the whole inode, assume unset id means it was
3772 * changed in the current transaction. More conservative
3773	 */
3774 if (!id || !jl) {
3775 reiserfs_update_inode_transaction(inode);
3776 id = REISERFS_I(inode)->i_trans_id;
3777 /* jl will be updated in __commit_trans_jl */
3778 }
3779
3780 return __commit_trans_jl(inode, id, jl);
3781}
3782
3783void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
3784 struct buffer_head *bh)
3785{
3786 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3787 PROC_INFO_INC(p_s_sb, journal.restore_prepared);
3788 if (!bh) {
3789 return;
3790 }
3791 if (test_clear_buffer_journal_restore_dirty(bh) &&
3792 buffer_journal_dirty(bh)) {
3793 struct reiserfs_journal_cnode *cn;
3794 cn = get_journal_hash_dev(p_s_sb,
3795 journal->j_list_hash_table,
3796 bh->b_blocknr);
3797 if (cn && can_dirty(cn)) {
3798 set_buffer_journal_test(bh);
3799 mark_buffer_dirty(bh);
3800 }
3801 }
3802 clear_buffer_journal_prepared(bh);
3803}
3804
3805extern struct tree_balance *cur_tb;
3806/*
3807** before we can change a metadata block, we have to make sure it won't
3808** be written to disk while we are altering it. So, we must:
3809** clean it
3810** wait on it.
3811**
3812*/
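/*
** Typical caller pattern (illustrative; mirrors the
** reiserfs_flush_old_commits path above):
**
**	reiserfs_prepare_for_journal(p_s_sb, bh, 1);
**	... modify the buffer contents ...
**	journal_mark_dirty(th, p_s_sb, bh);
*/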
3813int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
3814 struct buffer_head *bh, int wait)
3815{
3816 PROC_INFO_INC(p_s_sb, journal.prepare);
3817
3818 if (test_set_buffer_locked(bh)) {
3819 if (!wait)
3820 return 0;
3821 lock_buffer(bh);
3822 }
3823 set_buffer_journal_prepared(bh);
3824 if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3825 clear_buffer_journal_test(bh);
3826 set_buffer_journal_restore_dirty(bh);
3827 }
3828 unlock_buffer(bh);
3829 return 1;
3830}
3831
3832static void flush_old_journal_lists(struct super_block *s)
3833{
3834 struct reiserfs_journal *journal = SB_JOURNAL(s);
3835 struct reiserfs_journal_list *jl;
3836 struct list_head *entry;
3837 time_t now = get_seconds();
3838
3839 while (!list_empty(&journal->j_journal_list)) {
3840 entry = journal->j_journal_list.next;
3841 jl = JOURNAL_LIST_ENTRY(entry);
3842 /* this check should always be run, to send old lists to disk */
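		/* illustrative: with the stock JOURNAL_MAX_TRANS_AGE of 30
		** seconds, lists older than two minutes get pushed out here
		*/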
3843 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
3844 flush_used_journal_lists(s, jl);
3845 } else {
3846 break;
3847 }
3848	}
3849}
3850
3851/*
3852** long and ugly. If flush, will not return until all commit
3853** blocks and all real buffers in the trans are on disk.
3854** If no_async, won't return until all commit blocks are on disk.
3855**
3856** keep reading, there are comments as you go along
3857**
3858** If the journal is aborted, we just clean up. Things like flushing
3859** journal lists, etc just won't happen.
3860*/
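/*
** Flag summary (illustrative recap of the paths below):
**	FLUSH_ALL         -- flush_commit_list and flush_journal_list before
**	                     returning; log and real blocks hit the disk
**	COMMIT_NOW | WAIT -- commit this transaction and wait on its commit
**	                     blocks; real blocks may stay dirty
**	0                 -- may batch: check_journal_end can return 0 and
**	                     leave the transaction open for later writers
*/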
3861static int do_journal_end(struct reiserfs_transaction_handle *th,
3862 struct super_block *p_s_sb, unsigned long nblocks,
3863 int flags)
3864{
3865 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3866 struct reiserfs_journal_cnode *cn, *next, *jl_cn;
3867 struct reiserfs_journal_cnode *last_cn = NULL;
3868 struct reiserfs_journal_desc *desc;
3869 struct reiserfs_journal_commit *commit;
3870 struct buffer_head *c_bh; /* commit bh */
3871 struct buffer_head *d_bh; /* desc bh */
3872 int cur_write_start = 0; /* start index of current log write */
3873 int old_start;
3874 int i;
3875 int flush = flags & FLUSH_ALL;
3876 int wait_on_commit = flags & WAIT;
3877 struct reiserfs_journal_list *jl, *temp_jl;
3878 struct list_head *entry, *safe;
3879 unsigned long jindex;
3880 unsigned long commit_trans_id;
3881 int trans_half;
3882
3883 BUG_ON(th->t_refcount > 1);
3884 BUG_ON(!th->t_trans_id);
3885
3886 put_fs_excl();
3887 current->journal_info = th->t_handle_save;
3888 reiserfs_check_lock_depth(p_s_sb, "journal end");
3889 if (journal->j_len == 0) {
3890 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3891 1);
3892 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3893 }
3894
3895 lock_journal(p_s_sb);
3896 if (journal->j_next_full_flush) {
3897 flags |= FLUSH_ALL;
3898 flush = 1;
3899 }
3900 if (journal->j_next_async_flush) {
3901 flags |= COMMIT_NOW | WAIT;
3902 wait_on_commit = 1;
3903 }
3904
3905	/* check_journal_end locks the journal and unlocks it if it does not return 1.
3906	** It tells us if we should continue with the journal_end, or just return
3907 */
3908 if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
3909 p_s_sb->s_dirt = 1;
3910 wake_queued_writers(p_s_sb);
3911 reiserfs_async_progress_wait(p_s_sb);
3912 goto out;
3913 }
3914
3915 /* check_journal_end might set these, check again */
3916 if (journal->j_next_full_flush) {
3917 flush = 1;
3918 }
3919
3920 /*
3921	** j_must_wait means we have to flush the log blocks and the real blocks for
3922 ** this transaction
3923 */
3924 if (journal->j_must_wait > 0) {
3925 flush = 1;
3926 }
3927#ifdef REISERFS_PREALLOCATE
3928 /* quota ops might need to nest, setup the journal_info pointer for them */
3929 current->journal_info = th;
3930 reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
3931 * the transaction */
3932 current->journal_info = th->t_handle_save;
3933#endif
3934
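	/* On-disk layout being assembled below (illustrative; offsets are
	** relative to SB_ONDISK_JOURNAL_1st_BLOCK, mod
	** SB_ONDISK_JOURNAL_SIZE):
	**
	**	j_start                       desc block
	**	j_start+1 .. j_start+j_len    copies of the logged real blocks
	**	j_start+j_len+1               commit block
	*/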
3935 /* setup description block */
3936 d_bh =
3937 journal_getblk(p_s_sb,
3938 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3939 journal->j_start);
3940 set_buffer_uptodate(d_bh);
3941 desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
3942 memset(d_bh->b_data, 0, d_bh->b_size);
3943 memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
3944 set_desc_trans_id(desc, journal->j_trans_id);
3945
3946 /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
3947 c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3948 ((journal->j_start + journal->j_len +
3949 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
3950 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
3951 memset(c_bh->b_data, 0, c_bh->b_size);
3952 set_commit_trans_id(commit, journal->j_trans_id);
3953 set_buffer_uptodate(c_bh);
3954
3955 /* init this journal list */
3956 jl = journal->j_current_jl;
3957
3958 /* we lock the commit before doing anything because
3959 * we want to make sure nobody tries to run flush_commit_list until
3960 * the new transaction is fully setup, and we've already flushed the
3961 * ordered bh list
3962 */
3963 down(&jl->j_commit_lock);
3964
3965 /* save the transaction id in case we need to commit it later */
3966 commit_trans_id = jl->j_trans_id;
3967
3968 atomic_set(&jl->j_older_commits_done, 0);
3969 jl->j_trans_id = journal->j_trans_id;
3970 jl->j_timestamp = journal->j_trans_start_time;
3971 jl->j_commit_bh = c_bh;
3972 jl->j_start = journal->j_start;
3973 jl->j_len = journal->j_len;
3974 atomic_set(&jl->j_nonzerolen, journal->j_len);
3975 atomic_set(&jl->j_commit_left, journal->j_len + 2);
3976 jl->j_realblock = NULL;
3977
3978	/* The ENTIRE FOR LOOP MUST NOT cause schedule to occur.
3979 ** for each real block, add it to the journal list hash,
3980 ** copy into real block index array in the commit or desc block
3981 */
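	/* Capacity note (illustrative): the desc block holds the first
	** trans_half realblock entries and the commit block holds the rest,
	** so with 4k blocks (trans_half == 1018) a transaction can index up
	** to twice that many logged blocks.
	*/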
3982 trans_half = journal_trans_half(p_s_sb->s_blocksize);
3983 for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
3984 if (buffer_journaled(cn->bh)) {
3985 jl_cn = get_cnode(p_s_sb);
3986 if (!jl_cn) {
3987 reiserfs_panic(p_s_sb,
3988 "journal-1676, get_cnode returned NULL\n");
3989 }
3990 if (i == 0) {
3991 jl->j_realblock = jl_cn;
3992 }
3993 jl_cn->prev = last_cn;
3994 jl_cn->next = NULL;
3995 if (last_cn) {
3996 last_cn->next = jl_cn;
3997 }
3998 last_cn = jl_cn;
3999			/* make sure the block we are trying to log is not a block
4000			   of the journal or reserved area */
4001
4002 if (is_block_in_log_or_reserved_area
4003 (p_s_sb, cn->bh->b_blocknr)) {
4004 reiserfs_panic(p_s_sb,
4005 "journal-2332: Trying to log block %lu, which is a log block\n",
4006 cn->bh->b_blocknr);
4007 }
4008 jl_cn->blocknr = cn->bh->b_blocknr;
4009 jl_cn->state = 0;
4010 jl_cn->sb = p_s_sb;
4011 jl_cn->bh = cn->bh;
4012 jl_cn->jlist = jl;
4013 insert_journal_hash(journal->j_list_hash_table, jl_cn);
4014 if (i < trans_half) {
4015 desc->j_realblock[i] =
4016 cpu_to_le32(cn->bh->b_blocknr);
4017 } else {
4018 commit->j_realblock[i - trans_half] =
4019 cpu_to_le32(cn->bh->b_blocknr);
4020 }
4021 } else {
4022 i--;
4023 }
4024 }
4025 set_desc_trans_len(desc, journal->j_len);
4026 set_desc_mount_id(desc, journal->j_mount_id);
4027 set_desc_trans_id(desc, journal->j_trans_id);
4028 set_commit_trans_len(commit, journal->j_len);
4029
4030 /* special check in case all buffers in the journal were marked for not logging */
4031 if (journal->j_len == 0) {
4032 BUG();
4033 }
4034
4035 /* we're about to dirty all the log blocks, mark the description block
4036 * dirty now too. Don't mark the commit block dirty until all the
4037 * others are on disk
4038 */
4039 mark_buffer_dirty(d_bh);
4040
4041 /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
4042 cur_write_start = journal->j_start;
4043 cn = journal->j_first;
4044 jindex = 1; /* start at one so we don't get the desc again */
4045 while (cn) {
4046 clear_buffer_journal_new(cn->bh);
4047 /* copy all the real blocks into log area. dirty log blocks */
4048 if (buffer_journaled(cn->bh)) {
4049 struct buffer_head *tmp_bh;
4050 char *addr;
4051 struct page *page;
4052 tmp_bh =
4053 journal_getblk(p_s_sb,
4054 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
4055 ((cur_write_start +
4056 jindex) %
4057 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
4058 set_buffer_uptodate(tmp_bh);
4059 page = cn->bh->b_page;
4060 addr = kmap(page);
4061 memcpy(tmp_bh->b_data,
4062 addr + offset_in_page(cn->bh->b_data),
4063 cn->bh->b_size);
4064 kunmap(page);
4065 mark_buffer_dirty(tmp_bh);
4066 jindex++;
4067 set_buffer_journal_dirty(cn->bh);
4068 clear_buffer_journaled(cn->bh);
4069 } else {
4070 /* JDirty cleared sometime during transaction. don't log this one */
4071 reiserfs_warning(p_s_sb,
4072 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
4073 brelse(cn->bh);
4074 }
4075 next = cn->next;
4076 free_cnode(p_s_sb, cn);
4077 cn = next;
4078 cond_resched();
4079 }
4080
4081 /* we are done with both the c_bh and d_bh, but
4082 ** c_bh must be written after all other commit blocks,
4083 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
4084 */
4085
4086 journal->j_current_jl = alloc_journal_list(p_s_sb);
4087
4088 /* now it is safe to insert this transaction on the main list */
4089 list_add_tail(&jl->j_list, &journal->j_journal_list);
4090 list_add_tail(&jl->j_working_list, &journal->j_working_list);
4091 journal->j_num_work_lists++;
4092
4093 /* reset journal values for the next transaction */
4094 old_start = journal->j_start;
4095 journal->j_start =
4096 (journal->j_start + journal->j_len +
4097 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
4098 atomic_set(&(journal->j_wcount), 0);
4099 journal->j_bcount = 0;
4100 journal->j_last = NULL;
4101 journal->j_first = NULL;
4102 journal->j_len = 0;
4103 journal->j_trans_start_time = 0;
4104 journal->j_trans_id++;
4105 journal->j_current_jl->j_trans_id = journal->j_trans_id;
4106 journal->j_must_wait = 0;
4107 journal->j_len_alloc = 0;
4108 journal->j_next_full_flush = 0;
4109 journal->j_next_async_flush = 0;
4110 init_journal_hash(p_s_sb);
4111
4112 // make sure reiserfs_add_jh sees the new current_jl before we
4113 // write out the tails
4114 smp_mb();
4115
4116 /* tail conversion targets have to hit the disk before we end the
4117 * transaction. Otherwise a later transaction might repack the tail
4118 * before this transaction commits, leaving the data block unflushed and
4119	 * clean; if we crash before the later transaction commits, the data block
4120 * is lost.
4121 */
4122 if (!list_empty(&jl->j_tail_bh_list)) {
4123 unlock_kernel();
4124 write_ordered_buffers(&journal->j_dirty_buffers_lock,
4125 journal, jl, &jl->j_tail_bh_list);
4126 lock_kernel();
4127 }
4128 if (!list_empty(&jl->j_tail_bh_list))
4129 BUG();
4130 up(&jl->j_commit_lock);
4131
4132 /* honor the flush wishes from the caller, simple commits can
4133 ** be done outside the journal lock, they are done below
4134 **
4135 ** if we don't flush the commit list right now, we put it into
4136 ** the work queue so the people waiting on the async progress work
4137 ** queue don't wait for this proc to flush journal lists and such.
4138 */
4139 if (flush) {
4140 flush_commit_list(p_s_sb, jl, 1);
4141 flush_journal_list(p_s_sb, jl, 1);
4142 } else if (!(jl->j_state & LIST_COMMIT_PENDING))
4143 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4144
4145 /* if the next transaction has any chance of wrapping, flush
4146 ** transactions that might get overwritten. If any journal lists are very
4147 ** old flush them as well.
4148 */
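	/* Worked example (illustrative numbers): with an 8192-block log,
	** j_start == 8000 and j_trans_max == 1024, the next transaction can
	** reach (8000 + 1024 + 1) % 8192 == 833, so lists starting in
	** [8000, 8192) or in [0, 833] must be flushed before it is written.
	*/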
4149 first_jl:
4150 list_for_each_safe(entry, safe, &journal->j_journal_list) {
4151 temp_jl = JOURNAL_LIST_ENTRY(entry);
4152 if (journal->j_start <= temp_jl->j_start) {
4153 if ((journal->j_start + journal->j_trans_max + 1) >=
4154 temp_jl->j_start) {
4155 flush_used_journal_lists(p_s_sb, temp_jl);
4156 goto first_jl;
4157 } else if ((journal->j_start +
4158 journal->j_trans_max + 1) <
4159 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4160 /* if we don't cross into the next transaction and we don't
4161 * wrap, there is no way we can overlap any later transactions
4162 * break now
4163 */
4164 break;
4165 }
4166 } else if ((journal->j_start +
4167 journal->j_trans_max + 1) >
4168 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4169 if (((journal->j_start + journal->j_trans_max + 1) %
4170 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
4171 temp_jl->j_start) {
4172 flush_used_journal_lists(p_s_sb, temp_jl);
4173 goto first_jl;
4174 } else {
4175				 * we don't overlap anything from our start to the end of the
4176 * log, and our wrapped portion doesn't overlap anything at
4177 * the start of the log. We can break
4178 */
4179 break;
4180 }
4181 }
4182 }
4183 flush_old_journal_lists(p_s_sb);
4184
4185 journal->j_current_jl->j_list_bitmap =
4186 get_list_bitmap(p_s_sb, journal->j_current_jl);
4187
4188 if (!(journal->j_current_jl->j_list_bitmap)) {
4189 reiserfs_panic(p_s_sb,
4190 "journal-1996: do_journal_end, could not get a list bitmap\n");
4191 }
4192
4193 atomic_set(&(journal->j_jlock), 0);
4194 unlock_journal(p_s_sb);
4195	/* wake up anybody waiting to join. */
4196 clear_bit(J_WRITERS_QUEUED, &journal->j_state);
4197 wake_up(&(journal->j_join_wait));
4198
4199 if (!flush && wait_on_commit &&
4200 journal_list_still_alive(p_s_sb, commit_trans_id)) {
4201 flush_commit_list(p_s_sb, jl, 1);
4202 }
4203 out:
4204 reiserfs_check_lock_depth(p_s_sb, "journal end2");
4205
4206 memset(th, 0, sizeof(*th));
4207 /* Re-set th->t_super, so we can properly keep track of how many
4208 * persistent transactions there are. We need to do this so if this
4209 * call is part of a failed restart_transaction, we can free it later */
4210 th->t_super = p_s_sb;
4211
4212 return journal->j_errno;
4213}
4214
4215static void __reiserfs_journal_abort_hard(struct super_block *sb)
4216{
4217 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4218 if (test_bit(J_ABORTED, &journal->j_state))
4219 return;
4220
4221 printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
4222 reiserfs_bdevname(sb));
4223
4224 sb->s_flags |= MS_RDONLY;
4225 set_bit(J_ABORTED, &journal->j_state);
4226
4227#ifdef CONFIG_REISERFS_CHECK
4228	dump_stack();
4229#endif
4230}
4231
4232static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
4233{
4234 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4235 if (test_bit(J_ABORTED, &journal->j_state))
4236 return;
4237
4238 if (!journal->j_errno)
4239 journal->j_errno = errno;
4240
4241	__reiserfs_journal_abort_hard(sb);
4242}
4243
4244void reiserfs_journal_abort(struct super_block *sb, int errno)
4245{
4246	return __reiserfs_journal_abort_soft(sb, errno);
4247}