/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"

/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
				appended in collection retry loop */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file.  This also creates segments without
			   a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
			       int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)

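/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): the nilfs_cnt32_* macros compare 32-bit sequence counters
 * modulo 2^32, so the ordering stays correct even after the counters
 * wrap around.
 */
static inline int nilfs_cnt32_wraparound_example(void)
{
	__u32 older = 0xfffffffe;	/* value just before wraparound */
	__u32 newer = 0x00000001;	/* value just after wraparound */

	/* true, although older > newer as plain unsigned integers */
	return nilfs_cnt32_lt(older, newer);
}
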
/*
 * Transaction
 */
static struct kmem_cache *nilfs_transaction_cachep;

/**
 * nilfs_init_transaction_cache - create a cache for nilfs_transaction_info
 *
 * nilfs_init_transaction_cache() creates a slab cache for the struct
 * nilfs_transaction_info.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_init_transaction_cache(void)
{
	nilfs_transaction_cachep =
		kmem_cache_create("nilfs2_transaction_cache",
				  sizeof(struct nilfs_transaction_info),
				  0, SLAB_RECLAIM_ACCOUNT, NULL);
	return (nilfs_transaction_cachep == NULL) ? -ENOMEM : 0;
}

/**
 * nilfs_destroy_transaction_cache - destroy the cache for transaction info
 *
 * nilfs_destroy_transaction_cache() frees the slab cache for the struct
 * nilfs_transaction_info.
 */
void nilfs_destroy_transaction_cache(void)
{
	kmem_cache_destroy(nilfs_transaction_cachep);
}

static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If the journal_info field is occupied by another
			 * FS, it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct nilfs_sb_info *sbi;
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	sbi = NILFS_SB(sb);
	nilfs = sbi->s_nilfs;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return ret;
}

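/*
 * Usage sketch (added for exposition; not part of the original source,
 * and the caller below is hypothetical): a typical user brackets an
 * indivisible operation with nilfs_transaction_begin() and
 * nilfs_transaction_commit(), calling nilfs_transaction_abort() on
 * failure paths instead of committing.
 */
static int nilfs_transaction_usage_sketch(struct super_block *sb)
{
	struct nilfs_transaction_info ti;
	int err;

	err = nilfs_transaction_begin(sb, &ti, 1);
	if (unlikely(err))
		return err;	/* -ENOMEM or -ENOSPC */

	/* ... file operations that must be committed indivisibly ... */

	return nilfs_transaction_commit(sb);
}
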
247/**
47420c79 248 * nilfs_transaction_commit - commit indivisible file operations.
9ff05123 249 * @sb: super block
9ff05123 250 *
47420c79
RK
251 * nilfs_transaction_commit() releases the read semaphore which is
252 * acquired by nilfs_transaction_begin(). This is only performed
253 * in outermost call of this function. If a commit flag is set,
254 * nilfs_transaction_commit() sets a timer to start the segment
255 * constructor. If a sync flag is set, it starts construction
256 * directly.
9ff05123 257 */
47420c79 258int nilfs_transaction_commit(struct super_block *sb)
9ff05123
RK
259{
260 struct nilfs_transaction_info *ti = current->journal_info;
261 struct nilfs_sb_info *sbi;
262 struct nilfs_sc_info *sci;
263 int err = 0;
264
265 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
47420c79 266 ti->ti_flags |= NILFS_TI_COMMIT;
9ff05123
RK
267 if (ti->ti_count > 0) {
268 ti->ti_count--;
269 return 0;
270 }
271 sbi = NILFS_SB(sb);
272 sci = NILFS_SC(sbi);
273 if (sci != NULL) {
274 if (ti->ti_flags & NILFS_TI_COMMIT)
275 nilfs_segctor_start_timer(sci);
276 if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
277 sci->sc_watermark)
278 nilfs_segctor_do_flush(sci, 0);
279 }
280 up_read(&sbi->s_nilfs->ns_segctor_sem);
281 current->journal_info = ti->ti_save;
282
283 if (ti->ti_flags & NILFS_TI_SYNC)
284 err = nilfs_construct_segment(sb);
285 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
286 kmem_cache_free(nilfs_transaction_cachep, ti);
287 return err;
288}
289
47420c79
RK
290void nilfs_transaction_abort(struct super_block *sb)
291{
292 struct nilfs_transaction_info *ti = current->journal_info;
293
294 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
295 if (ti->ti_count > 0) {
296 ti->ti_count--;
297 return;
298 }
299 up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);
300
301 current->journal_info = ti->ti_save;
302 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
303 kmem_cache_free(nilfs_transaction_cachep, ti);
304}
305
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&sbi->s_nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));

		up_write(&sbi->s_nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}

static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

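/*
 * Illustrative note (added for exposition): an entry mapped by
 * nilfs_segctor_map_segsum_entry() never straddles a block boundary;
 * when it would, the pointer is rewound to offset 0 of the next segment
 * summary buffer.  A hypothetical caller reserving room for one finfo:
 *
 *	struct nilfs_finfo *finfo =
 *		nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
 *					       sizeof(*finfo));
 */
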
/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &sci->sc_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough compared with blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);
	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(ii->i_cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

static int nilfs_handle_bmap_error(int err, const char *fname,
				   struct inode *inode, struct super_block *sb)
{
	if (err == -EINVAL) {
		nilfs_error(sb, fname, "broken bmap (inode=%lu)\n",
			    inode->i_ino);
		err = -EIO;
	}
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);
	return 0;
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		if (mapping->host) {
			lock_page(page);
			if (!page_has_buffers(page))
				create_empty_buffers(page,
						     1 << inode->i_blkbits, 0);
			unlock_page(page);
		}

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&sbi->s_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &sbi->s_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&sbi->s_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static int nilfs_test_metadata_dirty(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(sbi->s_ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if (ret || nilfs_doing_gc())
		if (nilfs_mdt_fetch_dirty(nilfs_dat_inode(nilfs)))
			ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int ret = 0;

	if (nilfs_test_metadata_dirty(sbi))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&sbi->s_inode_lock);
	if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&sbi->s_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_mdt_clear_dirty(sbi->s_ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs));
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/* The following code duplicates part of cpfile.  But it is
		   needed to collect the checkpoint even if it was not newly
		   created */
		nilfs_mdt_mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic_read(&sbi->s_inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic_read(&sbi->s_blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sbi->s_ifile, &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
					    struct inode *ifile)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

/*
 * CRC calculation routines
 */
static void nilfs_fill_in_super_root_crc(struct buffer_head *bh_sr, u32 seed)
{
	struct nilfs_super_root *raw_sr =
		(struct nilfs_super_root *)bh_sr->b_data;
	u32 crc;

	crc = crc32_le(seed,
		       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
		       NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
	raw_sr->sr_sum = cpu_to_le32(crc);
}

static void nilfs_segctor_fill_in_checksums(struct nilfs_sc_info *sci,
					    u32 seed)
{
	struct nilfs_segment_buffer *segbuf;

	if (sci->sc_super_root)
		nilfs_fill_in_super_root_crc(sci->sc_super_root, seed);

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
		nilfs_segbuf_fill_in_data_crc(segbuf, seed);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr = sci->sc_super_root;
	struct nilfs_super_root *raw_sr =
		(struct nilfs_super_root *)bh_sr->b_data;
	unsigned isz = nilfs->ns_inode_size;

	raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_entry(listp->next, struct buffer_head,
				b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sbi->s_ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs),
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *n;
	__u64 nextnum;
	int err;

	if (list_empty(&sci->sc_segbufs)) {
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			return -ENOMEM;
		list_add(&segbuf->sb_list, &sci->sc_segbufs);
	} else
		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

	nilfs_segbuf_map(segbuf, nilfs->ns_segnum, nilfs->ns_pseg_offset,
			 nilfs);

	if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
		nilfs_shift_to_next_segment(nilfs);
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
	}
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (unlikely(err))
		return err;

	if (nilfs->ns_segnum == nilfs->ns_nextnum) {
		/* Start from the head of a new full segment */
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (unlikely(err))
			return err;
	} else
		nextnum = nilfs->ns_nextnum;

	segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	/* truncating segment buffers */
	list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
					  sb_list) {
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_free(segbuf);
	}
	return 0;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry_safe(segbuf, n, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_free(segbuf);
	}
	return err;
}

static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
						   struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/* Case 1a: Partial segment appended into an existing
			   segment */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(nilfs->ns_sufile,
					       segbuf->sb_segnum);
	}
}

static void nilfs_segctor_clear_segment_buffers(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list)
		nilfs_segbuf_clear(segbuf);
	sci->sc_super_root = NULL;
}

static void nilfs_segctor_destroy_segment_buffers(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;

	while (!list_empty(&sci->sc_segbufs)) {
		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_free(segbuf);
	}
	/* sci->sc_curseg = NULL; */
}

static void nilfs_segctor_end_construction(struct nilfs_sc_info *sci,
					   struct the_nilfs *nilfs, int err)
{
	if (unlikely(err)) {
		nilfs_segctor_free_incomplete_segments(sci, nilfs);
		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			int ret;

			ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(ret); /* do not happen */
		}
	}
	nilfs_segctor_clear_segment_buffers(sci);
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeed because the segusage is dirty */
	}
}

static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeed because the segusage is dirty */

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeed */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last, *n;
	int ret;

	list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
					  sb_list) {
		list_del_init(&segbuf->sb_list);
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
		nilfs_segbuf_free(segbuf);
	}
}


static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_super_root = NULL;
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
			break;

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
		}
		nilfs_segctor_clear_segment_buffers(sci);

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == sci->sc_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			if (buffer_nilfs_node(bh))
				inode = NILFS_BTNC_I(bh->b_page->mapping);
			else
				inode = NILFS_AS_I(bh->b_page->mapping);

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super);
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static int
nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
{
	struct page *clone_page;
	struct buffer_head *bh, *head, *bh2;
	void *kaddr;

	bh = head = page_buffers(page);

	clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
	if (unlikely(!clone_page))
		return -ENOMEM;

	bh2 = page_buffers(clone_page);
	kaddr = kmap_atomic(page, KM_USER0);
	do {
		if (list_empty(&bh->b_assoc_buffers))
			continue;
		get_bh(bh2);
		page_cache_get(clone_page); /* for each bh */
		memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
		bh2->b_blocknr = bh->b_blocknr;
		list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
		list_add_tail(&bh->b_assoc_buffers, out);
	} while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
	kunmap_atomic(kaddr, KM_USER0);

	if (!TestSetPageWriteback(clone_page))
		inc_zone_page_state(clone_page, NR_WRITEBACK);
	unlock_page(clone_page);

	return 0;
}

static int nilfs_test_page_to_be_frozen(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
		return 0;

	if (page_mapped(page)) {
		ClearPageChecked(page);
		return 1;
	}
	return PageChecked(page);
}

static int nilfs_begin_page_io(struct page *page, struct list_head *out)
{
	if (!page || PageWriteback(page))
		/* For split b-tree node pages, this function may be called
		   twice.  We ignore the 2nd or later calls by this check. */
		return 0;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (nilfs_test_page_to_be_frozen(page)) {
		int err = nilfs_copy_replace_page_buffers(page, out);
		if (unlikely(err))
			return err;
	}
	return 0;
}

static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
				       struct page **failed_page)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct list_head *list = &sci->sc_copied_buffers;
	int err;

	*failed_page = NULL;
	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == sci->sc_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				err = nilfs_begin_page_io(fs_page, list);
				if (unlikely(err)) {
					*failed_page = fs_page;
					goto out;
				}
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	err = nilfs_begin_page_io(fs_page, list);
	if (unlikely(err))
		*failed_page = fs_page;
 out:
	return err;
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf;
	int err, res;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segbuf_write(segbuf, nilfs);

		res = nilfs_segbuf_wait(segbuf);
		err = err ? : res;
		if (err)
			return err;
	}
	return 0;
}

static void __nilfs_end_page_io(struct page *page, int err)
{
	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	if (buffer_nilfs_allocated(page_buffers(page))) {
		if (TestClearPageWriteback(page))
			dec_zone_page_state(page, NR_WRITEBACK);
	} else
		end_page_writeback(page);
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, the
			 * dirty flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	__nilfs_end_page_io(page, err);
}

static void nilfs_clear_copied_buffers(struct list_head *list, int err)
{
	struct buffer_head *bh, *head;
	struct page *page;

	while (!list_empty(list)) {
		bh = list_entry(list->next, struct buffer_head,
				b_assoc_buffers);
		page = bh->b_page;
		page_cache_get(page);
		head = bh = page_buffers(page);
		do {
			if (!list_empty(&bh->b_assoc_buffers)) {
				list_del_init(&bh->b_assoc_buffers);
				if (!err) {
					set_buffer_uptodate(bh);
					clear_buffer_dirty(bh);
					clear_buffer_nilfs_volatile(bh);
				}
				brelse(bh); /* for b_assoc_buffers */
			}
		} while ((bh = bh->b_this_page) != head);

		__nilfs_end_page_io(page, err);
		page_cache_release(page);
	}
}

1878static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
1879 struct page *failed_page, int err)
1880{
1881 struct nilfs_segment_buffer *segbuf;
1882 struct page *bd_page = NULL, *fs_page = NULL;
1883
1884 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1885 struct buffer_head *bh;
1886
1887 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1888 b_assoc_buffers) {
1889 if (bh->b_page != bd_page) {
1890 if (bd_page)
1891 end_page_writeback(bd_page);
1892 bd_page = bh->b_page;
1893 }
1894 }
1895
1896 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1897 b_assoc_buffers) {
1898 if (bh == sci->sc_super_root) {
1899 if (bh->b_page != bd_page) {
1900 end_page_writeback(bd_page);
1901 bd_page = bh->b_page;
1902 }
1903 break;
1904 }
1905 if (bh->b_page != fs_page) {
1906 nilfs_end_page_io(fs_page, err);
1907 if (fs_page && fs_page == failed_page)
1908 goto done;
1909 fs_page = bh->b_page;
1910 }
1911 }
1912 }
1913 if (bd_page)
1914 end_page_writeback(bd_page);
1915
1916 nilfs_end_page_io(fs_page, err);
1917 done:
1918 nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
1919}
1920
1921static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1922 struct nilfs_segment_buffer *segbuf)
1923{
1924 nilfs->ns_segnum = segbuf->sb_segnum;
1925 nilfs->ns_nextnum = segbuf->sb_nextnum;
1926 nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1927 + segbuf->sb_sum.nblocks;
1928 nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1929 nilfs->ns_ctime = segbuf->sb_sum.ctime;
1930}
1931
1932static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1933{
1934 struct nilfs_segment_buffer *segbuf;
1935 struct page *bd_page = NULL, *fs_page = NULL;
1936 struct nilfs_sb_info *sbi = sci->sc_sbi;
1937 struct the_nilfs *nilfs = sbi->s_nilfs;
1938 int update_sr = (sci->sc_super_root != NULL);
1939
1940 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1941 struct buffer_head *bh;
1942
1943 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1944 b_assoc_buffers) {
1945 set_buffer_uptodate(bh);
1946 clear_buffer_dirty(bh);
1947 if (bh->b_page != bd_page) {
1948 if (bd_page)
1949 end_page_writeback(bd_page);
1950 bd_page = bh->b_page;
1951 }
1952 }
1953 /*
1954 * We assume that buffers belonging to the same page are
1955 * contiguous in the buffer list.
1956 * Under this assumption, the last buffer of each page is
1957 * identifiable by a change of bh->b_page
1958 * (page != fs_page).
1959 *
1960 * For B-tree node blocks, however, this assumption is not
1961 * guaranteed. The cleanup code of B-tree node pages needs
1962 * special care.
1963 */
1964 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1965 b_assoc_buffers) {
1966 set_buffer_uptodate(bh);
1967 clear_buffer_dirty(bh);
1968 clear_buffer_nilfs_volatile(bh);
1969 if (bh == sci->sc_super_root) {
1970 if (bh->b_page != bd_page) {
1971 end_page_writeback(bd_page);
1972 bd_page = bh->b_page;
1973 }
1974 break;
1975 }
1976 if (bh->b_page != fs_page) {
1977 nilfs_end_page_io(fs_page, 0);
1978 fs_page = bh->b_page;
1979 }
1980 }
1981
1982 if (!NILFS_SEG_SIMPLEX(&segbuf->sb_sum)) {
1983 if (NILFS_SEG_LOGBGN(&segbuf->sb_sum)) {
1984 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1985 sci->sc_lseg_stime = jiffies;
1986 }
1987 if (NILFS_SEG_LOGEND(&segbuf->sb_sum))
1988 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1989 }
1990 }
1991 /*
1992 * Since a page may span multiple segment buffers,
1993 * the end of the last page must be checked outside the loop.
1994 */
1995 if (bd_page)
1996 end_page_writeback(bd_page);
1997
1998 nilfs_end_page_io(fs_page, 0);
1999
2000 nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);
2001
2002 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
2003
2004 if (nilfs_doing_gc()) {
2005 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
2006 if (update_sr)
2007 nilfs_commit_gcdat_inode(nilfs);
2008 } else
2009 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
2010
2011 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
2012
2013 segbuf = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
2014 nilfs_set_next_segment(nilfs, segbuf);
2015
2016 if (update_sr) {
2017 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
2018 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
2019 sbi->s_super->s_dirt = 1;
2020
2021 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
2022 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2023 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
2024 } else
2025 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
2026}
2027
2028static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
2029 struct nilfs_sb_info *sbi)
2030{
2031 struct nilfs_inode_info *ii, *n;
2032 __u64 cno = sbi->s_nilfs->ns_cno;
2033
2034 spin_lock(&sbi->s_inode_lock);
2035 retry:
2036 list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
2037 if (!ii->i_bh) {
2038 struct buffer_head *ibh;
2039 int err;
2040
2041 spin_unlock(&sbi->s_inode_lock);
2042 err = nilfs_ifile_get_inode_block(
2043 sbi->s_ifile, ii->vfs_inode.i_ino, &ibh);
2044 if (unlikely(err)) {
2045 nilfs_warning(sbi->s_super, __func__,
2046 "failed to get inode block.\n");
2047 return err;
2048 }
2049 nilfs_mdt_mark_buffer_dirty(ibh);
2050 nilfs_mdt_mark_dirty(sbi->s_ifile);
2051 spin_lock(&sbi->s_inode_lock);
2052 if (likely(!ii->i_bh))
2053 ii->i_bh = ibh;
2054 else
2055 brelse(ibh);
2056 goto retry;
2057 }
2058 ii->i_cno = cno;
2059
2060 clear_bit(NILFS_I_QUEUED, &ii->i_state);
2061 set_bit(NILFS_I_BUSY, &ii->i_state);
2062 list_del(&ii->i_dirty);
2063 list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
2064 }
2065 spin_unlock(&sbi->s_inode_lock);
2066
2067 NILFS_I(sbi->s_ifile)->i_cno = cno;
2068
2069 return 0;
2070}
2071
2072static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
2073 struct nilfs_sb_info *sbi)
2074{
2075 struct nilfs_transaction_info *ti = current->journal_info;
2076 struct nilfs_inode_info *ii, *n;
2077 __u64 cno = sbi->s_nilfs->ns_cno;
2078
2079 spin_lock(&sbi->s_inode_lock);
2080 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
2081 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
2082 test_bit(NILFS_I_DIRTY, &ii->i_state)) {
2083 /* The current checkpoint number (=nilfs->ns_cno) is
2084 changed between check-in and check-out only if the
2085 super root is written out. So, we can update i_cno
2086 for the inodes that remain in the dirty list. */
2087 ii->i_cno = cno;
2088 continue;
2089 }
2090 clear_bit(NILFS_I_BUSY, &ii->i_state);
2091 brelse(ii->i_bh);
2092 ii->i_bh = NULL;
2093 list_del(&ii->i_dirty);
2094 list_add_tail(&ii->i_dirty, &ti->ti_garbage);
2095 }
2096 spin_unlock(&sbi->s_inode_lock);
2097}
2098
2099/*
2100 * Main procedure of segment constructor
2101 */
2102static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2103{
2104 struct nilfs_sb_info *sbi = sci->sc_sbi;
2105 struct the_nilfs *nilfs = sbi->s_nilfs;
2106 struct page *failed_page;
2107 int err, has_sr = 0;
2108
2109 sci->sc_stage.scnt = NILFS_ST_INIT;
2110
2111 err = nilfs_segctor_check_in_files(sci, sbi);
2112 if (unlikely(err))
2113 goto out;
2114
2115 if (nilfs_test_metadata_dirty(sbi))
2116 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2117
2118 if (nilfs_segctor_clean(sci))
2119 goto out;
2120
2121 do {
2122 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2123
2124 err = nilfs_segctor_begin_construction(sci, nilfs);
2125 if (unlikely(err))
2126 goto out;
2127
2128 /* Update time stamp */
2129 sci->sc_seg_ctime = get_seconds();
2130
2131 err = nilfs_segctor_collect(sci, nilfs, mode);
2132 if (unlikely(err))
2133 goto failed;
2134
2135 has_sr = (sci->sc_super_root != NULL);
2136
2137 /* Avoid empty segment */
2138 if (sci->sc_stage.scnt == NILFS_ST_DONE &&
2139 NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
2140 nilfs_segctor_end_construction(sci, nilfs, 1);
2141 goto out;
2142 }
2143
2144 err = nilfs_segctor_assign(sci, mode);
2145 if (unlikely(err))
2146 goto failed;
2147
2148 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2149 nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);
2150
2151 if (has_sr) {
2152 err = nilfs_segctor_fill_in_checkpoint(sci);
2153 if (unlikely(err))
2154 goto failed_to_make_up;
2155
2156 nilfs_segctor_fill_in_super_root(sci, nilfs);
2157 }
2158 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2159
2160 /* Write partial segments */
2161 err = nilfs_segctor_prepare_write(sci, &failed_page);
2162 if (unlikely(err))
2163 goto failed_to_write;
2164
2165 nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed);
2166
2167 err = nilfs_segctor_write(sci, nilfs);
2168 if (unlikely(err))
2169 goto failed_to_write;
2170
2171 nilfs_segctor_complete_write(sci);
2172
2173 /* Commit segments */
2174 if (has_sr)
2175 nilfs_segctor_clear_metadata_dirty(sci);
2176
2177 nilfs_segctor_end_construction(sci, nilfs, 0);
2178
2179 } while (sci->sc_stage.scnt != NILFS_ST_DONE);
2180
2181 out:
2182 nilfs_segctor_destroy_segment_buffers(sci);
2183 nilfs_segctor_check_out_files(sci, sbi);
2184 return err;
2185
2186 failed_to_write:
2187 nilfs_segctor_abort_write(sci, failed_page, err);
2188 nilfs_segctor_cancel_segusage(sci, nilfs->ns_sufile);
2189
2190 failed_to_make_up:
2191 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2192 nilfs_redirty_inodes(&sci->sc_dirty_files);
2193
2194 failed:
2195 if (nilfs_doing_gc())
2196 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2197 nilfs_segctor_end_construction(sci, nilfs, err);
2198 goto out;
2199}
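/*
 * Note: the do-while loop above repeats construction passes until the
 * collection stage reaches NILFS_ST_DONE.  Each failure label unwinds
 * exactly what has been set up so far: failed_to_write aborts page
 * writeback and cancels the segment usage updates, failed_to_make_up
 * redirties the collected file inodes, and failed terminates the
 * construction with the error code.
 */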
2200
2201/**
2202 * nilfs_segctor_start_timer - set timer of background write
2203 * @sci: nilfs_sc_info
2204 *
2205 * If the timer has already been set, it ignores the new request.
2206 * This function MUST be called within a section locking the segment
2207 * semaphore.
2208 */
2209static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2210{
2211 spin_lock(&sci->sc_state_lock);
2212 if (sci->sc_timer && !(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2213 sci->sc_timer->expires = jiffies + sci->sc_interval;
2214 add_timer(sci->sc_timer);
2215 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2216 }
2217 spin_unlock(&sci->sc_state_lock);
2218}
2219
2220static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2221{
2222 spin_lock(&sci->sc_state_lock);
2223 if (!(sci->sc_flush_request & (1 << bn))) {
2224 unsigned long prev_req = sci->sc_flush_request;
2225
2226 sci->sc_flush_request |= (1 << bn);
2227 if (!prev_req)
2228 wake_up(&sci->sc_wait_daemon);
2229 }
2230 spin_unlock(&sci->sc_state_lock);
2231}
2232
2233/**
2234 * nilfs_flush_segment - trigger a segment construction for resource control
2235 * @sb: super block
2236 * @ino: inode number of the file to be flushed out.
2237 */
2238void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2239{
2240 struct nilfs_sb_info *sbi = NILFS_SB(sb);
2241 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2242
2243 if (!sci || nilfs_doing_construction())
2244 return;
2245 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2246 /* assign bit 0 to data files */
2247}
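/*
 * A minimal usage sketch (not part of the original source): a write path
 * could call nilfs_flush_segment() to relieve dirty-block pressure on a
 * single file.  The helper name and the threshold value are hypothetical.
 */
#if 0 /* example only */
static void example_relieve_pressure(struct inode *inode)
{
	struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;

	/* Ask segctord to flush this file's dirty blocks once too many
	 * have accumulated; the request is delivered through the
	 * sc_flush_request bitmap (threshold is made up). */
	if (atomic_read(&nilfs->ns_ndirtyblks) > 1024)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);
}
#endif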
2248
2249struct nilfs_segctor_wait_request {
2250 wait_queue_t wq;
2251 __u32 seq;
2252 int err;
2253 atomic_t done;
2254};
2255
2256static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2257{
2258 struct nilfs_segctor_wait_request wait_req;
2259 int err = 0;
2260
2261 spin_lock(&sci->sc_state_lock);
2262 init_wait(&wait_req.wq);
2263 wait_req.err = 0;
2264 atomic_set(&wait_req.done, 0);
2265 wait_req.seq = ++sci->sc_seq_request;
2266 spin_unlock(&sci->sc_state_lock);
2267
2268 init_waitqueue_entry(&wait_req.wq, current);
2269 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2270 set_current_state(TASK_INTERRUPTIBLE);
2271 wake_up(&sci->sc_wait_daemon);
2272
2273 for (;;) {
2274 if (atomic_read(&wait_req.done)) {
2275 err = wait_req.err;
2276 break;
2277 }
2278 if (!signal_pending(current)) {
2279 schedule();
2280 continue;
2281 }
2282 err = -ERESTARTSYS;
2283 break;
2284 }
2285 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2286 return err;
2287}
2288
2289static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2290{
2291 struct nilfs_segctor_wait_request *wrq, *n;
2292 unsigned long flags;
2293
2294 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2295 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2296 wq.task_list) {
2297 if (!atomic_read(&wrq->done) &&
2298 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2299 wrq->err = err;
2300 atomic_set(&wrq->done, 1);
2301 }
2302 if (atomic_read(&wrq->done)) {
2303 wrq->wq.func(&wrq->wq,
2304 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2305 0, NULL);
2306 }
2307 }
2308 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2309}
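/*
 * Note: nilfs_segctor_sync() and nilfs_segctor_wakeup() cooperate through
 * a sequence-number handshake: each waiter takes a ticket by incrementing
 * sc_seq_request and sleeps until the daemon advances sc_seq_done to or
 * past that ticket.  The comparison is done with nilfs_cnt32_ge() so that
 * wraparound of the 32-bit counters is harmless.
 */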
2310
2311/**
2312 * nilfs_construct_segment - construct a logical segment
2313 * @sb: super block
2314 *
2315 * Return Value: On success, 0 is returned. On errors, one of the following
2316 * negative error codes is returned.
2317 *
2318 * %-EROFS - Read only filesystem.
2319 *
2320 * %-EIO - I/O error
2321 *
2322 * %-ENOSPC - No space left on device (only in a panic state).
2323 *
2324 * %-ERESTARTSYS - Interrupted.
2325 *
2326 * %-ENOMEM - Insufficient memory available.
2327 */
2328int nilfs_construct_segment(struct super_block *sb)
2329{
2330 struct nilfs_sb_info *sbi = NILFS_SB(sb);
2331 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2332 struct nilfs_transaction_info *ti;
2333 int err;
2334
2335 if (!sci)
2336 return -EROFS;
2337
2338 /* A call inside transactions causes a deadlock. */
2339 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2340
2341 err = nilfs_segctor_sync(sci);
2342 return err;
2343}
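/*
 * A minimal usage sketch (not part of the original source): how a
 * sync_fs-style caller might use nilfs_construct_segment().  The wrapper
 * name is hypothetical.
 */
#if 0 /* example only */
static int example_sync_fs(struct super_block *sb, int wait)
{
	/* Synchronous construction of a segment with a super root;
	 * calling this inside an active transaction would deadlock
	 * (see the BUG_ON above). */
	return wait ? nilfs_construct_segment(sb) : 0;
}
#endif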
2344
2345/**
2346 * nilfs_construct_dsync_segment - construct a data-only logical segment
2347 * @sb: super block
2348 * @inode: inode whose data blocks should be written out
2349 * @start: start byte offset
2350 * @end: end byte offset (inclusive)
2351 *
2352 * Return Value: On success, 0 is returned. On errors, one of the following
2353 * negative error codes is returned.
2354 *
2355 * %-EROFS - Read only filesystem.
2356 *
2357 * %-EIO - I/O error
2358 *
2359 * %-ENOSPC - No space left on device (only in a panic state).
2360 *
2361 * %-ERESTARTSYS - Interrupted.
2362 *
2363 * %-ENOMEM - Insufficient memory available.
2364 */
2365int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2366 loff_t start, loff_t end)
2367{
2368 struct nilfs_sb_info *sbi = NILFS_SB(sb);
2369 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2370 struct nilfs_inode_info *ii;
2371 struct nilfs_transaction_info ti;
2372 int err = 0;
2373
2374 if (!sci)
2375 return -EROFS;
2376
2377 nilfs_transaction_lock(sbi, &ti, 0);
2378
2379 ii = NILFS_I(inode);
2380 if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
2381 nilfs_test_opt(sbi, STRICT_ORDER) ||
2382 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2383 nilfs_discontinued(sbi->s_nilfs)) {
2384 nilfs_transaction_unlock(sbi);
2385 err = nilfs_segctor_sync(sci);
2386 return err;
2387 }
2388
2389 spin_lock(&sbi->s_inode_lock);
2390 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2391 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2392 spin_unlock(&sbi->s_inode_lock);
2393 nilfs_transaction_unlock(sbi);
2394 return 0;
2395 }
2396 spin_unlock(&sbi->s_inode_lock);
2397 sci->sc_dsync_inode = ii;
2398 sci->sc_dsync_start = start;
2399 sci->sc_dsync_end = end;
2400
2401 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2402
2403 nilfs_transaction_unlock(sbi);
2404 return err;
2405}
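/*
 * A minimal usage sketch (not part of the original source): a
 * datasync-style fsync path could flush a byte range with
 * nilfs_construct_dsync_segment().  The wrapper name is hypothetical.
 */
#if 0 /* example only */
static int example_fdatasync(struct inode *inode, loff_t start, loff_t end)
{
	/* Writes data blocks only; the callee falls back to a full
	 * synchronous construction when inode metadata is dirty or the
	 * log is unclosed or discontinued. */
	return nilfs_construct_dsync_segment(inode->i_sb, inode,
					     start, end);
}
#endif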
2406
2407struct nilfs_segctor_req {
2408 int mode;
2409 __u32 seq_accepted;
2410 int sc_err; /* construction failure */
2411 int sb_err; /* super block writeback failure */
2412};
2413
2414#define FLUSH_FILE_BIT (0x1) /* data file only */
2415#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
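/*
 * Note: sc_flush_request is interpreted as a bitmap.  Bit 0 stands for
 * ordinary data files (nilfs_flush_segment() maps every non-metadata
 * inode to bit 0), and bit NILFS_DAT_INO stands for the DAT metadata
 * file.
 */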
2416
2417static void nilfs_segctor_accept(struct nilfs_sc_info *sci,
2418 struct nilfs_segctor_req *req)
2419{
2420 req->sc_err = req->sb_err = 0;
2421 spin_lock(&sci->sc_state_lock);
2422 req->seq_accepted = sci->sc_seq_request;
2423 spin_unlock(&sci->sc_state_lock);
2424
2425 if (sci->sc_timer)
2426 del_timer_sync(sci->sc_timer);
2427}
2428
2429static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
2430 struct nilfs_segctor_req *req)
2431{
2432 /* Clear requests (even when the construction failed) */
2433 spin_lock(&sci->sc_state_lock);
2434
2435 if (req->mode == SC_LSEG_SR) {
2436 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2437 sci->sc_seq_done = req->seq_accepted;
2438 nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err);
2439 sci->sc_flush_request = 0;
2440 } else {
2441 if (req->mode == SC_FLUSH_FILE)
2442 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2443 else if (req->mode == SC_FLUSH_DAT)
2444 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2445
2446 /* re-enable timer if checkpoint creation was not done */
2447 if (sci->sc_timer && (sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2448 time_before(jiffies, sci->sc_timer->expires))
2449 add_timer(sci->sc_timer);
2450 }
2451 spin_unlock(&sci->sc_state_lock);
2452}
2453
2454static int nilfs_segctor_construct(struct nilfs_sc_info *sci,
2455 struct nilfs_segctor_req *req)
2456{
2457 struct nilfs_sb_info *sbi = sci->sc_sbi;
2458 struct the_nilfs *nilfs = sbi->s_nilfs;
2459 int err = 0;
2460
2461 if (nilfs_discontinued(nilfs))
2462 req->mode = SC_LSEG_SR;
2463 if (!nilfs_segctor_confirm(sci)) {
2464 err = nilfs_segctor_do_construct(sci, req->mode);
2465 req->sc_err = err;
2466 }
2467 if (likely(!err)) {
2468 if (req->mode != SC_FLUSH_DAT)
2469 atomic_set(&nilfs->ns_ndirtyblks, 0);
2470 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2471 nilfs_discontinued(nilfs)) {
2472 down_write(&nilfs->ns_sem);
2473 req->sb_err = nilfs_commit_super(sbi,
2474 nilfs_altsb_need_update(nilfs));
2475 up_write(&nilfs->ns_sem);
2476 }
2477 }
2478 return err;
2479}
2480
2481static void nilfs_construction_timeout(unsigned long data)
2482{
2483 struct task_struct *p = (struct task_struct *)data;
2484 wake_up_process(p);
2485}
2486
2487static void
2488nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2489{
2490 struct nilfs_inode_info *ii, *n;
2491
2492 list_for_each_entry_safe(ii, n, head, i_dirty) {
2493 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2494 continue;
2495 hlist_del_init(&ii->vfs_inode.i_hash);
2496 list_del_init(&ii->i_dirty);
2497 nilfs_clear_gcinode(&ii->vfs_inode);
2498 }
2499}
2500
2501int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2502 void **kbufs)
2503{
2504 struct nilfs_sb_info *sbi = NILFS_SB(sb);
2505 struct nilfs_sc_info *sci = NILFS_SC(sbi);
2506 struct the_nilfs *nilfs = sbi->s_nilfs;
2507 struct nilfs_transaction_info ti;
2508 struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
2509 int err;
2510
2511 if (unlikely(!sci))
2512 return -EROFS;
2513
2514 nilfs_transaction_lock(sbi, &ti, 1);
2515
2516 err = nilfs_init_gcdat_inode(nilfs);
2517 if (unlikely(err))
2518 goto out_unlock;
2519
2520 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2521 if (unlikely(err))
2522 goto out_unlock;
2523
2524 sci->sc_freesegs = kbufs[4];
2525 sci->sc_nfreesegs = argv[4].v_nmembs;
2526 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2527
2528 for (;;) {
2529 nilfs_segctor_accept(sci, &req);
2530 err = nilfs_segctor_construct(sci, &req);
2531 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2532 nilfs_segctor_notify(sci, &req);
2533
2534 if (likely(!err))
2535 break;
2536
2537 nilfs_warning(sb, __func__,
2538 "segment construction failed. (err=%d)", err);
2539 set_current_state(TASK_INTERRUPTIBLE);
2540 schedule_timeout(sci->sc_interval);
2541 }
2542
2543 out_unlock:
2544 sci->sc_freesegs = NULL;
2545 sci->sc_nfreesegs = 0;
2546 nilfs_clear_gcdat_inode(nilfs);
2547 nilfs_transaction_unlock(sbi);
2548 return err;
2549}
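/*
 * Note: nilfs_clean_segments() is the entry point of the garbage
 * collector; argv and kbufs are prepared by the cleaner ioctl.  The loop
 * above retries a failed construction after sleeping for sc_interval and
 * returns only once a segment with a super root has been written.
 */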
2550
2551static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2552{
2553 struct nilfs_sb_info *sbi = sci->sc_sbi;
2554 struct nilfs_transaction_info ti;
2555 struct nilfs_segctor_req req = { .mode = mode };
2556
2557 nilfs_transaction_lock(sbi, &ti, 0);
2558
2559 nilfs_segctor_accept(sci, &req);
2560 nilfs_segctor_construct(sci, &req);
2561 nilfs_segctor_notify(sci, &req);
2562
2563 /*
2564 * An unclosed segment should be retried. We do this using sc_timer.
2565 * A timeout of sc_timer invokes a complete construction, which
2566 * closes the current logical segment.
2567 */
2568 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2569 nilfs_segctor_start_timer(sci);
2570
2571 nilfs_transaction_unlock(sbi);
2572}
2573
2574static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2575{
2576 int mode = 0;
2577 int err;
2578
2579 spin_lock(&sci->sc_state_lock);
2580 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2581 SC_FLUSH_DAT : SC_FLUSH_FILE;
2582 spin_unlock(&sci->sc_state_lock);
2583
2584 if (mode) {
2585 err = nilfs_segctor_do_construct(sci, mode);
2586
2587 spin_lock(&sci->sc_state_lock);
2588 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2589 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2590 spin_unlock(&sci->sc_state_lock);
2591 }
2592 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2593}
2594
2595static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2596{
2597 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2598 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2599 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2600 return SC_FLUSH_FILE;
2601 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2602 return SC_FLUSH_DAT;
2603 }
2604 return SC_LSEG_SR;
2605}
2606
2607/**
2608 * nilfs_segctor_thread - main loop of the segment constructor thread.
2609 * @arg: pointer to a struct nilfs_sc_info.
2610 *
2611 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2612 * to execute segment constructions.
2613 */
2614static int nilfs_segctor_thread(void *arg)
2615{
2616 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2617 struct timer_list timer;
2618 int timeout = 0;
2619
2620 init_timer(&timer);
2621 timer.data = (unsigned long)current;
2622 timer.function = nilfs_construction_timeout;
2623 sci->sc_timer = &timer;
2624
2625 /* start sync. */
2626 sci->sc_task = current;
2627 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2628 printk(KERN_INFO
2629 "segctord starting. Construction interval = %lu seconds, "
2630 "CP frequency < %lu seconds\n",
2631 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2632
2633 spin_lock(&sci->sc_state_lock);
2634 loop:
2635 for (;;) {
2636 int mode;
2637
2638 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2639 goto end_thread;
2640
2641 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2642 mode = SC_LSEG_SR;
2643 else if (!sci->sc_flush_request)
2644 break;
2645 else
2646 mode = nilfs_segctor_flush_mode(sci);
2647
2648 spin_unlock(&sci->sc_state_lock);
2649 nilfs_segctor_thread_construct(sci, mode);
2650 spin_lock(&sci->sc_state_lock);
2651 timeout = 0;
2652 }
2653
2654
2655 if (freezing(current)) {
2656 spin_unlock(&sci->sc_state_lock);
2657 refrigerator();
2658 spin_lock(&sci->sc_state_lock);
2659 } else {
2660 DEFINE_WAIT(wait);
2661 int should_sleep = 1;
2662 struct the_nilfs *nilfs;
2663
2664 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2665 TASK_INTERRUPTIBLE);
2666
2667 if (sci->sc_seq_request != sci->sc_seq_done)
2668 should_sleep = 0;
2669 else if (sci->sc_flush_request)
2670 should_sleep = 0;
2671 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2672 should_sleep = time_before(jiffies,
2673 sci->sc_timer->expires);
2674
2675 if (should_sleep) {
2676 spin_unlock(&sci->sc_state_lock);
2677 schedule();
2678 spin_lock(&sci->sc_state_lock);
2679 }
2680 finish_wait(&sci->sc_wait_daemon, &wait);
2681 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2682 time_after_eq(jiffies, sci->sc_timer->expires));
2683 nilfs = sci->sc_sbi->s_nilfs;
2684 if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs))
2685 set_nilfs_discontinued(nilfs);
2686 }
2687 goto loop;
2688
2689 end_thread:
2690 spin_unlock(&sci->sc_state_lock);
2691 del_timer_sync(sci->sc_timer);
2692 sci->sc_timer = NULL;
2693
2694 /* end sync. */
2695 sci->sc_task = NULL;
2696 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2697 return 0;
2698}
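/*
 * Note: segctord sleeps on sc_wait_daemon and wakes up for three reasons:
 * an explicit request (sc_seq_request != sc_seq_done), a pending flush
 * bitmap, or expiry of the construction timer.  Timer expiry and sync
 * requests force a full SC_LSEG_SR construction; bare flush requests are
 * mapped to a cheaper mode by nilfs_segctor_flush_mode().
 */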
2699
2700static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2701{
2702 struct task_struct *t;
2703
2704 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2705 if (IS_ERR(t)) {
2706 int err = PTR_ERR(t);
2707
2708 printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
2709 err);
2710 return err;
2711 }
2712 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2713 return 0;
2714}
2715
2716static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2717{
2718 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2719
2720 while (sci->sc_task) {
2721 wake_up(&sci->sc_wait_daemon);
2722 spin_unlock(&sci->sc_state_lock);
2723 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2724 spin_lock(&sci->sc_state_lock);
2725 }
2726}
2727
2728static int nilfs_segctor_init(struct nilfs_sc_info *sci)
2729{
2730 sci->sc_seq_done = sci->sc_seq_request;
2731
2732 return nilfs_segctor_start_thread(sci);
2733}
2734
2735/*
2736 * Setup & clean-up functions
2737 */
2738static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
2739{
2740 struct nilfs_sc_info *sci;
2741
2742 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2743 if (!sci)
2744 return NULL;
2745
2746 sci->sc_sbi = sbi;
2747 sci->sc_super = sbi->s_super;
2748
2749 init_waitqueue_head(&sci->sc_wait_request);
2750 init_waitqueue_head(&sci->sc_wait_daemon);
2751 init_waitqueue_head(&sci->sc_wait_task);
2752 spin_lock_init(&sci->sc_state_lock);
2753 INIT_LIST_HEAD(&sci->sc_dirty_files);
2754 INIT_LIST_HEAD(&sci->sc_segbufs);
2755 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2756 INIT_LIST_HEAD(&sci->sc_copied_buffers);
2757
2758 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2759 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2760 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2761
2762 if (sbi->s_interval)
2763 sci->sc_interval = sbi->s_interval;
2764 if (sbi->s_watermark)
2765 sci->sc_watermark = sbi->s_watermark;
2766 return sci;
2767}
2768
2769static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2770{
2771 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2772
2773 /* The segctord thread was stopped and its timer was removed.
2774 But some tasks remain. */
2775 do {
2776 struct nilfs_sb_info *sbi = sci->sc_sbi;
2777 struct nilfs_transaction_info ti;
2778 struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
2779
2780 nilfs_transaction_lock(sbi, &ti, 0);
2781 nilfs_segctor_accept(sci, &req);
2782 ret = nilfs_segctor_construct(sci, &req);
2783 nilfs_segctor_notify(sci, &req);
2784 nilfs_transaction_unlock(sbi);
2785
2786 } while (ret && retrycount-- > 0);
2787}
2788
2789/**
2790 * nilfs_segctor_destroy - destroy the segment constructor.
2791 * @sci: nilfs_sc_info
2792 *
2793 * nilfs_segctor_destroy() kills the segctord thread and frees
2794 * the nilfs_sc_info struct.
2795 * Caller must hold the segment semaphore.
2796 */
2797static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2798{
2799 struct nilfs_sb_info *sbi = sci->sc_sbi;
2800 int flag;
2801
2802 up_write(&sbi->s_nilfs->ns_segctor_sem);
2803
2804 spin_lock(&sci->sc_state_lock);
2805 nilfs_segctor_kill_thread(sci);
2806 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2807 || sci->sc_seq_request != sci->sc_seq_done);
2808 spin_unlock(&sci->sc_state_lock);
2809
2810 if (flag || nilfs_segctor_confirm(sci))
2811 nilfs_segctor_write_out(sci);
2812
2813 WARN_ON(!list_empty(&sci->sc_copied_buffers));
2814
2815 if (!list_empty(&sci->sc_dirty_files)) {
2816 nilfs_warning(sbi->s_super, __func__,
2817 "dirty file(s) after the final construction\n");
2818 nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
2819 }
2820
2821 WARN_ON(!list_empty(&sci->sc_segbufs));
2822
2823 down_write(&sbi->s_nilfs->ns_segctor_sem);
2824
2825 kfree(sci);
2826}
2827
2828/**
2829 * nilfs_attach_segment_constructor - attach a segment constructor
2830 * @sbi: nilfs_sb_info
2831 *
2832 * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
2833 * initializes it, and starts the segment constructor.
2834 *
2835 * Return Value: On success, 0 is returned. On error, one of the following
2836 * negative error code is returned.
2837 *
2838 * %-ENOMEM - Insufficient memory available.
2839 */
2840int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
2841{
2842 struct the_nilfs *nilfs = sbi->s_nilfs;
2843 int err;
2844
2845 /* Each field of nilfs_segctor is cleared through the initialization
2846 of super-block info */
2847 sbi->s_sc_info = nilfs_segctor_new(sbi);
2848 if (!sbi->s_sc_info)
2849 return -ENOMEM;
2850
2851 nilfs_attach_writer(nilfs, sbi);
2852 err = nilfs_segctor_init(NILFS_SC(sbi));
2853 if (err) {
2854 nilfs_detach_writer(nilfs, sbi);
2855 kfree(sbi->s_sc_info);
2856 sbi->s_sc_info = NULL;
2857 }
2858 return err;
2859}
2860
2861/**
2862 * nilfs_detach_segment_constructor - destroy the segment constructor
2863 * @sbi: nilfs_sb_info
2864 *
2865 * nilfs_detach_segment_constructor() kills the segment constructor daemon,
2866 * frees the struct nilfs_sc_info, and destroys the dirty file list.
2867 */
2868void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
2869{
2870 struct the_nilfs *nilfs = sbi->s_nilfs;
2871 LIST_HEAD(garbage_list);
2872
2873 down_write(&nilfs->ns_segctor_sem);
2874 if (NILFS_SC(sbi)) {
2875 nilfs_segctor_destroy(NILFS_SC(sbi));
2876 sbi->s_sc_info = NULL;
2877 }
2878
2879 /* Forcibly free the list of dirty files */
2880 spin_lock(&sbi->s_inode_lock);
2881 if (!list_empty(&sbi->s_dirty_files)) {
2882 list_splice_init(&sbi->s_dirty_files, &garbage_list);
2883 nilfs_warning(sbi->s_super, __func__,
2884 "Non empty dirty list after the last "
2885 "segment construction\n");
2886 }
2887 spin_unlock(&sbi->s_inode_lock);
2888 up_write(&nilfs->ns_segctor_sem);
2889
2890 nilfs_dispose_list(sbi, &garbage_list, 1);
2891 nilfs_detach_writer(nilfs, sbi);
2892}