/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include "page.h"
#include "segbuf.h"
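
/*
 * nilfs_write_info carries the transient state used while turning one
 * log (segment buffer) into a series of BIOs: the bio currently being
 * built, the block region [start, end) already packed into it, and the
 * number of blocks and page vectors still left to submit.
 */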
struct nilfs_write_info {
	struct the_nilfs	*nilfs;
	struct bio		*bio;
	int			start, end; /* The region to be submitted */
	int			rest_blocks, max_pages, nr_vecs;
	sector_t		blocknr;
};

static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
			      struct the_nilfs *nilfs);
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);

static struct kmem_cache *nilfs_segbuf_cachep;

static void nilfs_segbuf_init_once(void *obj)
{
	memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

int __init nilfs_init_segbuf_cache(void)
{
	nilfs_segbuf_cachep =
		kmem_cache_create("nilfs2_segbuf_cache",
				  sizeof(struct nilfs_segment_buffer),
				  0, SLAB_RECLAIM_ACCOUNT,
				  nilfs_segbuf_init_once);

	return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
}

void nilfs_destroy_segbuf_cache(void)
{
	kmem_cache_destroy(nilfs_segbuf_cachep);
}

struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
	struct nilfs_segment_buffer *segbuf;

	segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
	if (unlikely(!segbuf))
		return NULL;

	segbuf->sb_super = sb;
	INIT_LIST_HEAD(&segbuf->sb_list);
	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
	segbuf->sb_super_root = NULL;

	init_completion(&segbuf->sb_bio_event);
	atomic_set(&segbuf->sb_err, 0);
	segbuf->sb_nbio = 0;

	return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
	kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}
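
/*
 * Rough lifecycle of a segment buffer, as suggested by the helpers in
 * this file (illustrative sketch only; the actual sequencing of these
 * calls lives in the segment constructor, segment.c):
 *
 *	segbuf = nilfs_segbuf_new(sb);
 *	nilfs_segbuf_map(segbuf, segnum, offset, nilfs);
 *	nilfs_segbuf_reset(segbuf, flags, ctime);
 *	...extend the summary area and add payload buffers...
 *	nilfs_segbuf_fill_in_segsum(segbuf);
 *	nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
 *	nilfs_segbuf_fill_in_data_crc(segbuf, seed);
 *	nilfs_write_logs(&logs, nilfs);		(submit BIOs)
 *	nilfs_wait_on_logs(&logs);		(collect completion status)
 *	nilfs_truncate_logs(&logs, NULL);	(release buffers and segbufs)
 */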

void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
		      unsigned long offset, struct the_nilfs *nilfs)
{
	segbuf->sb_segnum = segnum;
	nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
				&segbuf->sb_fseg_end);

	segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

/**
 * nilfs_segbuf_map_cont - map a new log behind a given log
 * @segbuf: new segment buffer
 * @prev: segment buffer containing a log to be continued
 */
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
			   struct nilfs_segment_buffer *prev)
{
	segbuf->sb_segnum = prev->sb_segnum;
	segbuf->sb_fseg_start = prev->sb_fseg_start;
	segbuf->sb_fseg_end = prev->sb_fseg_end;
	segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
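
/*
 * For example, if the previous log starts at block 1000 of the segment
 * and holds 12 blocks (prev->sb_sum.nblocks == 12), the continued log
 * is mapped to start at block 1012 of the same full segment, and
 * sb_rest_blocks becomes whatever room remains up to sb_fseg_end.
 */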

void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
				  __u64 nextnum, struct the_nilfs *nilfs)
{
	segbuf->sb_nextnum = nextnum;
	segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}

int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
	return 0;
}

int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
				struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_payload_buffer(segbuf, bh);
	*bhp = bh;
	return 0;
}

int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
		       time_t ctime)
{
	int err;

	segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
	err = nilfs_segbuf_extend_segsum(segbuf);
	if (unlikely(err))
		return err;

	segbuf->sb_sum.flags = flags;
	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
	segbuf->sb_sum.ctime = ctime;
	return 0;
}

/*
 * Setup segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct nilfs_segment_summary *raw_sum;
	struct buffer_head *bh_sum;

	bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
			    struct buffer_head, b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
	raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
	raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
	raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
	raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
	raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
	raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
	raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
	raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
	raw_sum->ss_pad      = 0;
}
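
/*
 * Two checksums protect each log: ss_sumsum covers only the segment
 * summary area (sb_sum.sumbytes bytes, excluding the two checksum
 * fields themselves), while ss_datasum covers the entire log, that is,
 * the summary blocks plus every payload block, excluding only the
 * ss_datasum field at the head of the first summary block.  Both are
 * crc32_le values computed from a seed supplied by the caller.
 */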

/*
 * CRC calculation routines
 */
void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
				     u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	unsigned long size, bytes = segbuf->sb_sum.sumbytes;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);

	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	size = min_t(unsigned long, bytes, bh->b_size);
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum +
		       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
		       size - (sizeof(raw_sum->ss_datasum) +
			       sizeof(raw_sum->ss_sumsum)));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		bytes -= size;
		size = min_t(unsigned long, bytes, bh->b_size);
		crc = crc32_le(crc, bh->b_data, size);
	}
	raw_sum->ss_sumsum = cpu_to_le32(crc);
}

void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
				   u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	void *kaddr;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
		       bh->b_size - sizeof(raw_sum->ss_datasum));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		crc = crc32_le(crc, bh->b_data, bh->b_size);
	}
	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	raw_sum->ss_datasum = cpu_to_le32(crc);
}

static void nilfs_release_buffers(struct list_head *list)
{
	struct buffer_head *bh, *n;

	list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_nilfs_allocated(bh)) {
			struct page *clone_page = bh->b_page;

			/* remove clone page */
			brelse(bh);
			page_cache_release(clone_page); /* for each bh */
			if (page_count(clone_page) <= 2) {
				lock_page(clone_page);
				nilfs_free_private_page(clone_page);
			}
			continue;
		}
		brelse(bh);
	}
}

static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
	nilfs_release_buffers(&segbuf->sb_payload_buffers);
	segbuf->sb_super_root = NULL;
}

/*
 * Iterators for segment buffers
 */
void nilfs_clear_logs(struct list_head *logs)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, logs, sb_list)
		nilfs_segbuf_clear(segbuf);
}

void nilfs_truncate_logs(struct list_head *logs,
			 struct nilfs_segment_buffer *last)
{
	struct nilfs_segment_buffer *n, *segbuf;

	segbuf = list_prepare_entry(last, logs, sb_list);
	list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_clear(segbuf);
		nilfs_segbuf_free(segbuf);
	}
}

int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf;
	int ret = 0;

	list_for_each_entry(segbuf, logs, sb_list) {
		ret = nilfs_segbuf_write(segbuf, nilfs);
		if (ret)
			break;
	}
	return ret;
}

int nilfs_wait_on_logs(struct list_head *logs)
{
	struct nilfs_segment_buffer *segbuf;
	int err, ret = 0;

	list_for_each_entry(segbuf, logs, sb_list) {
		err = nilfs_segbuf_wait(segbuf);
		if (err && !ret)
			ret = err;
	}
	return ret;
}
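
/*
 * Log writing is two-phased: nilfs_write_logs() above only submits the
 * BIOs for each log on the list, and nilfs_wait_on_logs() then waits
 * for all of them, returning the first error that was recorded.
 */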

/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nilfs_segment_buffer *segbuf = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		bio_put(bio);
		/* to be detected by submit_seg_bio() */
	}

	if (!uptodate)
		atomic_inc(&segbuf->sb_err);

	bio_put(bio);
	complete(&segbuf->sb_bio_event);
}
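
/*
 * Submit the bio accumulated in @wi.  If the backing device is already
 * write-congested and this log has at least one bio in flight, wait
 * for one completion first, which throttles submission and lets an
 * earlier I/O error be detected before queueing more work.
 */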
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
				   struct nilfs_write_info *wi, int mode)
{
	struct bio *bio = wi->bio;
	int err;

	if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
		wait_for_completion(&segbuf->sb_bio_event);
		segbuf->sb_nbio--;
		if (unlikely(atomic_read(&segbuf->sb_err))) {
			bio_put(bio);
			err = -EIO;
			goto failed;
		}
	}

	bio->bi_end_io = nilfs_end_bio_write;
	bio->bi_private = segbuf;
	bio_get(bio);
	submit_bio(mode, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
		bio_put(bio);
		err = -EOPNOTSUPP;
		goto failed;
	}
	segbuf->sb_nbio++;
	bio_put(bio);

	wi->bio = NULL;
	wi->rest_blocks -= wi->end - wi->start;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end;
	return 0;

 failed:
	wi->bio = NULL;
	return err;
}

/**
 * nilfs_alloc_seg_bio - allocate a new bio for writing log
 * @nilfs: nilfs object
 * @start: start block number of the bio
 * @nr_vecs: request size of page vector
 *
 * Return Value: On success, pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
				       int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, nr_vecs);
	if (bio == NULL) {
		/* retry with progressively smaller page vectors */
		while (!bio && (nr_vecs >>= 1))
			bio = bio_alloc(GFP_NOIO, nr_vecs);
	}
	if (likely(bio)) {
		bio->bi_bdev = nilfs->ns_bdev;
		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
	}
	return bio;
}

static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
				       struct nilfs_write_info *wi)
{
	wi->bio = NULL;
	wi->rest_blocks = segbuf->sb_sum.nblocks;
	wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end = 0;
	wi->blocknr = segbuf->sb_pseg_start;
}

static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
				  struct nilfs_write_info *wi,
				  struct buffer_head *bh, int mode)
{
	int len, err;

	BUG_ON(wi->nr_vecs <= 0);
 repeat:
	if (!wi->bio) {
		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
					      wi->nr_vecs);
		if (unlikely(!wi->bio))
			return -ENOMEM;
	}

	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (len == bh->b_size) {
		wi->end++;
		return 0;
	}
	/* bio is FULL */
	err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
	/* never submit current bh */
	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
			      struct the_nilfs *nilfs)
{
	struct nilfs_write_info wi;
	struct buffer_head *bh;
	int res = 0, rw = WRITE;

	wi.nilfs = nilfs;
	nilfs_segbuf_prepare_write(segbuf, &wi);

	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	if (wi.bio) {
		/*
		 * Last BIO is always sent through the following
		 * submission.
		 */
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
		res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
	}

 failed_bio:
	return res;
}

/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
	int err = 0;

	if (!segbuf->sb_nbio)
		return 0;

	do {
		wait_for_completion(&segbuf->sb_bio_event);
	} while (--segbuf->sb_nbio > 0);

	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
		printk(KERN_ERR "NILFS: IO error writing segment\n");
		err = -EIO;
	}
	return err;
}