1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/config.h>
22#include <linux/kernel.h>
23#include <linux/syscalls.h>
24#include <linux/fs.h>
25#include <linux/mm.h>
26#include <linux/percpu.h>
27#include <linux/slab.h>
28#include <linux/smp_lock.h>
29#include <linux/capability.h>
30#include <linux/blkdev.h>
31#include <linux/file.h>
32#include <linux/quotaops.h>
33#include <linux/highmem.h>
34#include <linux/module.h>
35#include <linux/writeback.h>
36#include <linux/hash.h>
37#include <linux/suspend.h>
38#include <linux/buffer_head.h>
39#include <linux/bio.h>
40#include <linux/notifier.h>
41#include <linux/cpu.h>
42#include <linux/bitops.h>
43#include <linux/mpage.h>
44#include <linux/bit_spinlock.h>
45
46static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47static void invalidate_bh_lrus(void);
48
49#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50
51inline void
52init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53{
54 bh->b_end_io = handler;
55 bh->b_private = private;
56}
57
58static int sync_buffer(void *word)
59{
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
63
64 smp_mb();
65 bd = bh->b_bdev;
66 if (bd)
67 blk_run_address_space(bd->bd_inode->i_mapping);
68 io_schedule();
69 return 0;
70}
71
72void fastcall __lock_buffer(struct buffer_head *bh)
73{
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
76}
77EXPORT_SYMBOL(__lock_buffer);
78
79void fastcall unlock_buffer(struct buffer_head *bh)
80{
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
84}
85
86/*
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
90 */
91void __wait_on_buffer(struct buffer_head * bh)
92{
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94}
95
96static void
97__clear_page_buffers(struct page *page)
98{
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
102}
103
104static void buffer_io_error(struct buffer_head *bh)
105{
106 char b[BDEVNAME_SIZE];
107
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
111}
112
113/*
114 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
116 */
117void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
118{
119 if (uptodate) {
120 set_buffer_uptodate(bh);
121 } else {
122 /* This happens, due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
124 }
125 unlock_buffer(bh);
126 put_bh(bh);
127}
128
129void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130{
131 char b[BDEVNAME_SIZE];
132
133 if (uptodate) {
134 set_buffer_uptodate(bh);
135 } else {
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 buffer_io_error(bh);
138 printk(KERN_WARNING "lost page write due to "
139 "I/O error on %s\n",
140 bdevname(bh->b_bdev, b));
141 }
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
144 }
145 unlock_buffer(bh);
146 put_bh(bh);
147}
148
149/*
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
152 */
153int sync_blockdev(struct block_device *bdev)
154{
155 int ret = 0;
156
157 if (bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
159 return ret;
160}
161EXPORT_SYMBOL(sync_blockdev);
162
163static void __fsync_super(struct super_block *sb)
164{
165 sync_inodes_sb(sb, 0);
166 DQUOT_SYNC(sb);
167 lock_super(sb);
168 if (sb->s_dirt && sb->s_op->write_super)
169 sb->s_op->write_super(sb);
170 unlock_super(sb);
171 if (sb->s_op->sync_fs)
172 sb->s_op->sync_fs(sb, 1);
173 sync_blockdev(sb->s_bdev);
174 sync_inodes_sb(sb, 1);
175}
176
177/*
178 * Write out and wait upon all dirty data associated with this
179 * superblock. Filesystem data as well as the underlying block
180 * device. Takes the superblock lock.
181 */
182int fsync_super(struct super_block *sb)
183{
184 __fsync_super(sb);
185 return sync_blockdev(sb->s_bdev);
186}
187
188/*
189 * Write out and wait upon all dirty data associated with this
190 * device. Filesystem data as well as the underlying block
191 * device. Takes the superblock lock.
192 */
193int fsync_bdev(struct block_device *bdev)
194{
195 struct super_block *sb = get_super(bdev);
196 if (sb) {
197 int res = fsync_super(sb);
198 drop_super(sb);
199 return res;
200 }
201 return sync_blockdev(bdev);
202}
203
204/**
205 * freeze_bdev -- lock a filesystem and force it into a consistent state
206 * @bdev: blockdevice to lock
207 *
208 * This takes the block device bd_mount_mutex to make sure no new mounts
209 * happen on bdev until thaw_bdev() is called.
210 * If a superblock is found on this device, we take the s_umount semaphore
211 * on it to make sure nobody unmounts until the snapshot creation is done.
212 */
213struct super_block *freeze_bdev(struct block_device *bdev)
214{
215 struct super_block *sb;
216
217 mutex_lock(&bdev->bd_mount_mutex);
218 sb = get_super(bdev);
219 if (sb && !(sb->s_flags & MS_RDONLY)) {
220 sb->s_frozen = SB_FREEZE_WRITE;
221 smp_wmb();
222
223 __fsync_super(sb);
224
225 sb->s_frozen = SB_FREEZE_TRANS;
226 smp_wmb();
227
228 sync_blockdev(sb->s_bdev);
229
230 if (sb->s_op->write_super_lockfs)
231 sb->s_op->write_super_lockfs(sb);
232 }
233
234 sync_blockdev(bdev);
235 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
236}
237EXPORT_SYMBOL(freeze_bdev);
238
239/**
240 * thaw_bdev -- unlock filesystem
241 * @bdev: blockdevice to unlock
242 * @sb: associated superblock
243 *
244 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
245 */
246void thaw_bdev(struct block_device *bdev, struct super_block *sb)
247{
248 if (sb) {
249 BUG_ON(sb->s_bdev != bdev);
250
251 if (sb->s_op->unlockfs)
252 sb->s_op->unlockfs(sb);
253 sb->s_frozen = SB_UNFROZEN;
254 smp_wmb();
255 wake_up(&sb->s_wait_unfrozen);
256 drop_super(sb);
257 }
258
259 mutex_unlock(&bdev->bd_mount_mutex);
260}
261EXPORT_SYMBOL(thaw_bdev);
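/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a snapshot driver would typically bracket its snapshot creation with
 * freeze_bdev()/thaw_bdev().  "bdev" is assumed to be an already-opened
 * block device and create_snapshot() is a hypothetical callback.
 *
 *	static void examplefs_snapshot(struct block_device *bdev)
 *	{
 *		struct super_block *sb;
 *
 *		sb = freeze_bdev(bdev);		// block new writers, sync the fs
 *		create_snapshot(bdev);		// hypothetical: device is consistent
 *		thaw_bdev(bdev, sb);		// sb may be NULL (nothing mounted)
 *	}
 */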
262
263/*
264 * sync everything. Start out by waking pdflush, because that writes back
265 * all queues in parallel.
266 */
267static void do_sync(unsigned long wait)
268{
269 wakeup_pdflush(0);
270 sync_inodes(0); /* All mappings, inodes and their blockdevs */
271 DQUOT_SYNC(NULL);
272 sync_supers(); /* Write the superblocks */
273 sync_filesystems(0); /* Start syncing the filesystems */
274 sync_filesystems(wait); /* Waitingly sync the filesystems */
275 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
276 if (!wait)
277 printk("Emergency Sync complete\n");
278 if (unlikely(laptop_mode))
279 laptop_sync_completion();
280}
281
282asmlinkage long sys_sync(void)
283{
284 do_sync(1);
285 return 0;
286}
287
288void emergency_sync(void)
289{
290 pdflush_operation(do_sync, 0);
291}
292
293/*
294 * Generic function to fsync a file.
295 *
296 * filp may be NULL if called via the msync of a vma.
297 */
298
299int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
300{
301 struct inode * inode = dentry->d_inode;
302 struct super_block * sb;
303 int ret, err;
304
305 /* sync the inode to buffers */
306 ret = write_inode_now(inode, 0);
307
308 /* sync the superblock to buffers */
309 sb = inode->i_sb;
310 lock_super(sb);
311 if (sb->s_op->write_super)
312 sb->s_op->write_super(sb);
313 unlock_super(sb);
314
315 /* .. finally sync the buffers to disk */
316 err = sync_blockdev(sb->s_bdev);
317 if (!ret)
318 ret = err;
319 return ret;
320}
321
322long do_fsync(struct file *file, int datasync)
323{
324 int ret;
325 int err;
326 struct address_space *mapping = file->f_mapping;
327
328 if (!file->f_op || !file->f_op->fsync) {
329 /* Why? We can still call filemap_fdatawrite */
330 ret = -EINVAL;
331 goto out;
332 }
333
334 current->flags |= PF_SYNCWRITE;
335 ret = filemap_fdatawrite(mapping);
336
337 /*
338 * We need to protect against concurrent writers, which could cause
339 * livelocks in fsync_buffers_list().
340 */
341 mutex_lock(&mapping->host->i_mutex);
342 err = file->f_op->fsync(file, file->f_dentry, datasync);
343 if (!ret)
344 ret = err;
345 mutex_unlock(&mapping->host->i_mutex);
346 err = filemap_fdatawait(mapping);
347 if (!ret)
348 ret = err;
349 current->flags &= ~PF_SYNCWRITE;
350out:
351 return ret;
352}
353
354static long __do_fsync(unsigned int fd, int datasync)
355{
356 struct file *file;
357 int ret = -EBADF;
358
359 file = fget(fd);
360 if (file) {
361 ret = do_fsync(file, datasync);
362 fput(file);
363 }
364 return ret;
365}
366
367asmlinkage long sys_fsync(unsigned int fd)
368{
369 return __do_fsync(fd, 0);
370}
371
372asmlinkage long sys_fdatasync(unsigned int fd)
373{
374 return __do_fsync(fd, 1);
375}
376
377/*
378 * Various filesystems appear to want __find_get_block to be non-blocking.
379 * But it's the page lock which protects the buffers. To get around this,
380 * we get exclusion from try_to_free_buffers with the blockdev mapping's
381 * private_lock.
382 *
383 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
384 * may be quite high. This code could TryLock the page, and if that
385 * succeeds, there is no need to take private_lock. (But if
386 * private_lock is contended then so is mapping->tree_lock).
387 */
388static struct buffer_head *
389__find_get_block_slow(struct block_device *bdev, sector_t block)
390{
391 struct inode *bd_inode = bdev->bd_inode;
392 struct address_space *bd_mapping = bd_inode->i_mapping;
393 struct buffer_head *ret = NULL;
394 pgoff_t index;
395 struct buffer_head *bh;
396 struct buffer_head *head;
397 struct page *page;
398 int all_mapped = 1;
399
400 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
401 page = find_get_page(bd_mapping, index);
402 if (!page)
403 goto out;
404
405 spin_lock(&bd_mapping->private_lock);
406 if (!page_has_buffers(page))
407 goto out_unlock;
408 head = page_buffers(page);
409 bh = head;
410 do {
411 if (bh->b_blocknr == block) {
412 ret = bh;
413 get_bh(bh);
414 goto out_unlock;
415 }
416 if (!buffer_mapped(bh))
417 all_mapped = 0;
418 bh = bh->b_this_page;
419 } while (bh != head);
420
421 /* we might be here because some of the buffers on this page are
422 * not mapped. This is due to various races between
423 * file io on the block device and getblk. It gets dealt with
424 * elsewhere, don't buffer_error if we had some unmapped buffers
425 */
426 if (all_mapped) {
427 printk("__find_get_block_slow() failed. "
428 "block=%llu, b_blocknr=%llu\n",
429 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
430 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
431 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
432 }
433out_unlock:
434 spin_unlock(&bd_mapping->private_lock);
435 page_cache_release(page);
436out:
437 return ret;
438}
439
440/* If invalidate_buffers() will trash dirty buffers, it means some kind
441 of fs corruption is going on. Trashing dirty data always implies losing
442 information that was supposed to be just stored on the physical layer
443 by the user.
444
445 Thus invalidate_buffers in general usage is not allowed to trash
446 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
447 be preserved. These buffers are simply skipped.
448
449 We also skip buffers which are still in use. For example this can
450 happen if a userspace program is reading the block device.
451
452 NOTE: In the case where the user removed a removable-media-disk even if
453 there's still dirty data not synced on disk (due to a bug in the device driver
454 or to an error of the user), by not destroying the dirty buffers we could
455 generate corruption also on the next media inserted, thus a parameter is
456 necessary to handle this case in the most safe way possible (trying
457 to not corrupt also the new disk inserted with the data belonging to
458 the old now corrupted disk). Also for the ramdisk the natural thing
459 to do in order to release the ramdisk memory is to destroy dirty buffers.
460
461 These are two special cases. Normal usage implies that the device driver
462 issues a sync on the device (without waiting for I/O completion) and
463 then an invalidate_buffers call that doesn't trash dirty buffers.
464
465 For handling cache coherency with the blkdev pagecache the 'update' case
466 has been introduced. It is needed to re-read from disk any pinned
467 buffer. NOTE: re-reading from disk is destructive so we can do it only
468 when we assume nobody is changing the buffercache under our I/O and when
469 we think the disk contains more recent information than the buffercache.
470 The update == 1 pass marks the buffers we need to update, the update == 2
471 pass does the actual I/O. */
472void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
473{
474 invalidate_bh_lrus();
475 /*
476 * FIXME: what about destroy_dirty_buffers?
477 * We really want to use invalidate_inode_pages2() for
478 * that, but not until that's cleaned up.
479 */
480 invalidate_inode_pages(bdev->bd_inode->i_mapping);
481}
482
483/*
484 * Kick pdflush then try to free up some ZONE_NORMAL memory.
485 */
486static void free_more_memory(void)
487{
488 struct zone **zones;
489 pg_data_t *pgdat;
490
491 wakeup_pdflush(1024);
492 yield();
493
494 for_each_pgdat(pgdat) {
495 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
496 if (*zones)
497 try_to_free_pages(zones, GFP_NOFS);
498 }
499}
500
501/*
502 * I/O completion handler for block_read_full_page() - pages
503 * which come unlocked at the end of I/O.
504 */
505static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
506{
507 unsigned long flags;
508 struct buffer_head *first;
509 struct buffer_head *tmp;
510 struct page *page;
511 int page_uptodate = 1;
512
513 BUG_ON(!buffer_async_read(bh));
514
515 page = bh->b_page;
516 if (uptodate) {
517 set_buffer_uptodate(bh);
518 } else {
519 clear_buffer_uptodate(bh);
520 if (printk_ratelimit())
521 buffer_io_error(bh);
522 SetPageError(page);
523 }
524
525 /*
526 * Be _very_ careful from here on. Bad things can happen if
527 * two buffer heads end IO at almost the same time and both
528 * decide that the page is now completely done.
529 */
530 first = page_buffers(page);
531 local_irq_save(flags);
532 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
533 clear_buffer_async_read(bh);
534 unlock_buffer(bh);
535 tmp = bh;
536 do {
537 if (!buffer_uptodate(tmp))
538 page_uptodate = 0;
539 if (buffer_async_read(tmp)) {
540 BUG_ON(!buffer_locked(tmp));
541 goto still_busy;
542 }
543 tmp = tmp->b_this_page;
544 } while (tmp != bh);
545 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
546 local_irq_restore(flags);
547
548 /*
549 * If none of the buffers had errors and they are all
550 * uptodate then we can set the page uptodate.
551 */
552 if (page_uptodate && !PageError(page))
553 SetPageUptodate(page);
554 unlock_page(page);
555 return;
556
557still_busy:
558 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
559 local_irq_restore(flags);
560 return;
561}
562
563/*
564 * Completion handler for block_write_full_page() - pages which are unlocked
565 * during I/O, and which have PageWriteback cleared upon I/O completion.
566 */
567void end_buffer_async_write(struct buffer_head *bh, int uptodate)
568{
569 char b[BDEVNAME_SIZE];
570 unsigned long flags;
571 struct buffer_head *first;
572 struct buffer_head *tmp;
573 struct page *page;
574
575 BUG_ON(!buffer_async_write(bh));
576
577 page = bh->b_page;
578 if (uptodate) {
579 set_buffer_uptodate(bh);
580 } else {
581 if (printk_ratelimit()) {
582 buffer_io_error(bh);
583 printk(KERN_WARNING "lost page write due to "
584 "I/O error on %s\n",
585 bdevname(bh->b_bdev, b));
586 }
587 set_bit(AS_EIO, &page->mapping->flags);
588 clear_buffer_uptodate(bh);
589 SetPageError(page);
590 }
591
592 first = page_buffers(page);
593 local_irq_save(flags);
594 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
595
596 clear_buffer_async_write(bh);
597 unlock_buffer(bh);
598 tmp = bh->b_this_page;
599 while (tmp != bh) {
600 if (buffer_async_write(tmp)) {
601 BUG_ON(!buffer_locked(tmp));
602 goto still_busy;
603 }
604 tmp = tmp->b_this_page;
605 }
606 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
607 local_irq_restore(flags);
608 end_page_writeback(page);
609 return;
610
611still_busy:
612 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
613 local_irq_restore(flags);
614 return;
615}
616
617/*
618 * If a page's buffers are under async readin (end_buffer_async_read
619 * completion) then there is a possibility that another thread of
620 * control could lock one of the buffers after it has completed
621 * but while some of the other buffers have not completed. This
622 * locked buffer would confuse end_buffer_async_read() into not unlocking
623 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
624 * that this buffer is not under async I/O.
625 *
626 * The page comes unlocked when it has no locked buffer_async buffers
627 * left.
628 *
629 * PageLocked prevents anyone starting new async I/O reads any of
630 * the buffers.
631 *
632 * PageWriteback is used to prevent simultaneous writeout of the same
633 * page.
634 *
635 * PageLocked prevents anyone from starting writeback of a page which is
636 * under read I/O (PageWriteback is only ever set against a locked page).
637 */
638static void mark_buffer_async_read(struct buffer_head *bh)
639{
640 bh->b_end_io = end_buffer_async_read;
641 set_buffer_async_read(bh);
642}
643
644void mark_buffer_async_write(struct buffer_head *bh)
645{
646 bh->b_end_io = end_buffer_async_write;
647 set_buffer_async_write(bh);
648}
649EXPORT_SYMBOL(mark_buffer_async_write);
650
651
652/*
653 * fs/buffer.c contains helper functions for buffer-backed address space's
654 * fsync functions. A common requirement for buffer-based filesystems is
655 * that certain data from the backing blockdev needs to be written out for
656 * a successful fsync(). For example, ext2 indirect blocks need to be
657 * written back and waited upon before fsync() returns.
658 *
659 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
660 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
661 * management of a list of dependent buffers at ->i_mapping->private_list.
662 *
663 * Locking is a little subtle: try_to_free_buffers() will remove buffers
664 * from their controlling inode's queue when they are being freed. But
665 * try_to_free_buffers() will be operating against the *blockdev* mapping
666 * at the time, not against the S_ISREG file which depends on those buffers.
667 * So the locking for private_list is via the private_lock in the address_space
668 * which backs the buffers. Which is different from the address_space
669 * against which the buffers are listed. So for a particular address_space,
670 * mapping->private_lock does *not* protect mapping->private_list! In fact,
671 * mapping->private_list will always be protected by the backing blockdev's
672 * ->private_lock.
673 *
674 * Which introduces a requirement: all buffers on an address_space's
675 * ->private_list must be from the same address_space: the blockdev's.
676 *
677 * address_spaces which do not place buffers at ->private_list via these
678 * utility functions are free to use private_lock and private_list for
679 * whatever they want. The only requirement is that list_empty(private_list)
680 * be true at clear_inode() time.
681 *
682 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
683 * filesystems should do that. invalidate_inode_buffers() should just go
684 * BUG_ON(!list_empty).
685 *
686 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
687 * take an address_space, not an inode. And it should be called
688 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
689 * queued up.
690 *
691 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
692 * list if it is already on a list. Because if the buffer is on a list,
693 * it *must* already be on the right one. If not, the filesystem is being
694 * silly. This will save a ton of locking. But first we have to ensure
695 * that buffers are taken *off* the old inode's list when they are freed
696 * (presumably in truncate). That requires careful auditing of all
697 * filesystems (do it inside bforget()). It could also be done by bringing
698 * b_inode back.
699 */
700
701/*
702 * The buffer's backing address_space's private_lock must be held
703 */
704static inline void __remove_assoc_queue(struct buffer_head *bh)
705{
706 list_del_init(&bh->b_assoc_buffers);
707}
708
709int inode_has_buffers(struct inode *inode)
710{
711 return !list_empty(&inode->i_data.private_list);
712}
713
714/*
715 * osync is designed to support O_SYNC io. It waits synchronously for
716 * all already-submitted IO to complete, but does not queue any new
717 * writes to the disk.
718 *
719 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
720 * you dirty the buffers, and then use osync_inode_buffers to wait for
721 * completion. Any other dirty buffers which are not yet queued for
722 * write will not be flushed to disk by the osync.
723 */
724static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
725{
726 struct buffer_head *bh;
727 struct list_head *p;
728 int err = 0;
729
730 spin_lock(lock);
731repeat:
732 list_for_each_prev(p, list) {
733 bh = BH_ENTRY(p);
734 if (buffer_locked(bh)) {
735 get_bh(bh);
736 spin_unlock(lock);
737 wait_on_buffer(bh);
738 if (!buffer_uptodate(bh))
739 err = -EIO;
740 brelse(bh);
741 spin_lock(lock);
742 goto repeat;
743 }
744 }
745 spin_unlock(lock);
746 return err;
747}
748
749/**
750 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
751 * buffers
752 * @mapping: the mapping which wants those buffers written
753 *
754 * Starts I/O against the buffers at mapping->private_list, and waits upon
755 * that I/O.
756 *
757 * Basically, this is a convenience function for fsync().
758 * @mapping is a file or directory which needs those buffers to be written for
759 * a successful fsync().
760 */
761int sync_mapping_buffers(struct address_space *mapping)
762{
763 struct address_space *buffer_mapping = mapping->assoc_mapping;
764
765 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
766 return 0;
767
768 return fsync_buffers_list(&buffer_mapping->private_lock,
769 &mapping->private_list);
770}
771EXPORT_SYMBOL(sync_mapping_buffers);
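/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a simple filesystem's ->fsync can be built almost entirely from the
 * helpers above.  "examplefs" is a hypothetical filesystem; write_inode_now()
 * is the generic inode writeback helper also used by file_fsync() earlier
 * in this file.
 *
 *	static int examplefs_fsync(struct file *file, struct dentry *dentry,
 *				   int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err, ret;
 *
 *		ret = sync_mapping_buffers(inode->i_mapping);
 *		if (!(inode->i_state & I_DIRTY))
 *			return ret;
 *		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 *			return ret;
 *		err = write_inode_now(inode, 1);
 *		if (ret == 0)
 *			ret = err;
 *		return ret;
 *	}
 */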
772
773/*
774 * Called when we've recently written block `bblock', and it is known that
775 * `bblock' was for a buffer_boundary() buffer. This means that the block at
776 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
777 * dirty, schedule it for IO. So that indirects merge nicely with their data.
778 */
779void write_boundary_block(struct block_device *bdev,
780 sector_t bblock, unsigned blocksize)
781{
782 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
783 if (bh) {
784 if (buffer_dirty(bh))
785 ll_rw_block(WRITE, 1, &bh);
786 put_bh(bh);
787 }
788}
789
790void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
791{
792 struct address_space *mapping = inode->i_mapping;
793 struct address_space *buffer_mapping = bh->b_page->mapping;
794
795 mark_buffer_dirty(bh);
796 if (!mapping->assoc_mapping) {
797 mapping->assoc_mapping = buffer_mapping;
798 } else {
799 if (mapping->assoc_mapping != buffer_mapping)
800 BUG();
801 }
802 if (list_empty(&bh->b_assoc_buffers)) {
803 spin_lock(&buffer_mapping->private_lock);
804 list_move_tail(&bh->b_assoc_buffers,
805 &mapping->private_list);
806 spin_unlock(&buffer_mapping->private_lock);
807 }
808}
809EXPORT_SYMBOL(mark_buffer_dirty_inode);
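/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * when a block-based filesystem allocates an indirect (metadata) block on
 * behalf of an inode, it dirties that buffer against the inode so that a
 * later sync_mapping_buffers() call from ->fsync writes it out.  "bh" is
 * assumed to be the buffer_head of the freshly allocated indirect block;
 * this mirrors what ext2-style filesystems do.
 *
 *	static void examplefs_init_indirect(struct inode *inode,
 *					    struct buffer_head *bh)
 *	{
 *		lock_buffer(bh);
 *		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
 *		set_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *		mark_buffer_dirty_inode(bh, inode);	// queued on ->private_list
 *	}
 */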
810
811/*
812 * Add a page to the dirty page list.
813 *
814 * It is a sad fact of life that this function is called from several places
815 * deeply under spinlocking. It may not sleep.
816 *
817 * If the page has buffers, the uptodate buffers are set dirty, to preserve
818 * dirty-state coherency between the page and the buffers. If the page does
819 * not have buffers then when they are later attached they will all be set
820 * dirty.
821 *
822 * The buffers are dirtied before the page is dirtied. There's a small race
823 * window in which a writepage caller may see the page cleanness but not the
824 * buffer dirtiness. That's fine. If this code were to set the page dirty
825 * before the buffers, a concurrent writepage caller could clear the page dirty
826 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
827 * page on the dirty page list.
828 *
829 * We use private_lock to lock against try_to_free_buffers while using the
830 * page's buffer list. Also use this to protect against clean buffers being
831 * added to the page after it was set dirty.
832 *
833 * FIXME: may need to call ->reservepage here as well. That's rather up to the
834 * address_space though.
835 */
836int __set_page_dirty_buffers(struct page *page)
837{
838 struct address_space * const mapping = page->mapping;
839
840 spin_lock(&mapping->private_lock);
841 if (page_has_buffers(page)) {
842 struct buffer_head *head = page_buffers(page);
843 struct buffer_head *bh = head;
844
845 do {
846 set_buffer_dirty(bh);
847 bh = bh->b_this_page;
848 } while (bh != head);
849 }
850 spin_unlock(&mapping->private_lock);
851
852 if (!TestSetPageDirty(page)) {
853 write_lock_irq(&mapping->tree_lock);
854 if (page->mapping) { /* Race with truncate? */
855 if (mapping_cap_account_dirty(mapping))
856 inc_page_state(nr_dirty);
857 radix_tree_tag_set(&mapping->page_tree,
858 page_index(page),
859 PAGECACHE_TAG_DIRTY);
860 }
861 write_unlock_irq(&mapping->tree_lock);
862 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
863 return 1;
864 }
865 return 0;
866}
867EXPORT_SYMBOL(__set_page_dirty_buffers);
868
869/*
870 * Write out and wait upon a list of buffers.
871 *
872 * We have conflicting pressures: we want to make sure that all
873 * initially dirty buffers get waited on, but that any subsequently
874 * dirtied buffers don't. After all, we don't want fsync to last
875 * forever if somebody is actively writing to the file.
876 *
877 * Do this in two main stages: first we copy dirty buffers to a
878 * temporary inode list, queueing the writes as we go. Then we clean
879 * up, waiting for those writes to complete.
880 *
881 * During this second stage, any subsequent updates to the file may end
882 * up refiling the buffer on the original inode's dirty list again, so
883 * there is a chance we will end up with a buffer queued for write but
884 * not yet completed on that list. So, as a final cleanup we go through
885 * the osync code to catch these locked, dirty buffers without requeuing
886 * any newly dirty buffers for write.
887 */
888static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
889{
890 struct buffer_head *bh;
891 struct list_head tmp;
892 int err = 0, err2;
893
894 INIT_LIST_HEAD(&tmp);
895
896 spin_lock(lock);
897 while (!list_empty(list)) {
898 bh = BH_ENTRY(list->next);
899 list_del_init(&bh->b_assoc_buffers);
900 if (buffer_dirty(bh) || buffer_locked(bh)) {
901 list_add(&bh->b_assoc_buffers, &tmp);
902 if (buffer_dirty(bh)) {
903 get_bh(bh);
904 spin_unlock(lock);
905 /*
906 * Ensure any pending I/O completes so that
907 * ll_rw_block() actually writes the current
908 * contents - it is a noop if I/O is still in
909 * flight on potentially older contents.
910 */
911 ll_rw_block(SWRITE, 1, &bh);
912 brelse(bh);
913 spin_lock(lock);
914 }
915 }
916 }
917
918 while (!list_empty(&tmp)) {
919 bh = BH_ENTRY(tmp.prev);
920 __remove_assoc_queue(bh);
921 get_bh(bh);
922 spin_unlock(lock);
923 wait_on_buffer(bh);
924 if (!buffer_uptodate(bh))
925 err = -EIO;
926 brelse(bh);
927 spin_lock(lock);
928 }
929
930 spin_unlock(lock);
931 err2 = osync_buffers_list(lock, list);
932 if (err)
933 return err;
934 else
935 return err2;
936}
937
938/*
939 * Invalidate any and all dirty buffers on a given inode. We are
940 * probably unmounting the fs, but that doesn't mean we have already
941 * done a sync(). Just drop the buffers from the inode list.
942 *
943 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
944 * assumes that all the buffers are against the blockdev. Not true
945 * for reiserfs.
946 */
947void invalidate_inode_buffers(struct inode *inode)
948{
949 if (inode_has_buffers(inode)) {
950 struct address_space *mapping = &inode->i_data;
951 struct list_head *list = &mapping->private_list;
952 struct address_space *buffer_mapping = mapping->assoc_mapping;
953
954 spin_lock(&buffer_mapping->private_lock);
955 while (!list_empty(list))
956 __remove_assoc_queue(BH_ENTRY(list->next));
957 spin_unlock(&buffer_mapping->private_lock);
958 }
959}
960
961/*
962 * Remove any clean buffers from the inode's buffer list. This is called
963 * when we're trying to free the inode itself. Those buffers can pin it.
964 *
965 * Returns true if all buffers were removed.
966 */
967int remove_inode_buffers(struct inode *inode)
968{
969 int ret = 1;
970
971 if (inode_has_buffers(inode)) {
972 struct address_space *mapping = &inode->i_data;
973 struct list_head *list = &mapping->private_list;
974 struct address_space *buffer_mapping = mapping->assoc_mapping;
975
976 spin_lock(&buffer_mapping->private_lock);
977 while (!list_empty(list)) {
978 struct buffer_head *bh = BH_ENTRY(list->next);
979 if (buffer_dirty(bh)) {
980 ret = 0;
981 break;
982 }
983 __remove_assoc_queue(bh);
984 }
985 spin_unlock(&buffer_mapping->private_lock);
986 }
987 return ret;
988}
989
990/*
991 * Create the appropriate buffers when given a page for a data area and
992 * the size of each buffer. Use the bh->b_this_page linked list to
993 * follow the buffers created. Return NULL if unable to create more
994 * buffers.
995 *
996 * The retry flag is used to differentiate async IO (paging, swapping)
997 * which may not fail from ordinary buffer allocations.
998 */
999struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1000 int retry)
1001{
1002 struct buffer_head *bh, *head;
1003 long offset;
1004
1005try_again:
1006 head = NULL;
1007 offset = PAGE_SIZE;
1008 while ((offset -= size) >= 0) {
1009 bh = alloc_buffer_head(GFP_NOFS);
1010 if (!bh)
1011 goto no_grow;
1012
1013 bh->b_bdev = NULL;
1014 bh->b_this_page = head;
1015 bh->b_blocknr = -1;
1016 head = bh;
1017
1018 bh->b_state = 0;
1019 atomic_set(&bh->b_count, 0);
1020 bh->b_private = NULL;
1021 bh->b_size = size;
1022
1023 /* Link the buffer to its page */
1024 set_bh_page(bh, page, offset);
1025
1026 init_buffer(bh, NULL, NULL);
1027 }
1028 return head;
1029/*
1030 * In case anything failed, we just free everything we got.
1031 */
1032no_grow:
1033 if (head) {
1034 do {
1035 bh = head;
1036 head = head->b_this_page;
1037 free_buffer_head(bh);
1038 } while (head);
1039 }
1040
1041 /*
1042 * Return failure for non-async IO requests. Async IO requests
1043 * are not allowed to fail, so we have to wait until buffer heads
1044 * become available. But we don't want tasks sleeping with
1045 * partially complete buffers, so all were released above.
1046 */
1047 if (!retry)
1048 return NULL;
1049
1050 /* We're _really_ low on memory. Now we just
1051 * wait for old buffer heads to become free due to
1052 * finishing IO. Since this is an async request and
1053 * the reserve list is empty, we're sure there are
1054 * async buffer heads in use.
1055 */
1056 free_more_memory();
1057 goto try_again;
1058}
1059EXPORT_SYMBOL_GPL(alloc_page_buffers);
1060
1061static inline void
1062link_dev_buffers(struct page *page, struct buffer_head *head)
1063{
1064 struct buffer_head *bh, *tail;
1065
1066 bh = head;
1067 do {
1068 tail = bh;
1069 bh = bh->b_this_page;
1070 } while (bh);
1071 tail->b_this_page = head;
1072 attach_page_buffers(page, head);
1073}
1074
1075/*
1076 * Initialise the state of a blockdev page's buffers.
1077 */
1078static void
1079init_page_buffers(struct page *page, struct block_device *bdev,
1080 sector_t block, int size)
1081{
1082 struct buffer_head *head = page_buffers(page);
1083 struct buffer_head *bh = head;
1084 int uptodate = PageUptodate(page);
1085
1086 do {
1087 if (!buffer_mapped(bh)) {
1088 init_buffer(bh, NULL, NULL);
1089 bh->b_bdev = bdev;
1090 bh->b_blocknr = block;
1091 if (uptodate)
1092 set_buffer_uptodate(bh);
1093 set_buffer_mapped(bh);
1094 }
1095 block++;
1096 bh = bh->b_this_page;
1097 } while (bh != head);
1098}
1099
1100/*
1101 * Create the page-cache page that contains the requested block.
1102 *
1103 * This is used purely for blockdev mappings.
1104 */
1105static struct page *
1106grow_dev_page(struct block_device *bdev, sector_t block,
1107 pgoff_t index, int size)
1108{
1109 struct inode *inode = bdev->bd_inode;
1110 struct page *page;
1111 struct buffer_head *bh;
1112
1113 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1114 if (!page)
1115 return NULL;
1116
1117 if (!PageLocked(page))
1118 BUG();
1119
1120 if (page_has_buffers(page)) {
1121 bh = page_buffers(page);
1122 if (bh->b_size == size) {
1123 init_page_buffers(page, bdev, block, size);
1124 return page;
1125 }
1126 if (!try_to_free_buffers(page))
1127 goto failed;
1128 }
1129
1130 /*
1131 * Allocate some buffers for this page
1132 */
1133 bh = alloc_page_buffers(page, size, 0);
1134 if (!bh)
1135 goto failed;
1136
1137 /*
1138 * Link the page to the buffers and initialise them. Take the
1139 * lock to be atomic wrt __find_get_block(), which does not
1140 * run under the page lock.
1141 */
1142 spin_lock(&inode->i_mapping->private_lock);
1143 link_dev_buffers(page, bh);
1144 init_page_buffers(page, bdev, block, size);
1145 spin_unlock(&inode->i_mapping->private_lock);
1146 return page;
1147
1148failed:
1149 BUG();
1150 unlock_page(page);
1151 page_cache_release(page);
1152 return NULL;
1153}
1154
1155/*
1156 * Create buffers for the specified block device block's page. If
1157 * that page was dirty, the buffers are set dirty also.
1158 *
1159 * Except that's a bug. Attaching dirty buffers to a dirty
1160 * blockdev's page can result in filesystem corruption, because
1161 * some of those buffers may be aliases of filesystem data.
1162 * grow_dev_page() will go BUG() if this happens.
1163 */
1164static int
1165grow_buffers(struct block_device *bdev, sector_t block, int size)
1166{
1167 struct page *page;
1168 pgoff_t index;
1169 int sizebits;
1170
1171 sizebits = -1;
1172 do {
1173 sizebits++;
1174 } while ((size << sizebits) < PAGE_SIZE);
1175
1176 index = block >> sizebits;
1177 block = index << sizebits;
1178
1179 /* Create a page with the proper size buffers.. */
1180 page = grow_dev_page(bdev, block, index, size);
1181 if (!page)
1182 return 0;
1183 unlock_page(page);
1184 page_cache_release(page);
1185 return 1;
1186}
1187
1188static struct buffer_head *
1189__getblk_slow(struct block_device *bdev, sector_t block, int size)
1190{
1191 /* Size must be multiple of hard sectorsize */
1192 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1193 (size < 512 || size > PAGE_SIZE))) {
1194 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1195 size);
1196 printk(KERN_ERR "hardsect size: %d\n",
1197 bdev_hardsect_size(bdev));
1198
1199 dump_stack();
1200 return NULL;
1201 }
1202
1203 for (;;) {
1204 struct buffer_head * bh;
1205
1206 bh = __find_get_block(bdev, block, size);
1207 if (bh)
1208 return bh;
1209
1210 if (!grow_buffers(bdev, block, size))
1211 free_more_memory();
1212 }
1213}
1214
1215/*
1216 * The relationship between dirty buffers and dirty pages:
1217 *
1218 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1219 * the page is tagged dirty in its radix tree.
1220 *
1221 * At all times, the dirtiness of the buffers represents the dirtiness of
1222 * subsections of the page. If the page has buffers, the page dirty bit is
1223 * merely a hint about the true dirty state.
1224 *
1225 * When a page is set dirty in its entirety, all its buffers are marked dirty
1226 * (if the page has buffers).
1227 *
1228 * When a buffer is marked dirty, its page is dirtied, but the page's other
1229 * buffers are not.
1230 *
1231 * Also. When blockdev buffers are explicitly read with bread(), they
1232 * individually become uptodate. But their backing page remains not
1233 * uptodate - even if all of its buffers are uptodate. A subsequent
1234 * block_read_full_page() against that page will discover all the uptodate
1235 * buffers, will set the page uptodate and will perform no I/O.
1236 */
1237
1238/**
1239 * mark_buffer_dirty - mark a buffer_head as needing writeout
1240 * @bh: the buffer_head to mark dirty
1241 *
1242 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1243 * backing page dirty, then tag the page as dirty in its address_space's radix
1244 * tree and then attach the address_space's inode to its superblock's dirty
1245 * inode list.
1246 *
1247 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1248 * mapping->tree_lock and the global inode_lock.
1249 */
1250void fastcall mark_buffer_dirty(struct buffer_head *bh)
1251{
1252 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1253 __set_page_dirty_nobuffers(bh->b_page);
1254}
1255
1256/*
1257 * Decrement a buffer_head's reference count. If all buffers against a page
1258 * have zero reference count, are clean and unlocked, and if the page is clean
1259 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1260 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1261 * a page but it ends up not being freed, and buffers may later be reattached).
1262 */
1263void __brelse(struct buffer_head * buf)
1264{
1265 if (atomic_read(&buf->b_count)) {
1266 put_bh(buf);
1267 return;
1268 }
1269 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1270 WARN_ON(1);
1271}
1272
1273/*
1274 * bforget() is like brelse(), except it discards any
1275 * potentially dirty data.
1276 */
1277void __bforget(struct buffer_head *bh)
1278{
1279 clear_buffer_dirty(bh);
1280 if (!list_empty(&bh->b_assoc_buffers)) {
1281 struct address_space *buffer_mapping = bh->b_page->mapping;
1282
1283 spin_lock(&buffer_mapping->private_lock);
1284 list_del_init(&bh->b_assoc_buffers);
1285 spin_unlock(&buffer_mapping->private_lock);
1286 }
1287 __brelse(bh);
1288}
1289
1290static struct buffer_head *__bread_slow(struct buffer_head *bh)
1291{
1292 lock_buffer(bh);
1293 if (buffer_uptodate(bh)) {
1294 unlock_buffer(bh);
1295 return bh;
1296 } else {
1297 get_bh(bh);
1298 bh->b_end_io = end_buffer_read_sync;
1299 submit_bh(READ, bh);
1300 wait_on_buffer(bh);
1301 if (buffer_uptodate(bh))
1302 return bh;
1303 }
1304 brelse(bh);
1305 return NULL;
1306}
1307
1308/*
1309 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1310 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1311 * refcount elevated by one when they're in an LRU. A buffer can only appear
1312 * once in a particular CPU's LRU. A single buffer can be present in multiple
1313 * CPU's LRUs at the same time.
1314 *
1315 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1316 * sb_find_get_block().
1317 *
1318 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1319 * a local interrupt disable for that.
1320 */
1321
1322#define BH_LRU_SIZE 8
1323
1324struct bh_lru {
1325 struct buffer_head *bhs[BH_LRU_SIZE];
1326};
1327
1328static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1329
1330#ifdef CONFIG_SMP
1331#define bh_lru_lock() local_irq_disable()
1332#define bh_lru_unlock() local_irq_enable()
1333#else
1334#define bh_lru_lock() preempt_disable()
1335#define bh_lru_unlock() preempt_enable()
1336#endif
1337
1338static inline void check_irqs_on(void)
1339{
1340#ifdef irqs_disabled
1341 BUG_ON(irqs_disabled());
1342#endif
1343}
1344
1345/*
1346 * The LRU management algorithm is dopey-but-simple. Sorry.
1347 */
1348static void bh_lru_install(struct buffer_head *bh)
1349{
1350 struct buffer_head *evictee = NULL;
1351 struct bh_lru *lru;
1352
1353 check_irqs_on();
1354 bh_lru_lock();
1355 lru = &__get_cpu_var(bh_lrus);
1356 if (lru->bhs[0] != bh) {
1357 struct buffer_head *bhs[BH_LRU_SIZE];
1358 int in;
1359 int out = 0;
1360
1361 get_bh(bh);
1362 bhs[out++] = bh;
1363 for (in = 0; in < BH_LRU_SIZE; in++) {
1364 struct buffer_head *bh2 = lru->bhs[in];
1365
1366 if (bh2 == bh) {
1367 __brelse(bh2);
1368 } else {
1369 if (out >= BH_LRU_SIZE) {
1370 BUG_ON(evictee != NULL);
1371 evictee = bh2;
1372 } else {
1373 bhs[out++] = bh2;
1374 }
1375 }
1376 }
1377 while (out < BH_LRU_SIZE)
1378 bhs[out++] = NULL;
1379 memcpy(lru->bhs, bhs, sizeof(bhs));
1380 }
1381 bh_lru_unlock();
1382
1383 if (evictee)
1384 __brelse(evictee);
1385}
1386
1387/*
1388 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1389 */
1390static struct buffer_head *
1391lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1392{
1393 struct buffer_head *ret = NULL;
1394 struct bh_lru *lru;
1395 int i;
1396
1397 check_irqs_on();
1398 bh_lru_lock();
1399 lru = &__get_cpu_var(bh_lrus);
1400 for (i = 0; i < BH_LRU_SIZE; i++) {
1401 struct buffer_head *bh = lru->bhs[i];
1402
1403 if (bh && bh->b_bdev == bdev &&
1404 bh->b_blocknr == block && bh->b_size == size) {
1405 if (i) {
1406 while (i) {
1407 lru->bhs[i] = lru->bhs[i - 1];
1408 i--;
1409 }
1410 lru->bhs[0] = bh;
1411 }
1412 get_bh(bh);
1413 ret = bh;
1414 break;
1415 }
1416 }
1417 bh_lru_unlock();
1418 return ret;
1419}
1420
1421/*
1422 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1423 * it in the LRU and mark it as accessed. If it is not present then return
1424 * NULL
1425 */
1426struct buffer_head *
1427__find_get_block(struct block_device *bdev, sector_t block, int size)
1428{
1429 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1430
1431 if (bh == NULL) {
1432 bh = __find_get_block_slow(bdev, block);
1433 if (bh)
1434 bh_lru_install(bh);
1435 }
1436 if (bh)
1437 touch_buffer(bh);
1438 return bh;
1439}
1440EXPORT_SYMBOL(__find_get_block);
1441
1442/*
1443 * __getblk will locate (and, if necessary, create) the buffer_head
1444 * which corresponds to the passed block_device, block and size. The
1445 * returned buffer has its reference count incremented.
1446 *
1447 * __getblk() cannot fail - it just keeps trying. If you pass it an
1448 * illegal block number, __getblk() will happily return a buffer_head
1449 * which represents the non-existent block. Very weird.
1450 *
1451 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1452 * attempt is failing. FIXME, perhaps?
1453 */
1454struct buffer_head *
1455__getblk(struct block_device *bdev, sector_t block, int size)
1456{
1457 struct buffer_head *bh = __find_get_block(bdev, block, size);
1458
1459 might_sleep();
1460 if (bh == NULL)
1461 bh = __getblk_slow(bdev, block, size);
1462 return bh;
1463}
1464EXPORT_SYMBOL(__getblk);
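/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual pattern for creating a brand-new metadata block.  sb_getblk()
 * (a wrapper around __getblk() from include/linux/buffer_head.h) may return
 * a buffer containing stale data, so the caller initialises it before
 * marking it dirty.  "sb" and "blocknr" are assumed to come from the caller.
 *
 *	static struct buffer_head *examplefs_new_block(struct super_block *sb,
 *						       sector_t blocknr)
 *	{
 *		struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 *		lock_buffer(bh);
 *		memset(bh->b_data, 0, sb->s_blocksize);
 *		set_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *		mark_buffer_dirty(bh);
 *		return bh;			// caller eventually brelse()s it
 *	}
 */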
1465
1466/*
1467 * Do async read-ahead on a buffer..
1468 */
1469void __breadahead(struct block_device *bdev, sector_t block, int size)
1470{
1471 struct buffer_head *bh = __getblk(bdev, block, size);
1472 if (likely(bh)) {
1473 ll_rw_block(READA, 1, &bh);
1474 brelse(bh);
1475 }
1476}
1477EXPORT_SYMBOL(__breadahead);
1478
1479/**
1480 * __bread() - reads a specified block and returns the bh
1481 * @bdev: the block_device to read from
1482 * @block: number of block
1483 * @size: size (in bytes) to read
1484 *
1485 * Reads a specified block, and returns buffer head that contains it.
1486 * It returns NULL if the block was unreadable.
1487 */
1488struct buffer_head *
1489__bread(struct block_device *bdev, sector_t block, int size)
1490{
1491 struct buffer_head *bh = __getblk(bdev, block, size);
1492
1493 if (likely(bh) && !buffer_uptodate(bh))
1494 bh = __bread_slow(bh);
1495 return bh;
1496}
1497EXPORT_SYMBOL(__bread);
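/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reading a single metadata block through the buffer cache.  sb_bread()
 * (from include/linux/buffer_head.h) ends up in __bread() above; the per-CPU
 * LRU described earlier makes repeated lookups cheap.  "sb", "blocknr" and
 * "buf" are assumed to come from the caller.
 *
 *	static int examplefs_read_block(struct super_block *sb, sector_t blocknr,
 *					void *buf)
 *	{
 *		struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *		if (!bh)
 *			return -EIO;		// block was unreadable
 *		memcpy(buf, bh->b_data, sb->s_blocksize);
 *		brelse(bh);			// drop the reference from __getblk()
 *		return 0;
 *	}
 */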
1498
1499/*
1500 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1501 * This doesn't race because it runs in each cpu either in irq
1502 * or with preempt disabled.
1503 */
1504static void invalidate_bh_lru(void *arg)
1505{
1506 struct bh_lru *b = &get_cpu_var(bh_lrus);
1507 int i;
1508
1509 for (i = 0; i < BH_LRU_SIZE; i++) {
1510 brelse(b->bhs[i]);
1511 b->bhs[i] = NULL;
1512 }
1513 put_cpu_var(bh_lrus);
1514}
1515
1516static void invalidate_bh_lrus(void)
1517{
1518 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1519}
1520
1521void set_bh_page(struct buffer_head *bh,
1522 struct page *page, unsigned long offset)
1523{
1524 bh->b_page = page;
1525 if (offset >= PAGE_SIZE)
1526 BUG();
1527 if (PageHighMem(page))
1528 /*
1529 * This catches illegal uses and preserves the offset:
1530 */
1531 bh->b_data = (char *)(0 + offset);
1532 else
1533 bh->b_data = page_address(page) + offset;
1534}
1535EXPORT_SYMBOL(set_bh_page);
1536
1537/*
1538 * Called when truncating a buffer on a page completely.
1539 */
1540static void discard_buffer(struct buffer_head * bh)
1541{
1542 lock_buffer(bh);
1543 clear_buffer_dirty(bh);
1544 bh->b_bdev = NULL;
1545 clear_buffer_mapped(bh);
1546 clear_buffer_req(bh);
1547 clear_buffer_new(bh);
1548 clear_buffer_delay(bh);
1549 unlock_buffer(bh);
1550}
1551
1552/**
1553 * try_to_release_page() - release old fs-specific metadata on a page
1554 *
1555 * @page: the page which the kernel is trying to free
1556 * @gfp_mask: memory allocation flags (and I/O mode)
1557 *
1558 * The address_space is to try to release any data against the page
1559 * (presumably at page->private). If the release was successful, return `1'.
1560 * Otherwise return zero.
1561 *
1562 * The @gfp_mask argument specifies whether I/O may be performed to release
1563 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1564 *
1565 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1566 */
1567int try_to_release_page(struct page *page, gfp_t gfp_mask)
1568{
1569 struct address_space * const mapping = page->mapping;
1570
1571 BUG_ON(!PageLocked(page));
1572 if (PageWriteback(page))
1573 return 0;
1574
1575 if (mapping && mapping->a_ops->releasepage)
1576 return mapping->a_ops->releasepage(page, gfp_mask);
1577 return try_to_free_buffers(page);
1578}
1579EXPORT_SYMBOL(try_to_release_page);
1580
1581/**
1582 * block_invalidatepage - invalidate part or all of a buffer-backed page
1583 *
1584 * @page: the page which is affected
1585 * @offset: the index of the truncation point
1586 *
1587 * block_invalidatepage() is called when all or part of the page has become
1588 * invalidated by a truncate operation.
1589 *
1590 * block_invalidatepage() does not have to release all buffers, but it must
1591 * ensure that no dirty buffer is left outside @offset and that no I/O
1592 * is underway against any of the blocks which are outside the truncation
1593 * point. Because the caller is about to free (and possibly reuse) those
1594 * blocks on-disk.
1595 */
1596void block_invalidatepage(struct page *page, unsigned long offset)
1597{
1598 struct buffer_head *head, *bh, *next;
1599 unsigned int curr_off = 0;
1600
1601 BUG_ON(!PageLocked(page));
1602 if (!page_has_buffers(page))
1603 goto out;
1604
1605 head = page_buffers(page);
1606 bh = head;
1607 do {
1608 unsigned int next_off = curr_off + bh->b_size;
1609 next = bh->b_this_page;
1610
1611 /*
1612 * is this block fully invalidated?
1613 */
1614 if (offset <= curr_off)
1615 discard_buffer(bh);
1616 curr_off = next_off;
1617 bh = next;
1618 } while (bh != head);
1619
1620 /*
1621 * We release buffers only if the entire page is being invalidated.
1622 * The get_block cached value has been unconditionally invalidated,
1623 * so real IO is not possible anymore.
1624 */
1625 if (offset == 0)
1626 try_to_release_page(page, 0);
1627out:
1628 return;
1629}
1630EXPORT_SYMBOL(block_invalidatepage);
1631
1632void do_invalidatepage(struct page *page, unsigned long offset)
1633{
1634 void (*invalidatepage)(struct page *, unsigned long);
1635 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1636 block_invalidatepage;
1637 (*invalidatepage)(page, offset);
1638}
1639
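/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a buffer-backed filesystem typically wires most of its
 * address_space_operations straight to the generic helpers in this file
 * (block_read_full_page(), block_write_full_page(), block_prepare_write()
 * and generic_commit_write() are defined further down).
 * "examplefs_get_block" is a hypothetical get_block_t callback.
 *
 *	static int examplefs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, examplefs_get_block);
 *	}
 *
 *	static int examplefs_writepage(struct page *page,
 *				       struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, examplefs_get_block, wbc);
 *	}
 *
 *	static int examplefs_prepare_write(struct file *file, struct page *page,
 *					   unsigned from, unsigned to)
 *	{
 *		return block_prepare_write(page, from, to, examplefs_get_block);
 *	}
 *
 *	static struct address_space_operations examplefs_aops = {
 *		.readpage	= examplefs_readpage,
 *		.writepage	= examplefs_writepage,
 *		.prepare_write	= examplefs_prepare_write,
 *		.commit_write	= generic_commit_write,
 *	};
 */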
1640/*
1641 * We attach and possibly dirty the buffers atomically wrt
1642 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1643 * is already excluded via the page lock.
1644 */
1645void create_empty_buffers(struct page *page,
1646 unsigned long blocksize, unsigned long b_state)
1647{
1648 struct buffer_head *bh, *head, *tail;
1649
1650 head = alloc_page_buffers(page, blocksize, 1);
1651 bh = head;
1652 do {
1653 bh->b_state |= b_state;
1654 tail = bh;
1655 bh = bh->b_this_page;
1656 } while (bh);
1657 tail->b_this_page = head;
1658
1659 spin_lock(&page->mapping->private_lock);
1660 if (PageUptodate(page) || PageDirty(page)) {
1661 bh = head;
1662 do {
1663 if (PageDirty(page))
1664 set_buffer_dirty(bh);
1665 if (PageUptodate(page))
1666 set_buffer_uptodate(bh);
1667 bh = bh->b_this_page;
1668 } while (bh != head);
1669 }
1670 attach_page_buffers(page, head);
1671 spin_unlock(&page->mapping->private_lock);
1672}
1673EXPORT_SYMBOL(create_empty_buffers);
1674
1675/*
1676 * We are taking a block for data and we don't want any output from any
1677 * buffer-cache aliases starting from return from that function and
1678 * until the moment when something will explicitly mark the buffer
1679 * dirty (hopefully that will not happen until we free that block ;-)
1680 * We don't even need to mark it not-uptodate - nobody can expect
1681 * anything from a newly allocated buffer anyway. We used to use
1682 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1683 * don't want to mark the alias unmapped, for example - it would confuse
1684 * anyone who might pick it with bread() afterwards...
1685 *
1686 * Also.. Note that bforget() doesn't lock the buffer. So there can
1687 * be writeout I/O going on against recently-freed buffers. We don't
1688 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1689 * only if we really need to. That happens here.
1690 */
1691void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1692{
1693 struct buffer_head *old_bh;
1694
1695 might_sleep();
1696
1697 old_bh = __find_get_block_slow(bdev, block);
1698 if (old_bh) {
1699 clear_buffer_dirty(old_bh);
1700 wait_on_buffer(old_bh);
1701 clear_buffer_req(old_bh);
1702 __brelse(old_bh);
1703 }
1704}
1705EXPORT_SYMBOL(unmap_underlying_metadata);
1706
1707/*
1708 * NOTE! All mapped/uptodate combinations are valid:
1709 *
1710 * Mapped Uptodate Meaning
1711 *
1712 * No No "unknown" - must do get_block()
1713 * No Yes "hole" - zero-filled
1714 * Yes No "allocated" - allocated on disk, not read in
1715 * Yes Yes "valid" - allocated and up-to-date in memory.
1716 *
1717 * "Dirty" is valid only with the last case (mapped+uptodate).
1718 */
1719
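/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the get_block_t contract that the table above describes.  A filesystem's
 * get_block maps a file-relative block to a disk block with map_bh() (from
 * include/linux/buffer_head.h) and sets BH_New when it allocates a block,
 * so that callers such as __block_prepare_write() below know to zero it and
 * to call unmap_underlying_metadata().  examplefs_find_block() and
 * examplefs_alloc_block() are hypothetical helpers.
 *
 *	static int examplefs_get_block(struct inode *inode, sector_t iblock,
 *				       struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys = examplefs_find_block(inode, iblock);
 *
 *		if (phys) {
 *			map_bh(bh_result, inode->i_sb, phys);
 *			return 0;
 *		}
 *		if (!create)
 *			return 0;		// a hole: !mapped, reads as zeroes
 *		phys = examplefs_alloc_block(inode, iblock);
 *		if (!phys)
 *			return -ENOSPC;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		set_buffer_new(bh_result);	// freshly allocated on disk
 *		return 0;
 *	}
 */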
1720/*
1721 * While block_write_full_page is writing back the dirty buffers under
1722 * the page lock, whoever dirtied the buffers may decide to clean them
1723 * again at any time. We handle that by only looking at the buffer
1724 * state inside lock_buffer().
1725 *
1726 * If block_write_full_page() is called for regular writeback
1727 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1728 * locked buffer. This only can happen if someone has written the buffer
1729 * directly, with submit_bh(). At the address_space level PageWriteback
1730 * prevents this contention from occurring.
1731 */
1732static int __block_write_full_page(struct inode *inode, struct page *page,
1733 get_block_t *get_block, struct writeback_control *wbc)
1734{
1735 int err;
1736 sector_t block;
1737 sector_t last_block;
1738 struct buffer_head *bh, *head;
1739 int nr_underway = 0;
1740
1741 BUG_ON(!PageLocked(page));
1742
1743 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1744
1745 if (!page_has_buffers(page)) {
1746 create_empty_buffers(page, 1 << inode->i_blkbits,
1747 (1 << BH_Dirty)|(1 << BH_Uptodate));
1748 }
1749
1750 /*
1751 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1752 * here, and the (potentially unmapped) buffers may become dirty at
1753 * any time. If a buffer becomes dirty here after we've inspected it
1754 * then we just miss that fact, and the page stays dirty.
1755 *
1756 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1757 * handle that here by just cleaning them.
1758 */
1759
1760 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1761 head = page_buffers(page);
1762 bh = head;
1763
1764 /*
1765 * Get all the dirty buffers mapped to disk addresses and
1766 * handle any aliases from the underlying blockdev's mapping.
1767 */
1768 do {
1769 if (block > last_block) {
1770 /*
1771 * mapped buffers outside i_size will occur, because
1772 * this page can be outside i_size when there is a
1773 * truncate in progress.
1774 */
1775 /*
1776 * The buffer was zeroed by block_write_full_page()
1777 */
1778 clear_buffer_dirty(bh);
1779 set_buffer_uptodate(bh);
1780 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1781 err = get_block(inode, block, bh, 1);
1782 if (err)
1783 goto recover;
1784 if (buffer_new(bh)) {
1785 /* blockdev mappings never come here */
1786 clear_buffer_new(bh);
1787 unmap_underlying_metadata(bh->b_bdev,
1788 bh->b_blocknr);
1789 }
1790 }
1791 bh = bh->b_this_page;
1792 block++;
1793 } while (bh != head);
1794
1795 do {
1796 if (!buffer_mapped(bh))
1797 continue;
1798 /*
1799 * If it's a fully non-blocking write attempt and we cannot
1800 * lock the buffer then redirty the page. Note that this can
1801 * potentially cause a busy-wait loop from pdflush and kswapd
1802 * activity, but those code paths have their own higher-level
1803 * throttling.
1804 */
1805 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1806 lock_buffer(bh);
1807 } else if (test_set_buffer_locked(bh)) {
1808 redirty_page_for_writepage(wbc, page);
1809 continue;
1810 }
1811 if (test_clear_buffer_dirty(bh)) {
1812 mark_buffer_async_write(bh);
1813 } else {
1814 unlock_buffer(bh);
1815 }
1816 } while ((bh = bh->b_this_page) != head);
1817
1818 /*
1819 * The page and its buffers are protected by PageWriteback(), so we can
1820 * drop the bh refcounts early.
1821 */
1822 BUG_ON(PageWriteback(page));
1823 set_page_writeback(page);
1824
1825 do {
1826 struct buffer_head *next = bh->b_this_page;
1827 if (buffer_async_write(bh)) {
1828 submit_bh(WRITE, bh);
1829 nr_underway++;
1830 }
1831 bh = next;
1832 } while (bh != head);
05937baa 1833 unlock_page(page);
1834
1835 err = 0;
1836done:
1837 if (nr_underway == 0) {
1838 /*
1839 * The page was marked dirty, but the buffers were
1840 * clean. Someone wrote them back by hand with
1841 * ll_rw_block/submit_bh. A rare case.
1842 */
1843 int uptodate = 1;
1844 do {
1845 if (!buffer_uptodate(bh)) {
1846 uptodate = 0;
1847 break;
1848 }
1849 bh = bh->b_this_page;
1850 } while (bh != head);
1851 if (uptodate)
1852 SetPageUptodate(page);
1853 end_page_writeback(page);
1854 /*
1855 * The page and buffer_heads can be released at any time from
1856 * here on.
1857 */
1858 wbc->pages_skipped++; /* We didn't write this page */
1859 }
1860 return err;
1861
1862recover:
1863 /*
1864 * ENOSPC, or some other error. We may already have added some
1865 * blocks to the file, so we need to write these out to avoid
1866 * exposing stale data.
1867 * The page is currently locked and not marked for writeback
1868 */
1869 bh = head;
1870 /* Recovery: lock and submit the mapped buffers */
1871 do {
1872 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1873 lock_buffer(bh);
1874 mark_buffer_async_write(bh);
1875 } else {
1876 /*
1877 * The buffer may have been set dirty during
1878 * attachment to a dirty page.
1879 */
1880 clear_buffer_dirty(bh);
1881 }
1882 } while ((bh = bh->b_this_page) != head);
1883 SetPageError(page);
1884 BUG_ON(PageWriteback(page));
1885 set_page_writeback(page);
1886 unlock_page(page);
1887 do {
1888 struct buffer_head *next = bh->b_this_page;
1889 if (buffer_async_write(bh)) {
1890 clear_buffer_dirty(bh);
1891 submit_bh(WRITE, bh);
1892 nr_underway++;
1893 }
1894 bh = next;
1895 } while (bh != head);
1896 goto done;
1897}
1898
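/*
 * Example (sketch only): the get_block_t callback that __block_write_full_page()
 * and __block_prepare_write() expect from the filesystem.  "myfs",
 * myfs_inode_info and MYFS_I() are hypothetical; a real implementation walks
 * its indirect blocks or extent tree and, when 'create' is non-zero, may
 * allocate a block and set BH_New on the result.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	struct myfs_inode_info *mi = MYFS_I(inode);	/* hypothetical per-inode info */

	if (iblock >= mi->nr_blocks) {
		if (!create)
			return 0;	/* past the mapped area: report a hole */
		return -ENOSPC;		/* toy layout: cannot allocate here */
	}
	/* map_bh() sets b_bdev, b_blocknr and the BH_Mapped bit */
	map_bh(bh_result, inode->i_sb, mi->first_block + iblock);
	return 0;
}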
1899static int __block_prepare_write(struct inode *inode, struct page *page,
1900 unsigned from, unsigned to, get_block_t *get_block)
1901{
1902 unsigned block_start, block_end;
1903 sector_t block;
1904 int err = 0;
1905 unsigned blocksize, bbits;
1906 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1907
1908 BUG_ON(!PageLocked(page));
1909 BUG_ON(from > PAGE_CACHE_SIZE);
1910 BUG_ON(to > PAGE_CACHE_SIZE);
1911 BUG_ON(from > to);
1912
1913 blocksize = 1 << inode->i_blkbits;
1914 if (!page_has_buffers(page))
1915 create_empty_buffers(page, blocksize, 0);
1916 head = page_buffers(page);
1917
1918 bbits = inode->i_blkbits;
1919 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1920
1921 for(bh = head, block_start = 0; bh != head || !block_start;
1922 block++, block_start=block_end, bh = bh->b_this_page) {
1923 block_end = block_start + blocksize;
1924 if (block_end <= from || block_start >= to) {
1925 if (PageUptodate(page)) {
1926 if (!buffer_uptodate(bh))
1927 set_buffer_uptodate(bh);
1928 }
1929 continue;
1930 }
1931 if (buffer_new(bh))
1932 clear_buffer_new(bh);
1933 if (!buffer_mapped(bh)) {
1934 err = get_block(inode, block, bh, 1);
1935 if (err)
f3ddbdc6 1936 break;
1da177e4 1937 if (buffer_new(bh)) {
1938 unmap_underlying_metadata(bh->b_bdev,
1939 bh->b_blocknr);
1940 if (PageUptodate(page)) {
1941 set_buffer_uptodate(bh);
1942 continue;
1943 }
1944 if (block_end > to || block_start < from) {
1945 void *kaddr;
1946
1947 kaddr = kmap_atomic(page, KM_USER0);
1948 if (block_end > to)
1949 memset(kaddr+to, 0,
1950 block_end-to);
1951 if (block_start < from)
1952 memset(kaddr+block_start,
1953 0, from-block_start);
1954 flush_dcache_page(page);
1955 kunmap_atomic(kaddr, KM_USER0);
1956 }
1957 continue;
1958 }
1959 }
1960 if (PageUptodate(page)) {
1961 if (!buffer_uptodate(bh))
1962 set_buffer_uptodate(bh);
1963 continue;
1964 }
1965 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1966 (block_start < from || block_end > to)) {
1967 ll_rw_block(READ, 1, &bh);
1968 *wait_bh++=bh;
1969 }
1970 }
1971 /*
1972 * If we issued read requests - let them complete.
1973 */
1974 while(wait_bh > wait) {
1975 wait_on_buffer(*--wait_bh);
1976 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1977 err = -EIO;
1da177e4 1978 }
1979 if (!err) {
1980 bh = head;
1981 do {
1982 if (buffer_new(bh))
1983 clear_buffer_new(bh);
1984 } while ((bh = bh->b_this_page) != head);
1985 return 0;
1986 }
f3ddbdc6 1987 /* Error case: */
1988 /*
1989 * Zero out any newly allocated blocks to avoid exposing stale
1990 * data. If BH_New is set, we know that the block was newly
1991 * allocated in the above loop.
1992 */
1993 bh = head;
1994 block_start = 0;
1995 do {
1996 block_end = block_start+blocksize;
1997 if (block_end <= from)
1998 goto next_bh;
1999 if (block_start >= to)
2000 break;
2001 if (buffer_new(bh)) {
2002 void *kaddr;
2003
2004 clear_buffer_new(bh);
2005 kaddr = kmap_atomic(page, KM_USER0);
2006 memset(kaddr+block_start, 0, bh->b_size);
2007 kunmap_atomic(kaddr, KM_USER0);
2008 set_buffer_uptodate(bh);
2009 mark_buffer_dirty(bh);
2010 }
2011next_bh:
2012 block_start = block_end;
2013 bh = bh->b_this_page;
2014 } while (bh != head);
2015 return err;
2016}
2017
2018static int __block_commit_write(struct inode *inode, struct page *page,
2019 unsigned from, unsigned to)
2020{
2021 unsigned block_start, block_end;
2022 int partial = 0;
2023 unsigned blocksize;
2024 struct buffer_head *bh, *head;
2025
2026 blocksize = 1 << inode->i_blkbits;
2027
2028 for(bh = head = page_buffers(page), block_start = 0;
2029 bh != head || !block_start;
2030 block_start=block_end, bh = bh->b_this_page) {
2031 block_end = block_start + blocksize;
2032 if (block_end <= from || block_start >= to) {
2033 if (!buffer_uptodate(bh))
2034 partial = 1;
2035 } else {
2036 set_buffer_uptodate(bh);
2037 mark_buffer_dirty(bh);
2038 }
2039 }
2040
2041 /*
2042 * If this is a partial write which happened to make all buffers
2043 * uptodate then we can optimize away a bogus readpage() for
2044 * the next read(). Here we 'discover' whether the page went
2045 * uptodate as a result of this (potentially partial) write.
2046 */
2047 if (!partial)
2048 SetPageUptodate(page);
2049 return 0;
2050}
2051
2052/*
2053 * Generic "read page" function for block devices that have the normal
2054 * get_block functionality. This is most of the block device filesystems.
2055 * Reads the page asynchronously --- the unlock_buffer() and
2056 * set/clear_buffer_uptodate() functions propagate buffer state into the
2057 * page struct once IO has completed.
2058 */
2059int block_read_full_page(struct page *page, get_block_t *get_block)
2060{
2061 struct inode *inode = page->mapping->host;
2062 sector_t iblock, lblock;
2063 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2064 unsigned int blocksize;
2065 int nr, i;
2066 int fully_mapped = 1;
2067
cd7619d6 2068 BUG_ON(!PageLocked(page));
2069 blocksize = 1 << inode->i_blkbits;
2070 if (!page_has_buffers(page))
2071 create_empty_buffers(page, blocksize, 0);
2072 head = page_buffers(page);
2073
2074 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2075 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2076 bh = head;
2077 nr = 0;
2078 i = 0;
2079
2080 do {
2081 if (buffer_uptodate(bh))
2082 continue;
2083
2084 if (!buffer_mapped(bh)) {
2085 int err = 0;
2086
2087 fully_mapped = 0;
2088 if (iblock < lblock) {
2089 err = get_block(inode, iblock, bh, 0);
2090 if (err)
2091 SetPageError(page);
2092 }
2093 if (!buffer_mapped(bh)) {
2094 void *kaddr = kmap_atomic(page, KM_USER0);
2095 memset(kaddr + i * blocksize, 0, blocksize);
2096 flush_dcache_page(page);
2097 kunmap_atomic(kaddr, KM_USER0);
2098 if (!err)
2099 set_buffer_uptodate(bh);
2100 continue;
2101 }
2102 /*
2103 * get_block() might have updated the buffer
2104 * synchronously
2105 */
2106 if (buffer_uptodate(bh))
2107 continue;
2108 }
2109 arr[nr++] = bh;
2110 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2111
2112 if (fully_mapped)
2113 SetPageMappedToDisk(page);
2114
2115 if (!nr) {
2116 /*
2117 * All buffers are uptodate - we can set the page uptodate
2118 * as well. But not if get_block() returned an error.
2119 */
2120 if (!PageError(page))
2121 SetPageUptodate(page);
2122 unlock_page(page);
2123 return 0;
2124 }
2125
2126 /* Stage two: lock the buffers */
2127 for (i = 0; i < nr; i++) {
2128 bh = arr[i];
2129 lock_buffer(bh);
2130 mark_buffer_async_read(bh);
2131 }
2132
2133 /*
2134 * Stage 3: start the IO. Check for uptodateness
2135 * inside the buffer lock in case another process reading
2136 * the underlying blockdev brought it uptodate (the sct fix).
2137 */
2138 for (i = 0; i < nr; i++) {
2139 bh = arr[i];
2140 if (buffer_uptodate(bh))
2141 end_buffer_async_read(bh, 1);
2142 else
2143 submit_bh(READ, bh);
2144 }
2145 return 0;
2146}
2147
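/*
 * Example (sketch): most block-based filesystems implement ->readpage by
 * handing their get_block callback to block_read_full_page(), much as ext2
 * does.  myfs_readpage and myfs_get_block are hypothetical names.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}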
2148/* utility function for filesystems that need to do work on expanding
2149 * truncates. Uses prepare/commit_write to allow the filesystem to
2150 * deal with the hole.
2151 */
2152static int __generic_cont_expand(struct inode *inode, loff_t size,
2153 pgoff_t index, unsigned int offset)
2154{
2155 struct address_space *mapping = inode->i_mapping;
2156 struct page *page;
05eb0b51 2157 unsigned long limit;
2158 int err;
2159
2160 err = -EFBIG;
2161 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2162 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2163 send_sig(SIGXFSZ, current, 0);
2164 goto out;
2165 }
2166 if (size > inode->i_sb->s_maxbytes)
2167 goto out;
2168
2169 err = -ENOMEM;
2170 page = grab_cache_page(mapping, index);
2171 if (!page)
2172 goto out;
2173 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2174 if (err) {
2175 /*
2176 * ->prepare_write() may have instantiated a few blocks
2177 * outside i_size. Trim these off again.
2178 */
2179 unlock_page(page);
2180 page_cache_release(page);
2181 vmtruncate(inode, inode->i_size);
2182 goto out;
1da177e4 2183 }
2184
2185 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2186
2187 unlock_page(page);
2188 page_cache_release(page);
2189 if (err > 0)
2190 err = 0;
2191out:
2192 return err;
2193}
2194
2195int generic_cont_expand(struct inode *inode, loff_t size)
2196{
2197 pgoff_t index;
2198 unsigned int offset;
2199
2200 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2201
2202 /* ugh. in prepare/commit_write, if from==to==start of block, we
2203 ** skip the prepare. make sure we never send an offset for the start
2204 ** of a block
2205 */
2206 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2207 /* caller must handle this extra byte. */
2208 offset++;
2209 }
2210 index = size >> PAGE_CACHE_SHIFT;
2211
2212 return __generic_cont_expand(inode, size, index, offset);
2213}
2214
2215int generic_cont_expand_simple(struct inode *inode, loff_t size)
2216{
2217 loff_t pos = size - 1;
2218 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2219 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2220
2221 /* prepare/commit_write can handle even if from==to==start of block. */
2222 return __generic_cont_expand(inode, size, index, offset);
2223}
2224
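/*
 * Example (sketch): a filesystem that cannot represent holes would call one
 * of the helpers above when a setattr/extend grows the file, so that the
 * newly exposed range is zero-filled through prepare/commit_write rather
 * than left stale.  generic_cont_expand_simple() is the easier variant since
 * it copes with a from==to==start-of-block offset.  myfs_grow is hypothetical.
 */
static int myfs_grow(struct inode *inode, loff_t new_size)
{
	if (new_size <= i_size_read(inode))
		return 0;
	return generic_cont_expand_simple(inode, new_size);
}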
2225/*
2226 * For moronic filesystems that do not allow holes in files.
2227 * We may have to extend the file.
2228 */
2229
2230int cont_prepare_write(struct page *page, unsigned offset,
2231 unsigned to, get_block_t *get_block, loff_t *bytes)
2232{
2233 struct address_space *mapping = page->mapping;
2234 struct inode *inode = mapping->host;
2235 struct page *new_page;
2236 pgoff_t pgpos;
2237 long status;
2238 unsigned zerofrom;
2239 unsigned blocksize = 1 << inode->i_blkbits;
2240 void *kaddr;
2241
2242 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2243 status = -ENOMEM;
2244 new_page = grab_cache_page(mapping, pgpos);
2245 if (!new_page)
2246 goto out;
2247 /* we might sleep */
2248 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2249 unlock_page(new_page);
2250 page_cache_release(new_page);
2251 continue;
2252 }
2253 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2254 if (zerofrom & (blocksize-1)) {
2255 *bytes |= (blocksize-1);
2256 (*bytes)++;
2257 }
2258 status = __block_prepare_write(inode, new_page, zerofrom,
2259 PAGE_CACHE_SIZE, get_block);
2260 if (status)
2261 goto out_unmap;
2262 kaddr = kmap_atomic(new_page, KM_USER0);
2263 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2264 flush_dcache_page(new_page);
2265 kunmap_atomic(kaddr, KM_USER0);
2266 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2267 unlock_page(new_page);
2268 page_cache_release(new_page);
2269 }
2270
2271 if (page->index < pgpos) {
2272 /* completely inside the area */
2273 zerofrom = offset;
2274 } else {
2275 /* page covers the boundary, find the boundary offset */
2276 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2277
2278 /* if we will expand the thing last block will be filled */
2279 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2280 *bytes |= (blocksize-1);
2281 (*bytes)++;
2282 }
2283
2284 /* starting below the boundary? Nothing to zero out */
2285 if (offset <= zerofrom)
2286 zerofrom = offset;
2287 }
2288 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2289 if (status)
2290 goto out1;
2291 if (zerofrom < offset) {
2292 kaddr = kmap_atomic(page, KM_USER0);
2293 memset(kaddr+zerofrom, 0, offset-zerofrom);
2294 flush_dcache_page(page);
2295 kunmap_atomic(kaddr, KM_USER0);
2296 __block_commit_write(inode, page, zerofrom, offset);
2297 }
2298 return 0;
2299out1:
2300 ClearPageUptodate(page);
2301 return status;
2302
2303out_unmap:
2304 ClearPageUptodate(new_page);
2305 unlock_page(new_page);
2306 page_cache_release(new_page);
2307out:
2308 return status;
2309}
2310
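/*
 * Example (sketch): a ->prepare_write for such a hole-less filesystem.  The
 * loff_t pointer handed to cont_prepare_write() must track how far the file
 * has been zero-filled so far; FAT keeps that in its per-inode mmu_private
 * field, and MYFS_I(...)->mmu_private below is a hypothetical stand-in.
 */
static int myfs_cont_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, myfs_get_block,
				  &MYFS_I(page->mapping->host)->mmu_private);
}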
2311int block_prepare_write(struct page *page, unsigned from, unsigned to,
2312 get_block_t *get_block)
2313{
2314 struct inode *inode = page->mapping->host;
2315 int err = __block_prepare_write(inode, page, from, to, get_block);
2316 if (err)
2317 ClearPageUptodate(page);
2318 return err;
2319}
2320
2321int block_commit_write(struct page *page, unsigned from, unsigned to)
2322{
2323 struct inode *inode = page->mapping->host;
2324 __block_commit_write(inode,page,from,to);
2325 return 0;
2326}
2327
2328int generic_commit_write(struct file *file, struct page *page,
2329 unsigned from, unsigned to)
2330{
2331 struct inode *inode = page->mapping->host;
2332 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2333 __block_commit_write(inode,page,from,to);
2334 /*
2335 * No need to use i_size_read() here, the i_size
1b1dcc1b 2336 * cannot change under us because we hold i_mutex.
2337 */
2338 if (pos > inode->i_size) {
2339 i_size_write(inode, pos);
2340 mark_inode_dirty(inode);
2341 }
2342 return 0;
2343}
2344
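/*
 * Example (sketch): the usual ->prepare_write/->commit_write pairing for a
 * filesystem that does allow holes.  block_prepare_write() maps and, where
 * needed, reads the affected blocks; generic_commit_write() can be used as
 * the ->commit_write method directly, since it also updates i_size.
 * myfs_prepare_write is a hypothetical name.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}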
2345
2346/*
2347 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2348 * immediately, while under the page lock. So it needs a special end_io
2349 * handler which does not touch the bh after unlocking it.
2350 *
2351 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2352 * a race there is benign: unlock_buffer() only uses the bh's address for
2353 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2354 * itself.
2355 */
2356static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2357{
2358 if (uptodate) {
2359 set_buffer_uptodate(bh);
2360 } else {
2361 /* This happens, due to failed READA attempts. */
2362 clear_buffer_uptodate(bh);
2363 }
2364 unlock_buffer(bh);
2365}
2366
2367/*
2368 * On entry, the page is fully not uptodate.
2369 * On exit the page is fully uptodate in the areas outside (from,to)
2370 */
2371int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2372 get_block_t *get_block)
2373{
2374 struct inode *inode = page->mapping->host;
2375 const unsigned blkbits = inode->i_blkbits;
2376 const unsigned blocksize = 1 << blkbits;
2377 struct buffer_head map_bh;
2378 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2379 unsigned block_in_page;
2380 unsigned block_start;
2381 sector_t block_in_file;
2382 char *kaddr;
2383 int nr_reads = 0;
2384 int i;
2385 int ret = 0;
2386 int is_mapped_to_disk = 1;
2387 int dirtied_it = 0;
2388
2389 if (PageMappedToDisk(page))
2390 return 0;
2391
2392 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2393 map_bh.b_page = page;
2394
2395 /*
2396 * We loop across all blocks in the page, whether or not they are
2397 * part of the affected region. This is so we can discover if the
2398 * page is fully mapped-to-disk.
2399 */
2400 for (block_start = 0, block_in_page = 0;
2401 block_start < PAGE_CACHE_SIZE;
2402 block_in_page++, block_start += blocksize) {
2403 unsigned block_end = block_start + blocksize;
2404 int create;
2405
2406 map_bh.b_state = 0;
2407 create = 1;
2408 if (block_start >= to)
2409 create = 0;
2410 ret = get_block(inode, block_in_file + block_in_page,
2411 &map_bh, create);
2412 if (ret)
2413 goto failed;
2414 if (!buffer_mapped(&map_bh))
2415 is_mapped_to_disk = 0;
2416 if (buffer_new(&map_bh))
2417 unmap_underlying_metadata(map_bh.b_bdev,
2418 map_bh.b_blocknr);
2419 if (PageUptodate(page))
2420 continue;
2421 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2422 kaddr = kmap_atomic(page, KM_USER0);
2423 if (block_start < from) {
2424 memset(kaddr+block_start, 0, from-block_start);
2425 dirtied_it = 1;
2426 }
2427 if (block_end > to) {
2428 memset(kaddr + to, 0, block_end - to);
2429 dirtied_it = 1;
2430 }
2431 flush_dcache_page(page);
2432 kunmap_atomic(kaddr, KM_USER0);
2433 continue;
2434 }
2435 if (buffer_uptodate(&map_bh))
2436 continue; /* reiserfs does this */
2437 if (block_start < from || block_end > to) {
2438 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2439
2440 if (!bh) {
2441 ret = -ENOMEM;
2442 goto failed;
2443 }
2444 bh->b_state = map_bh.b_state;
2445 atomic_set(&bh->b_count, 0);
2446 bh->b_this_page = NULL;
2447 bh->b_page = page;
2448 bh->b_blocknr = map_bh.b_blocknr;
2449 bh->b_size = blocksize;
2450 bh->b_data = (char *)(long)block_start;
2451 bh->b_bdev = map_bh.b_bdev;
2452 bh->b_private = NULL;
2453 read_bh[nr_reads++] = bh;
2454 }
2455 }
2456
2457 if (nr_reads) {
2458 struct buffer_head *bh;
2459
2460 /*
2461 * The page is locked, so these buffers are protected from
2462 * any VM or truncate activity. Hence we don't need to care
2463 * for the buffer_head refcounts.
2464 */
2465 for (i = 0; i < nr_reads; i++) {
2466 bh = read_bh[i];
2467 lock_buffer(bh);
2468 bh->b_end_io = end_buffer_read_nobh;
2469 submit_bh(READ, bh);
2470 }
2471 for (i = 0; i < nr_reads; i++) {
2472 bh = read_bh[i];
2473 wait_on_buffer(bh);
2474 if (!buffer_uptodate(bh))
2475 ret = -EIO;
2476 free_buffer_head(bh);
2477 read_bh[i] = NULL;
2478 }
2479 if (ret)
2480 goto failed;
2481 }
2482
2483 if (is_mapped_to_disk)
2484 SetPageMappedToDisk(page);
2485 SetPageUptodate(page);
2486
2487 /*
2488 * Setting the page dirty here isn't necessary for the prepare_write
2489 * function - commit_write will do that. But if/when this function is
2490 * used within the pagefault handler to ensure that all mmapped pages
2491 * have backing space in the filesystem, we will need to dirty the page
2492 * if its contents were altered.
2493 */
2494 if (dirtied_it)
2495 set_page_dirty(page);
2496
2497 return 0;
2498
2499failed:
2500 for (i = 0; i < nr_reads; i++) {
2501 if (read_bh[i])
2502 free_buffer_head(read_bh[i]);
2503 }
2504
2505 /*
2506 * Error recovery is pretty slack. Clear the page and mark it dirty
2507 * so we'll later zero out any blocks which _were_ allocated.
2508 */
2509 kaddr = kmap_atomic(page, KM_USER0);
2510 memset(kaddr, 0, PAGE_CACHE_SIZE);
2511 kunmap_atomic(kaddr, KM_USER0);
2512 SetPageUptodate(page);
2513 set_page_dirty(page);
2514 return ret;
2515}
2516EXPORT_SYMBOL(nobh_prepare_write);
2517
2518int nobh_commit_write(struct file *file, struct page *page,
2519 unsigned from, unsigned to)
2520{
2521 struct inode *inode = page->mapping->host;
2522 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2523
2524 set_page_dirty(page);
2525 if (pos > inode->i_size) {
2526 i_size_write(inode, pos);
2527 mark_inode_dirty(inode);
2528 }
2529 return 0;
2530}
2531EXPORT_SYMBOL(nobh_commit_write);
2532
2533/*
2534 * nobh_writepage() - based on block_write_full_page() except
2535 * that it tries to operate without attaching bufferheads to
2536 * the page.
2537 */
2538int nobh_writepage(struct page *page, get_block_t *get_block,
2539 struct writeback_control *wbc)
2540{
2541 struct inode * const inode = page->mapping->host;
2542 loff_t i_size = i_size_read(inode);
2543 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2544 unsigned offset;
2545 void *kaddr;
2546 int ret;
2547
2548 /* Is the page fully inside i_size? */
2549 if (page->index < end_index)
2550 goto out;
2551
2552 /* Is the page fully outside i_size? (truncate in progress) */
2553 offset = i_size & (PAGE_CACHE_SIZE-1);
2554 if (page->index >= end_index+1 || !offset) {
2555 /*
2556 * The page may have dirty, unmapped buffers. For example,
2557 * they may have been added in ext3_writepage(). Make them
2558 * freeable here, so the page does not leak.
2559 */
2560#if 0
2561 /* Not really sure about this - do we need this ? */
2562 if (page->mapping->a_ops->invalidatepage)
2563 page->mapping->a_ops->invalidatepage(page, offset);
2564#endif
2565 unlock_page(page);
2566 return 0; /* don't care */
2567 }
2568
2569 /*
2570 * The page straddles i_size. It must be zeroed out on each and every
2571 * writepage invocation because it may be mmapped. "A file is mapped
2572 * in multiples of the page size. For a file that is not a multiple of
2573 * the page size, the remaining memory is zeroed when mapped, and
2574 * writes to that region are not written out to the file."
2575 */
2576 kaddr = kmap_atomic(page, KM_USER0);
2577 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2578 flush_dcache_page(page);
2579 kunmap_atomic(kaddr, KM_USER0);
2580out:
2581 ret = mpage_writepage(page, get_block, wbc);
2582 if (ret == -EAGAIN)
2583 ret = __block_write_full_page(inode, page, get_block, wbc);
2584 return ret;
2585}
2586EXPORT_SYMBOL(nobh_writepage);
2587
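/*
 * Example (sketch): wiring the nobh_* helpers into a filesystem's
 * address_space_operations, roughly as ext2 does for its "nobh" mode, so
 * that data pages do not keep buffer_heads attached after the write has
 * been set up.  All myfs_* names are hypothetical.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};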
2588/*
2589 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2590 */
2591int nobh_truncate_page(struct address_space *mapping, loff_t from)
2592{
2593 struct inode *inode = mapping->host;
2594 unsigned blocksize = 1 << inode->i_blkbits;
2595 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2596 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2597 unsigned to;
2598 struct page *page;
2599 struct address_space_operations *a_ops = mapping->a_ops;
2600 char *kaddr;
2601 int ret = 0;
2602
2603 if ((offset & (blocksize - 1)) == 0)
2604 goto out;
2605
2606 ret = -ENOMEM;
2607 page = grab_cache_page(mapping, index);
2608 if (!page)
2609 goto out;
2610
2611 to = (offset + blocksize) & ~(blocksize - 1);
2612 ret = a_ops->prepare_write(NULL, page, offset, to);
2613 if (ret == 0) {
2614 kaddr = kmap_atomic(page, KM_USER0);
2615 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2616 flush_dcache_page(page);
2617 kunmap_atomic(kaddr, KM_USER0);
2618 set_page_dirty(page);
2619 }
2620 unlock_page(page);
2621 page_cache_release(page);
2622out:
2623 return ret;
2624}
2625EXPORT_SYMBOL(nobh_truncate_page);
2626
2627int block_truncate_page(struct address_space *mapping,
2628 loff_t from, get_block_t *get_block)
2629{
2630 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2631 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2632 unsigned blocksize;
54b21a79 2633 sector_t iblock;
2634 unsigned length, pos;
2635 struct inode *inode = mapping->host;
2636 struct page *page;
2637 struct buffer_head *bh;
2638 void *kaddr;
2639 int err;
2640
2641 blocksize = 1 << inode->i_blkbits;
2642 length = offset & (blocksize - 1);
2643
2644 /* Block boundary? Nothing to do */
2645 if (!length)
2646 return 0;
2647
2648 length = blocksize - length;
54b21a79 2649 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2650
2651 page = grab_cache_page(mapping, index);
2652 err = -ENOMEM;
2653 if (!page)
2654 goto out;
2655
2656 if (!page_has_buffers(page))
2657 create_empty_buffers(page, blocksize, 0);
2658
2659 /* Find the buffer that contains "offset" */
2660 bh = page_buffers(page);
2661 pos = blocksize;
2662 while (offset >= pos) {
2663 bh = bh->b_this_page;
2664 iblock++;
2665 pos += blocksize;
2666 }
2667
2668 err = 0;
2669 if (!buffer_mapped(bh)) {
2670 err = get_block(inode, iblock, bh, 0);
2671 if (err)
2672 goto unlock;
2673 /* unmapped? It's a hole - nothing to do */
2674 if (!buffer_mapped(bh))
2675 goto unlock;
2676 }
2677
2678 /* Ok, it's mapped. Make sure it's up-to-date */
2679 if (PageUptodate(page))
2680 set_buffer_uptodate(bh);
2681
2682 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2683 err = -EIO;
2684 ll_rw_block(READ, 1, &bh);
2685 wait_on_buffer(bh);
2686 /* Uhhuh. Read error. Complain and punt. */
2687 if (!buffer_uptodate(bh))
2688 goto unlock;
2689 }
2690
2691 kaddr = kmap_atomic(page, KM_USER0);
2692 memset(kaddr + offset, 0, length);
2693 flush_dcache_page(page);
2694 kunmap_atomic(kaddr, KM_USER0);
2695
2696 mark_buffer_dirty(bh);
2697 err = 0;
2698
2699unlock:
2700 unlock_page(page);
2701 page_cache_release(page);
2702out:
2703 return err;
2704}
2705
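/*
 * Example (sketch): a truncate path.  Before the filesystem-specific code
 * frees blocks past the new size, block_truncate_page() zeroes the tail of
 * the last remaining block so a later extension cannot expose stale data.
 * myfs_truncate and myfs_free_blocks_beyond are hypothetical.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks_beyond(inode, inode->i_size);	/* fs-specific part */
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
}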
2706/*
2707 * The generic ->writepage function for buffer-backed address_spaces
2708 */
2709int block_write_full_page(struct page *page, get_block_t *get_block,
2710 struct writeback_control *wbc)
2711{
2712 struct inode * const inode = page->mapping->host;
2713 loff_t i_size = i_size_read(inode);
2714 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2715 unsigned offset;
2716 void *kaddr;
2717
2718 /* Is the page fully inside i_size? */
2719 if (page->index < end_index)
2720 return __block_write_full_page(inode, page, get_block, wbc);
2721
2722 /* Is the page fully outside i_size? (truncate in progress) */
2723 offset = i_size & (PAGE_CACHE_SIZE-1);
2724 if (page->index >= end_index+1 || !offset) {
2725 /*
2726 * The page may have dirty, unmapped buffers. For example,
2727 * they may have been added in ext3_writepage(). Make them
2728 * freeable here, so the page does not leak.
2729 */
aaa4059b 2730 do_invalidatepage(page, 0);
2731 unlock_page(page);
2732 return 0; /* don't care */
2733 }
2734
2735 /*
2736 * The page straddles i_size. It must be zeroed out on each and every
2737 * writepage invocation because it may be mmapped. "A file is mapped
2738 * in multiples of the page size. For a file that is not a multiple of
2739 * the page size, the remaining memory is zeroed when mapped, and
2740 * writes to that region are not written out to the file."
2741 */
2742 kaddr = kmap_atomic(page, KM_USER0);
2743 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2744 flush_dcache_page(page);
2745 kunmap_atomic(kaddr, KM_USER0);
2746 return __block_write_full_page(inode, page, get_block, wbc);
2747}
2748
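/*
 * Example (sketch): a ->writepage that simply defers to the helper above,
 * as ext2-style filesystems do.  myfs_writepage is a hypothetical name.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}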
2749sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2750 get_block_t *get_block)
2751{
2752 struct buffer_head tmp;
2753 struct inode *inode = mapping->host;
2754 tmp.b_state = 0;
2755 tmp.b_blocknr = 0;
2756 get_block(inode, block, &tmp, 0);
2757 return tmp.b_blocknr;
2758}
2759
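/*
 * Example (sketch): ->bmap (used by the FIBMAP ioctl and by swap files)
 * forwards to generic_block_bmap(); note that it probes with create == 0,
 * so it never allocates.  The address_space_operations table below ties the
 * earlier hypothetical myfs_* sketches together, in the style of ext2.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= myfs_bmap,
};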
2760static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2761{
2762 struct buffer_head *bh = bio->bi_private;
2763
2764 if (bio->bi_size)
2765 return 1;
2766
2767 if (err == -EOPNOTSUPP) {
2768 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2769 set_bit(BH_Eopnotsupp, &bh->b_state);
2770 }
2771
2772 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2773 bio_put(bio);
2774 return 0;
2775}
2776
2777int submit_bh(int rw, struct buffer_head * bh)
2778{
2779 struct bio *bio;
2780 int ret = 0;
2781
2782 BUG_ON(!buffer_locked(bh));
2783 BUG_ON(!buffer_mapped(bh));
2784 BUG_ON(!bh->b_end_io);
2785
2786 if (buffer_ordered(bh) && (rw == WRITE))
2787 rw = WRITE_BARRIER;
2788
2789 /*
2790 * Only clear out a write error when rewriting, should this
2791 * include WRITE_SYNC as well?
2792 */
2793 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2794 clear_buffer_write_io_error(bh);
2795
2796 /*
2797 * from here on down, it's all bio -- do the initial mapping,
2798 * submit_bio -> generic_make_request may further map this bio around
2799 */
2800 bio = bio_alloc(GFP_NOIO, 1);
2801
2802 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2803 bio->bi_bdev = bh->b_bdev;
2804 bio->bi_io_vec[0].bv_page = bh->b_page;
2805 bio->bi_io_vec[0].bv_len = bh->b_size;
2806 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2807
2808 bio->bi_vcnt = 1;
2809 bio->bi_idx = 0;
2810 bio->bi_size = bh->b_size;
2811
2812 bio->bi_end_io = end_bio_bh_io_sync;
2813 bio->bi_private = bh;
2814
2815 bio_get(bio);
2816 submit_bio(rw, bio);
2817
2818 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2819 ret = -EOPNOTSUPP;
2820
2821 bio_put(bio);
2822 return ret;
2823}
2824
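/*
 * Example (sketch): reading a single buffer by hand with submit_bh(), the
 * same pattern ll_rw_block() below and __bread() use.  The caller must lock
 * the buffer, take an extra reference (end_buffer_read_sync() drops it) and
 * install b_end_io before submitting.  myfs_read_one_bh is hypothetical.
 */
static int myfs_read_one_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);	/* already valid, nothing to do */
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}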
2825/**
2826 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2827 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2828 * @nr: number of &struct buffer_heads in the array
2829 * @bhs: array of pointers to &struct buffer_head
2830 *
2831 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2832 * requests an I/O operation on them, either a %READ or a %WRITE. %SWRITE
2833 * is like %WRITE, except that it makes sure the *current* data in the buffers
2834 * is sent to disk. The %READA option is described in the documentation
2835 * for generic_make_request(), which ll_rw_block() calls.
2836 *
2837 * This function drops any buffer that it cannot get a lock on (with the
2838 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2839 * clean when doing a write request, and any buffer that appears to be
2840 * up-to-date when doing a read request. Further, it marks as clean buffers that
2841 * are processed for writing (the buffer cache won't assume that they are
2842 * actually clean until the buffer gets unlocked).
2843 *
2844 * ll_rw_block sets b_end_io to a simple completion handler that marks
2845 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2846 * any waiters.
2847 *
2848 * All of the buffers must be for the same device, and must also be a
2849 * multiple of the current approved size for the device.
2850 */
2851void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2852{
2853 int i;
2854
2855 for (i = 0; i < nr; i++) {
2856 struct buffer_head *bh = bhs[i];
2857
2858 if (rw == SWRITE)
2859 lock_buffer(bh);
2860 else if (test_set_buffer_locked(bh))
2861 continue;
2862
a7662236 2863 if (rw == WRITE || rw == SWRITE) {
1da177e4 2864 if (test_clear_buffer_dirty(bh)) {
76c3073a 2865 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2866 get_bh(bh);
2867 submit_bh(WRITE, bh);
2868 continue;
2869 }
2870 } else {
1da177e4 2871 if (!buffer_uptodate(bh)) {
76c3073a 2872 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2873 get_bh(bh);
2874 submit_bh(rw, bh);
2875 continue;
2876 }
2877 }
2878 unlock_buffer(bh);
2879 }
2880}
2881
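/*
 * Example (sketch): the classic ll_rw_block() read pattern, essentially what
 * __bread()/sb_bread() do: start the I/O, wait, and then check BH_Uptodate
 * yourself, because ll_rw_block() reports no errors.  myfs_read_block is a
 * hypothetical helper; sb is the filesystem's super_block.
 */
static struct buffer_head *myfs_read_block(struct super_block *sb,
					   sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh && !buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			brelse(bh);	/* read error */
			return NULL;
		}
	}
	return bh;
}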
2882/*
2883 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2884 * and then start new I/O and then wait upon it. The caller must have a ref on
2885 * the buffer_head.
2886 */
2887int sync_dirty_buffer(struct buffer_head *bh)
2888{
2889 int ret = 0;
2890
2891 WARN_ON(atomic_read(&bh->b_count) < 1);
2892 lock_buffer(bh);
2893 if (test_clear_buffer_dirty(bh)) {
2894 get_bh(bh);
2895 bh->b_end_io = end_buffer_write_sync;
2896 ret = submit_bh(WRITE, bh);
2897 wait_on_buffer(bh);
2898 if (buffer_eopnotsupp(bh)) {
2899 clear_buffer_eopnotsupp(bh);
2900 ret = -EOPNOTSUPP;
2901 }
2902 if (!ret && !buffer_uptodate(bh))
2903 ret = -EIO;
2904 } else {
2905 unlock_buffer(bh);
2906 }
2907 return ret;
2908}
2909
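/*
 * Example (sketch): synchronously updating an on-disk metadata block with
 * sync_dirty_buffer().  sb_bread() supplies the buffer reference that the
 * comment above requires.  myfs_commit_super and the "block 0" layout are
 * hypothetical; 'len' must not exceed the block size.
 */
static int myfs_commit_super(struct super_block *sb, const void *data,
			     size_t len)
{
	struct buffer_head *bh = sb_bread(sb, 0);
	int err;

	if (!bh)
		return -EIO;
	memcpy(bh->b_data, data, len);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* waits for the write to finish */
	brelse(bh);
	return err;
}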
2910/*
2911 * try_to_free_buffers() checks if all the buffers on this particular page
2912 * are unused, and releases them if so.
2913 *
2914 * Exclusion against try_to_free_buffers may be obtained by either
2915 * locking the page or by holding its mapping's private_lock.
2916 *
2917 * If the page is dirty but all the buffers are clean then we need to
2918 * be sure to mark the page clean as well. This is because the page
2919 * may be against a block device, and a later reattachment of buffers
2920 * to a dirty page will set *all* buffers dirty. Which would corrupt
2921 * filesystem data on the same device.
2922 *
2923 * The same applies to regular filesystem pages: if all the buffers are
2924 * clean then we set the page clean and proceed. To do that, we require
2925 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2926 * private_lock.
2927 *
2928 * try_to_free_buffers() is non-blocking.
2929 */
2930static inline int buffer_busy(struct buffer_head *bh)
2931{
2932 return atomic_read(&bh->b_count) |
2933 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2934}
2935
2936static int
2937drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2938{
2939 struct buffer_head *head = page_buffers(page);
2940 struct buffer_head *bh;
2941
2942 bh = head;
2943 do {
de7d5a3b 2944 if (buffer_write_io_error(bh) && page->mapping)
2945 set_bit(AS_EIO, &page->mapping->flags);
2946 if (buffer_busy(bh))
2947 goto failed;
2948 bh = bh->b_this_page;
2949 } while (bh != head);
2950
2951 do {
2952 struct buffer_head *next = bh->b_this_page;
2953
2954 if (!list_empty(&bh->b_assoc_buffers))
2955 __remove_assoc_queue(bh);
2956 bh = next;
2957 } while (bh != head);
2958 *buffers_to_free = head;
2959 __clear_page_buffers(page);
2960 return 1;
2961failed:
2962 return 0;
2963}
2964
2965int try_to_free_buffers(struct page *page)
2966{
2967 struct address_space * const mapping = page->mapping;
2968 struct buffer_head *buffers_to_free = NULL;
2969 int ret = 0;
2970
2971 BUG_ON(!PageLocked(page));
2972 if (PageWriteback(page))
2973 return 0;
2974
2975 if (mapping == NULL) { /* can this still happen? */
2976 ret = drop_buffers(page, &buffers_to_free);
2977 goto out;
2978 }
2979
2980 spin_lock(&mapping->private_lock);
2981 ret = drop_buffers(page, &buffers_to_free);
2982 if (ret) {
2983 /*
2984 * If the filesystem writes its buffers by hand (eg ext3)
2985 * then we can have clean buffers against a dirty page. We
2986 * clean the page here; otherwise later reattachment of buffers
2987 * could encounter a non-uptodate page, which is unresolvable.
2988 * This only applies in the rare case where try_to_free_buffers
2989 * succeeds but the page is not freed.
2990 */
2991 clear_page_dirty(page);
2992 }
2993 spin_unlock(&mapping->private_lock);
2994out:
2995 if (buffers_to_free) {
2996 struct buffer_head *bh = buffers_to_free;
2997
2998 do {
2999 struct buffer_head *next = bh->b_this_page;
3000 free_buffer_head(bh);
3001 bh = next;
3002 } while (bh != buffers_to_free);
3003 }
3004 return ret;
3005}
3006EXPORT_SYMBOL(try_to_free_buffers);
3007
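/*
 * Example (sketch): where ->releasepage fits in.  When an address_space has
 * no ->releasepage of its own, try_to_release_page() falls back to calling
 * try_to_free_buffers() directly; a filesystem that does need a hook (ext3,
 * for instance, consults its journal first) ends up doing the same once its
 * own constraints are satisfied.  This trivial myfs_releasepage is
 * hypothetical and only illustrates the hook.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* callers invoke this only for pages that still have PG_private set */
	return try_to_free_buffers(page);
}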
3978d717 3008void block_sync_page(struct page *page)
3009{
3010 struct address_space *mapping;
3011
3012 smp_mb();
3013 mapping = page_mapping(page);
3014 if (mapping)
3015 blk_run_backing_dev(mapping->backing_dev_info, page);
3016}
3017
3018/*
3019 * There are no bdflush tunables left. But distributions are
3020 * still running obsolete flush daemons, so we terminate them here.
3021 *
3022 * Use of bdflush() is deprecated and will be removed in a future kernel.
3023 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3024 */
3025asmlinkage long sys_bdflush(int func, long data)
3026{
3027 static int msg_count;
3028
3029 if (!capable(CAP_SYS_ADMIN))
3030 return -EPERM;
3031
3032 if (msg_count < 5) {
3033 msg_count++;
3034 printk(KERN_INFO
3035 "warning: process `%s' used the obsolete bdflush"
3036 " system call\n", current->comm);
3037 printk(KERN_INFO "Fix your initscripts?\n");
3038 }
3039
3040 if (func == 1)
3041 do_exit(0);
3042 return 0;
3043}
3044
3045/*
3046 * Buffer-head allocation
3047 */
3048static kmem_cache_t *bh_cachep;
3049
3050/*
3051 * Once the number of bh's in the machine exceeds this level, we start
3052 * stripping them in writeback.
3053 */
3054static int max_buffer_heads;
3055
3056int buffer_heads_over_limit;
3057
3058struct bh_accounting {
3059 int nr; /* Number of live bh's */
3060 int ratelimit; /* Limit cacheline bouncing */
3061};
3062
3063static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3064
3065static void recalc_bh_state(void)
3066{
3067 int i;
3068 int tot = 0;
3069
3070 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3071 return;
3072 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3073 for_each_online_cpu(i)
3074 tot += per_cpu(bh_accounting, i).nr;
3075 buffer_heads_over_limit = (tot > max_buffer_heads);
3076}
3077
dd0fc66f 3078struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3079{
3080 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3081 if (ret) {
736c7b80 3082 get_cpu_var(bh_accounting).nr++;
1da177e4 3083 recalc_bh_state();
736c7b80 3084 put_cpu_var(bh_accounting);
3085 }
3086 return ret;
3087}
3088EXPORT_SYMBOL(alloc_buffer_head);
3089
3090void free_buffer_head(struct buffer_head *bh)
3091{
3092 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3093 kmem_cache_free(bh_cachep, bh);
736c7b80 3094 get_cpu_var(bh_accounting).nr--;
1da177e4 3095 recalc_bh_state();
736c7b80 3096 put_cpu_var(bh_accounting);
3097}
3098EXPORT_SYMBOL(free_buffer_head);
3099
3100static void
3101init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3102{
3103 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3104 SLAB_CTOR_CONSTRUCTOR) {
3105 struct buffer_head * bh = (struct buffer_head *)data;
3106
3107 memset(bh, 0, sizeof(*bh));
3108 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3109 }
3110}
3111
3112#ifdef CONFIG_HOTPLUG_CPU
3113static void buffer_exit_cpu(int cpu)
3114{
3115 int i;
3116 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3117
3118 for (i = 0; i < BH_LRU_SIZE; i++) {
3119 brelse(b->bhs[i]);
3120 b->bhs[i] = NULL;
3121 }
3122 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3123 per_cpu(bh_accounting, cpu).nr = 0;
3124 put_cpu_var(bh_accounting);
3125}
3126
3127static int buffer_cpu_notify(struct notifier_block *self,
3128 unsigned long action, void *hcpu)
3129{
3130 if (action == CPU_DEAD)
3131 buffer_exit_cpu((unsigned long)hcpu);
3132 return NOTIFY_OK;
3133}
3134#endif /* CONFIG_HOTPLUG_CPU */
3135
3136void __init buffer_init(void)
3137{
3138 int nrpages;
3139
3140 bh_cachep = kmem_cache_create("buffer_head",
3141 sizeof(struct buffer_head), 0,
3142 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3143 SLAB_MEM_SPREAD),
3144 init_buffer_head,
3145 NULL);
3146
3147 /*
3148 * Limit the bh occupancy to 10% of ZONE_NORMAL
3149 */
3150 nrpages = (nr_free_buffer_pages() * 10) / 100;
3151 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3152 hotcpu_notifier(buffer_cpu_notify, 0);
3153}
3154
3155EXPORT_SYMBOL(__bforget);
3156EXPORT_SYMBOL(__brelse);
3157EXPORT_SYMBOL(__wait_on_buffer);
3158EXPORT_SYMBOL(block_commit_write);
3159EXPORT_SYMBOL(block_prepare_write);
3160EXPORT_SYMBOL(block_read_full_page);
3161EXPORT_SYMBOL(block_sync_page);
3162EXPORT_SYMBOL(block_truncate_page);
3163EXPORT_SYMBOL(block_write_full_page);
3164EXPORT_SYMBOL(cont_prepare_write);
3165EXPORT_SYMBOL(end_buffer_async_write);
3166EXPORT_SYMBOL(end_buffer_read_sync);
3167EXPORT_SYMBOL(end_buffer_write_sync);
3168EXPORT_SYMBOL(file_fsync);
3169EXPORT_SYMBOL(fsync_bdev);
3170EXPORT_SYMBOL(generic_block_bmap);
3171EXPORT_SYMBOL(generic_commit_write);
3172EXPORT_SYMBOL(generic_cont_expand);
05eb0b51 3173EXPORT_SYMBOL(generic_cont_expand_simple);
3174EXPORT_SYMBOL(init_buffer);
3175EXPORT_SYMBOL(invalidate_bdev);
3176EXPORT_SYMBOL(ll_rw_block);
3177EXPORT_SYMBOL(mark_buffer_dirty);
3178EXPORT_SYMBOL(submit_bh);
3179EXPORT_SYMBOL(sync_dirty_buffer);
3180EXPORT_SYMBOL(unlock_buffer);