4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
55 EXPORT_SYMBOL(init_buffer);
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
71 void __lock_buffer(struct buffer_head *bh)
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
76 EXPORT_SYMBOL(__lock_buffer);
78 void unlock_buffer(struct buffer_head *bh)
80 clear_bit_unlock(BH_Lock, &bh->b_state);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
84 EXPORT_SYMBOL(unlock_buffer);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
95 EXPORT_SYMBOL(__wait_on_buffer);
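/*
 * Illustrative sketch, not part of the original file: the usual way a
 * caller combines the locking/waiting primitives above with submit_bh()
 * to read one buffer synchronously (essentially what __bread_slow()
 * further down does). example_read_buffer() is a hypothetical name;
 * the other identifiers are the standard buffer-head API.
 */
static int example_read_buffer(struct buffer_head *bh)
{
	lock_buffer(bh);		/* may sleep in __lock_buffer() */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* end_buffer_read_sync() drops this ref */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);		/* __wait_on_buffer() under the hood */
	return buffer_uptodate(bh) ? 0 : -EIO;
}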
98 __clear_page_buffers(struct page *page)
100 ClearPagePrivate(page);
101 set_page_private(page, 0);
102 page_cache_release(page);
106 static int quiet_error(struct buffer_head *bh)
108 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
114 static void buffer_io_error(struct buffer_head *bh)
116 char b[BDEVNAME_SIZE];
117 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118 bdevname(bh->b_bdev, b),
119 (unsigned long long)bh->b_blocknr);
123 * End-of-IO handler helper function which does not touch the bh after
125 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126 * a race there is benign: unlock_buffer() only uses the bh's address for
127 * hashing after unlocking the buffer, so it doesn't actually touch the bh
130 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
133 set_buffer_uptodate(bh);
135 /* This happens, due to failed READA attempts. */
136 clear_buffer_uptodate(bh);
142 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
143 * unlock the buffer. This is what ll_rw_block uses too.
145 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
147 __end_buffer_read_notouch(bh, uptodate);
150 EXPORT_SYMBOL(end_buffer_read_sync);
152 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
154 char b[BDEVNAME_SIZE];
157 set_buffer_uptodate(bh);
159 if (!quiet_error(bh)) {
161 printk(KERN_WARNING "lost page write due to "
163 bdevname(bh->b_bdev, b));
165 set_buffer_write_io_error(bh);
166 clear_buffer_uptodate(bh);
171 EXPORT_SYMBOL(end_buffer_write_sync);
174 * Various filesystems appear to want __find_get_block to be non-blocking.
175 * But it's the page lock which protects the buffers. To get around this,
176 * we get exclusion from try_to_free_buffers with the blockdev mapping's
179 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
180 * may be quite high. This code could TryLock the page, and if that
181 * succeeds, there is no need to take private_lock. (But if
182 * private_lock is contended then so is mapping->tree_lock).
184 static struct buffer_head *
185 __find_get_block_slow(struct block_device *bdev, sector_t block)
187 struct inode *bd_inode = bdev->bd_inode;
188 struct address_space *bd_mapping = bd_inode->i_mapping;
189 struct buffer_head *ret = NULL;
191 struct buffer_head *bh;
192 struct buffer_head *head;
196 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197 page = find_get_page(bd_mapping, index);
201 spin_lock(&bd_mapping->private_lock);
202 if (!page_has_buffers(page))
204 head = page_buffers(page);
207 if (!buffer_mapped(bh))
209 else if (bh->b_blocknr == block) {
214 bh = bh->b_this_page;
215 } while (bh != head);
217 /* we might be here because some of the buffers on this page are
218 * not mapped. This is due to various races between
219 * file io on the block device and getblk. It gets dealt with
220 * elsewhere, don't buffer_error if we had some unmapped buffers
223 printk("__find_get_block_slow() failed. "
224 "block=%llu, b_blocknr=%llu\n",
225 (unsigned long long)block,
226 (unsigned long long)bh->b_blocknr);
227 printk("b_state=0x%08lx, b_size=%zu\n",
228 bh->b_state, bh->b_size);
229 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
232 spin_unlock(&bd_mapping->private_lock);
233 page_cache_release(page);
238 /* If invalidate_buffers() will trash dirty buffers, it means some kind
239 of fs corruption is going on. Trashing dirty data always implies losing
240 information that was supposed to be just stored on the physical layer
243 Thus invalidate_buffers in general usage is not allowed to trash
244 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
245 be preserved. These buffers are simply skipped.
247 We also skip buffers which are still in use. For example this can
248 happen if a userspace program is reading the block device.
250 NOTE: if the user removes a removable-media disk while there is still
251 dirty data not synced to disk (due to a bug in the device driver or to
252 an error by the user), then by not destroying the dirty buffers we could
253 generate corruption also on the next media inserted; thus a parameter is
254 necessary to handle this case in the safest way possible (trying
255 not to corrupt the newly inserted disk as well with data belonging to
256 the old, now corrupted, disk). Also for the ramdisk the natural thing
257 to do in order to release the ramdisk memory is to destroy dirty buffers.
259 These are two special cases. Normal usage implies that the device driver
260 issues a sync on the device (without waiting for I/O completion) and
261 then an invalidate_buffers call that doesn't trash dirty buffers.
263 For handling cache coherency with the blkdev pagecache the 'update' case
264 has been introduced. It is needed to re-read from disk any pinned
265 buffer. NOTE: re-reading from disk is destructive, so we can do it only
266 when we assume nobody is changing the buffercache under our I/O and when
267 we think the disk contains more recent information than the buffercache.
268 The update == 1 pass marks the buffers we need to update; the update == 2
269 pass does the actual I/O. */
270 void invalidate_bdev(struct block_device *bdev)
272 struct address_space *mapping = bdev->bd_inode->i_mapping;
274 if (mapping->nrpages == 0)
277 invalidate_bh_lrus();
278 lru_add_drain_all(); /* make sure all lru add caches are flushed */
279 invalidate_mapping_pages(mapping, 0, -1);
281 EXPORT_SYMBOL(invalidate_bdev);
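/*
 * Illustrative sketch, not part of the original file: the "normal usage"
 * described in the comment above - write dirty data out first, then let
 * invalidate_bdev() drop only clean pagecache. example_flush_and_drop()
 * is a hypothetical caller.
 */
static void example_flush_and_drop(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* write out and wait upon dirty pages */
	invalidate_bdev(bdev);	/* safe now: dirty/pinned pages are skipped */
}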
284 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
286 static void free_more_memory(void)
291 wakeup_flusher_threads(1024);
294 for_each_online_node(nid) {
295 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296 gfp_zone(GFP_NOFS), NULL,
299 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
305 * I/O completion handler for block_read_full_page() - pages
306 * which come unlocked at the end of I/O.
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
311 struct buffer_head *first;
312 struct buffer_head *tmp;
314 int page_uptodate = 1;
316 BUG_ON(!buffer_async_read(bh));
320 set_buffer_uptodate(bh);
322 clear_buffer_uptodate(bh);
323 if (!quiet_error(bh))
329 * Be _very_ careful from here on. Bad things can happen if
330 * two buffer heads end IO at almost the same time and both
331 * decide that the page is now completely done.
333 first = page_buffers(page);
334 local_irq_save(flags);
335 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336 clear_buffer_async_read(bh);
340 if (!buffer_uptodate(tmp))
342 if (buffer_async_read(tmp)) {
343 BUG_ON(!buffer_locked(tmp));
346 tmp = tmp->b_this_page;
348 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349 local_irq_restore(flags);
352 * If none of the buffers had errors and they are all
353 * uptodate then we can set the page uptodate.
355 if (page_uptodate && !PageError(page))
356 SetPageUptodate(page);
361 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362 local_irq_restore(flags);
367 * Completion handler for block_write_full_page() - pages which are unlocked
368 * during I/O, and which have PageWriteback cleared upon I/O completion.
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
372 char b[BDEVNAME_SIZE];
374 struct buffer_head *first;
375 struct buffer_head *tmp;
378 BUG_ON(!buffer_async_write(bh));
382 set_buffer_uptodate(bh);
384 if (!quiet_error(bh)) {
386 printk(KERN_WARNING "lost page write due to "
388 bdevname(bh->b_bdev, b));
390 set_bit(AS_EIO, &page->mapping->flags);
391 set_buffer_write_io_error(bh);
392 clear_buffer_uptodate(bh);
396 first = page_buffers(page);
397 local_irq_save(flags);
398 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
400 clear_buffer_async_write(bh);
402 tmp = bh->b_this_page;
404 if (buffer_async_write(tmp)) {
405 BUG_ON(!buffer_locked(tmp));
408 tmp = tmp->b_this_page;
410 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411 local_irq_restore(flags);
412 end_page_writeback(page);
416 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417 local_irq_restore(flags);
420 EXPORT_SYMBOL(end_buffer_async_write);
423 * If a page's buffers are under async read (end_buffer_async_read
424 * completion) then there is a possibility that another thread of
425 * control could lock one of the buffers after it has completed
426 * but while some of the other buffers have not completed. This
427 * locked buffer would confuse end_buffer_async_read() into not unlocking
428 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
429 * that this buffer is not under async I/O.
431 * The page comes unlocked when it has no locked buffer_async buffers
434 * PageLocked prevents anyone starting new async I/O reads any of
437 * PageWriteback is used to prevent simultaneous writeout of the same
440 * PageLocked prevents anyone from starting writeback of a page which is
441 * under read I/O (PageWriteback is only ever set against a locked page).
443 static void mark_buffer_async_read(struct buffer_head *bh)
445 bh->b_end_io = end_buffer_async_read;
446 set_buffer_async_read(bh);
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450 bh_end_io_t *handler)
452 bh->b_end_io = handler;
453 set_buffer_async_write(bh);
456 void mark_buffer_async_write(struct buffer_head *bh)
458 mark_buffer_async_write_endio(bh, end_buffer_async_write);
460 EXPORT_SYMBOL(mark_buffer_async_write);
464 * fs/buffer.c contains helper functions for buffer-backed address space's
465 * fsync functions. A common requirement for buffer-based filesystems is
466 * that certain data from the backing blockdev needs to be written out for
467 * a successful fsync(). For example, ext2 indirect blocks need to be
468 * written back and waited upon before fsync() returns.
470 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
471 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472 * management of a list of dependent buffers at ->i_mapping->private_list.
474 * Locking is a little subtle: try_to_free_buffers() will remove buffers
475 * from their controlling inode's queue when they are being freed. But
476 * try_to_free_buffers() will be operating against the *blockdev* mapping
477 * at the time, not against the S_ISREG file which depends on those buffers.
478 * So the locking for private_list is via the private_lock in the address_space
479 * which backs the buffers. Which is different from the address_space
480 * against which the buffers are listed. So for a particular address_space,
481 * mapping->private_lock does *not* protect mapping->private_list! In fact,
482 * mapping->private_list will always be protected by the backing blockdev's
485 * Which introduces a requirement: all buffers on an address_space's
486 * ->private_list must be from the same address_space: the blockdev's.
488 * address_spaces which do not place buffers at ->private_list via these
489 * utility functions are free to use private_lock and private_list for
490 * whatever they want. The only requirement is that list_empty(private_list)
491 * be true at clear_inode() time.
493 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
494 * filesystems should do that. invalidate_inode_buffers() should just go
495 * BUG_ON(!list_empty).
497 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
498 * take an address_space, not an inode. And it should be called
499 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
502 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503 * list if it is already on a list. Because if the buffer is on a list,
504 * it *must* already be on the right one. If not, the filesystem is being
505 * silly. This will save a ton of locking. But first we have to ensure
506 * that buffers are taken *off* the old inode's list when they are freed
507 * (presumably in truncate). That requires careful auditing of all
508 * filesystems (do it inside bforget()). It could also be done by bringing
513 * The buffer's backing address_space's private_lock must be held
515 static void __remove_assoc_queue(struct buffer_head *bh)
517 list_del_init(&bh->b_assoc_buffers);
518 WARN_ON(!bh->b_assoc_map);
519 if (buffer_write_io_error(bh))
520 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521 bh->b_assoc_map = NULL;
524 int inode_has_buffers(struct inode *inode)
526 return !list_empty(&inode->i_data.private_list);
530 * osync is designed to support O_SYNC io. It waits synchronously for
531 * all already-submitted IO to complete, but does not queue any new
532 * writes to the disk.
534 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535 * you dirty the buffers, and then use osync_inode_buffers to wait for
536 * completion. Any other dirty buffers which are not yet queued for
537 * write will not be flushed to disk by the osync.
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
541 struct buffer_head *bh;
547 list_for_each_prev(p, list) {
549 if (buffer_locked(bh)) {
553 if (!buffer_uptodate(bh))
564 static void do_thaw_one(struct super_block *sb, void *unused)
566 char b[BDEVNAME_SIZE];
567 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568 printk(KERN_WARNING "Emergency Thaw on %s\n",
569 bdevname(sb->s_bdev, b));
572 static void do_thaw_all(struct work_struct *work)
574 iterate_supers(do_thaw_one, NULL);
576 printk(KERN_WARNING "Emergency Thaw complete\n");
580 * emergency_thaw_all -- forcibly thaw every frozen filesystem
582 * Used for emergency unfreeze of all filesystems via SysRq
584 void emergency_thaw_all(void)
586 struct work_struct *work;
588 work = kmalloc(sizeof(*work), GFP_ATOMIC);
590 INIT_WORK(work, do_thaw_all);
596 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597 * @mapping: the mapping which wants those buffers written
599 * Starts I/O against the buffers at mapping->private_list, and waits upon
602 * Basically, this is a convenience function for fsync().
603 * @mapping is a file or directory which needs those buffers to be written for
604 * a successful fsync().
606 int sync_mapping_buffers(struct address_space *mapping)
608 struct address_space *buffer_mapping = mapping->assoc_mapping;
610 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
613 return fsync_buffers_list(&buffer_mapping->private_lock,
614 &mapping->private_list);
616 EXPORT_SYMBOL(sync_mapping_buffers);
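/*
 * Illustrative sketch, not part of the original file: roughly how a
 * simple filesystem's ->fsync drives sync_mapping_buffers(), in the
 * spirit of generic_file_fsync(). example_fsync() and the use of
 * write_inode_now() for the metadata step are assumptions, not this
 * file's API.
 */
static int example_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret, err;

	/* write out & wait upon the inode's "associated" buffers */
	ret = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return ret;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return ret;

	err = write_inode_now(inode, 1);	/* then the inode itself */
	if (ret == 0)
		ret = err;
	return ret;
}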
619 * Called when we've recently written block `bblock', and it is known that
620 * `bblock' was for a buffer_boundary() buffer. This means that the block at
621 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
622 * dirty, schedule it for IO. So that indirects merge nicely with their data.
624 void write_boundary_block(struct block_device *bdev,
625 sector_t bblock, unsigned blocksize)
627 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
629 if (buffer_dirty(bh))
630 ll_rw_block(WRITE, 1, &bh);
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
637 struct address_space *mapping = inode->i_mapping;
638 struct address_space *buffer_mapping = bh->b_page->mapping;
640 mark_buffer_dirty(bh);
641 if (!mapping->assoc_mapping) {
642 mapping->assoc_mapping = buffer_mapping;
644 BUG_ON(mapping->assoc_mapping != buffer_mapping);
646 if (!bh->b_assoc_map) {
647 spin_lock(&buffer_mapping->private_lock);
648 list_move_tail(&bh->b_assoc_buffers,
649 &mapping->private_list);
650 bh->b_assoc_map = mapping;
651 spin_unlock(&buffer_mapping->private_lock);
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
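/*
 * Illustrative sketch, not part of the original file: an ext2-style
 * filesystem dirtying an indirect block and attaching it to the data
 * inode, so that a later sync_mapping_buffers() on that inode writes
 * the indirect block out before fsync() returns. Hypothetical helper.
 */
static void example_dirty_indirect(struct inode *inode,
				   struct buffer_head *ind_bh, int slot,
				   __le32 new_block)
{
	((__le32 *)ind_bh->b_data)[slot] = new_block;	/* update pointer */
	mark_buffer_dirty_inode(ind_bh, inode);	/* queue on ->private_list */
}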
657 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
660 * If warn is true, then emit a warning if the page is not uptodate and has
661 * not been truncated.
663 static void __set_page_dirty(struct page *page,
664 struct address_space *mapping, int warn)
666 spin_lock_irq(&mapping->tree_lock);
667 if (page->mapping) { /* Race with truncate? */
668 WARN_ON_ONCE(warn && !PageUptodate(page));
669 account_page_dirtied(page, mapping);
670 radix_tree_tag_set(&mapping->page_tree,
671 page_index(page), PAGECACHE_TAG_DIRTY);
673 spin_unlock_irq(&mapping->tree_lock);
674 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
678 * Add a page to the dirty page list.
680 * It is a sad fact of life that this function is called from several places
681 * deeply under spinlocking. It may not sleep.
683 * If the page has buffers, the uptodate buffers are set dirty, to preserve
684 * dirty-state coherency between the page and the buffers. If the page does
685 * not have buffers then when they are later attached they will all be set
688 * The buffers are dirtied before the page is dirtied. There's a small race
689 * window in which a writepage caller may see the page cleanness but not the
690 * buffer dirtiness. That's fine. If this code were to set the page dirty
691 * before the buffers, a concurrent writepage caller could clear the page dirty
692 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693 * page on the dirty page list.
695 * We use private_lock to lock against try_to_free_buffers while using the
696 * page's buffer list. Also use this to protect against clean buffers being
697 * added to the page after it was set dirty.
699 * FIXME: may need to call ->reservepage here as well. That's rather up to the
700 * address_space though.
702 int __set_page_dirty_buffers(struct page *page)
705 struct address_space *mapping = page_mapping(page);
707 if (unlikely(!mapping))
708 return !TestSetPageDirty(page);
710 spin_lock(&mapping->private_lock);
711 if (page_has_buffers(page)) {
712 struct buffer_head *head = page_buffers(page);
713 struct buffer_head *bh = head;
716 set_buffer_dirty(bh);
717 bh = bh->b_this_page;
718 } while (bh != head);
720 newly_dirty = !TestSetPageDirty(page);
721 spin_unlock(&mapping->private_lock);
724 __set_page_dirty(page, mapping, 1);
727 EXPORT_SYMBOL(__set_page_dirty_buffers);
730 * Write out and wait upon a list of buffers.
732 * We have conflicting pressures: we want to make sure that all
733 * initially dirty buffers get waited on, but that any subsequently
734 * dirtied buffers don't. After all, we don't want fsync to last
735 * forever if somebody is actively writing to the file.
737 * Do this in two main stages: first we copy dirty buffers to a
738 * temporary inode list, queueing the writes as we go. Then we clean
739 * up, waiting for those writes to complete.
741 * During this second stage, any subsequent updates to the file may end
742 * up refiling the buffer on the original inode's dirty list again, so
743 * there is a chance we will end up with a buffer queued for write but
744 * not yet completed on that list. So, as a final cleanup we go through
745 * the osync code to catch these locked, dirty buffers without requeuing
746 * any newly dirty buffers for write.
748 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
750 struct buffer_head *bh;
751 struct list_head tmp;
752 struct address_space *mapping, *prev_mapping = NULL;
755 INIT_LIST_HEAD(&tmp);
758 while (!list_empty(list)) {
759 bh = BH_ENTRY(list->next);
760 mapping = bh->b_assoc_map;
761 __remove_assoc_queue(bh);
762 /* Avoid race with mark_buffer_dirty_inode() which does
763 * a lockless check and we rely on seeing the dirty bit */
765 if (buffer_dirty(bh) || buffer_locked(bh)) {
766 list_add(&bh->b_assoc_buffers, &tmp);
767 bh->b_assoc_map = mapping;
768 if (buffer_dirty(bh)) {
772 * Ensure any pending I/O completes so that
773 * write_dirty_buffer() actually writes the
774 * current contents - it is a noop if I/O is
775 * still in flight on potentially older
778 write_dirty_buffer(bh, WRITE_SYNC_PLUG);
781 * Kick off IO for the previous mapping. Note
782 * that we will not run the very last mapping,
783 * wait_on_buffer() will do that for us
784 * through sync_buffer().
786 if (prev_mapping && prev_mapping != mapping)
787 blk_run_address_space(prev_mapping);
788 prev_mapping = mapping;
796 while (!list_empty(&tmp)) {
797 bh = BH_ENTRY(tmp.prev);
799 mapping = bh->b_assoc_map;
800 __remove_assoc_queue(bh);
801 /* Avoid race with mark_buffer_dirty_inode() which does
802 * a lockless check and we rely on seeing the dirty bit */
804 if (buffer_dirty(bh)) {
805 list_add(&bh->b_assoc_buffers,
806 &mapping->private_list);
807 bh->b_assoc_map = mapping;
811 if (!buffer_uptodate(bh))
818 err2 = osync_buffers_list(lock, list);
826 * Invalidate any and all dirty buffers on a given inode. We are
827 * probably unmounting the fs, but that doesn't mean we have already
828 * done a sync(). Just drop the buffers from the inode list.
830 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
831 * assumes that all the buffers are against the blockdev. Not true
834 void invalidate_inode_buffers(struct inode *inode)
836 if (inode_has_buffers(inode)) {
837 struct address_space *mapping = &inode->i_data;
838 struct list_head *list = &mapping->private_list;
839 struct address_space *buffer_mapping = mapping->assoc_mapping;
841 spin_lock(&buffer_mapping->private_lock);
842 while (!list_empty(list))
843 __remove_assoc_queue(BH_ENTRY(list->next));
844 spin_unlock(&buffer_mapping->private_lock);
847 EXPORT_SYMBOL(invalidate_inode_buffers);
850 * Remove any clean buffers from the inode's buffer list. This is called
851 * when we're trying to free the inode itself. Those buffers can pin it.
853 * Returns true if all buffers were removed.
855 int remove_inode_buffers(struct inode *inode)
859 if (inode_has_buffers(inode)) {
860 struct address_space *mapping = &inode->i_data;
861 struct list_head *list = &mapping->private_list;
862 struct address_space *buffer_mapping = mapping->assoc_mapping;
864 spin_lock(&buffer_mapping->private_lock);
865 while (!list_empty(list)) {
866 struct buffer_head *bh = BH_ENTRY(list->next);
867 if (buffer_dirty(bh)) {
871 __remove_assoc_queue(bh);
873 spin_unlock(&buffer_mapping->private_lock);
879 * Create the appropriate buffers when given a page for the data area and
880 * the size of each buffer.. Use the bh->b_this_page linked list to
881 * follow the buffers created. Return NULL if unable to create more
884 * The retry flag is used to differentiate async IO (paging, swapping)
885 * which may not fail from ordinary buffer allocations.
887 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
890 struct buffer_head *bh, *head;
896 while ((offset -= size) >= 0) {
897 bh = alloc_buffer_head(GFP_NOFS);
902 bh->b_this_page = head;
907 atomic_set(&bh->b_count, 0);
908 bh->b_private = NULL;
911 /* Link the buffer to its page */
912 set_bh_page(bh, page, offset);
914 init_buffer(bh, NULL, NULL);
918 * In case anything failed, we just free everything we got.
924 head = head->b_this_page;
925 free_buffer_head(bh);
930 * Return failure for non-async IO requests. Async IO requests
931 * are not allowed to fail, so we have to wait until buffer heads
932 * become available. But we don't want tasks sleeping with
933 * partially complete buffers, so all were released above.
938 /* We're _really_ low on memory. Now we just
939 * wait for old buffer heads to become free due to
940 * finishing IO. Since this is an async request and
941 * the reserve list is empty, we're sure there are
942 * async buffer heads in use.
947 EXPORT_SYMBOL_GPL(alloc_page_buffers);
950 link_dev_buffers(struct page *page, struct buffer_head *head)
952 struct buffer_head *bh, *tail;
957 bh = bh->b_this_page;
959 tail->b_this_page = head;
960 attach_page_buffers(page, head);
964 * Initialise the state of a blockdev page's buffers.
967 init_page_buffers(struct page *page, struct block_device *bdev,
968 sector_t block, int size)
970 struct buffer_head *head = page_buffers(page);
971 struct buffer_head *bh = head;
972 int uptodate = PageUptodate(page);
975 if (!buffer_mapped(bh)) {
976 init_buffer(bh, NULL, NULL);
978 bh->b_blocknr = block;
980 set_buffer_uptodate(bh);
981 set_buffer_mapped(bh);
984 bh = bh->b_this_page;
985 } while (bh != head);
989 * Create the page-cache page that contains the requested block.
991 * This is used purely for blockdev mappings.
994 grow_dev_page(struct block_device *bdev, sector_t block,
995 pgoff_t index, int size)
997 struct inode *inode = bdev->bd_inode;
999 struct buffer_head *bh;
1001 page = find_or_create_page(inode->i_mapping, index,
1002 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1006 BUG_ON(!PageLocked(page));
1008 if (page_has_buffers(page)) {
1009 bh = page_buffers(page);
1010 if (bh->b_size == size) {
1011 init_page_buffers(page, bdev, block, size);
1014 if (!try_to_free_buffers(page))
1019 * Allocate some buffers for this page
1021 bh = alloc_page_buffers(page, size, 0);
1026 * Link the page to the buffers and initialise them. Take the
1027 * lock to be atomic wrt __find_get_block(), which does not
1028 * run under the page lock.
1030 spin_lock(&inode->i_mapping->private_lock);
1031 link_dev_buffers(page, bh);
1032 init_page_buffers(page, bdev, block, size);
1033 spin_unlock(&inode->i_mapping->private_lock);
1039 page_cache_release(page);
1044 * Create buffers for the specified block device block's page. If
1045 * that page was dirty, the buffers are set dirty also.
1048 grow_buffers(struct block_device *bdev, sector_t block, int size)
1057 } while ((size << sizebits) < PAGE_SIZE);
1059 index = block >> sizebits;
1062 * Check for a block which wants to lie outside our maximum possible
1063 * pagecache index. (this comparison is done using sector_t types).
1065 if (unlikely(index != block >> sizebits)) {
1066 char b[BDEVNAME_SIZE];
1068 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1070 __func__, (unsigned long long)block,
1074 block = index << sizebits;
1075 /* Create a page with the proper size buffers.. */
1076 page = grow_dev_page(bdev, block, index, size);
1080 page_cache_release(page);
1084 static struct buffer_head *
1085 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1087 /* Size must be multiple of hard sectorsize */
1088 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1089 (size < 512 || size > PAGE_SIZE))) {
1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1092 printk(KERN_ERR "logical block size: %d\n",
1093 bdev_logical_block_size(bdev));
1100 struct buffer_head * bh;
1103 bh = __find_get_block(bdev, block, size);
1107 ret = grow_buffers(bdev, block, size);
1116 * The relationship between dirty buffers and dirty pages:
1118 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1119 * the page is tagged dirty in its radix tree.
1121 * At all times, the dirtiness of the buffers represents the dirtiness of
1122 * subsections of the page. If the page has buffers, the page dirty bit is
1123 * merely a hint about the true dirty state.
1125 * When a page is set dirty in its entirety, all its buffers are marked dirty
1126 * (if the page has buffers).
1128 * When a buffer is marked dirty, its page is dirtied, but the page's other
1131 * Also. When blockdev buffers are explicitly read with bread(), they
1132 * individually become uptodate. But their backing page remains not
1133 * uptodate - even if all of its buffers are uptodate. A subsequent
1134 * block_read_full_page() against that page will discover all the uptodate
1135 * buffers, will set the page uptodate and will perform no I/O.
1139 * mark_buffer_dirty - mark a buffer_head as needing writeout
1140 * @bh: the buffer_head to mark dirty
1142 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1143 * backing page dirty, then tag the page as dirty in its address_space's radix
1144 * tree and then attach the address_space's inode to its superblock's dirty
1147 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1148 * mapping->tree_lock and the global inode_lock.
1150 void mark_buffer_dirty(struct buffer_head *bh)
1152 WARN_ON_ONCE(!buffer_uptodate(bh));
1155 * Very *carefully* optimize the it-is-already-dirty case.
1157 * Don't let the final "is it dirty" escape to before we
1158 * perhaps modified the buffer.
1160 if (buffer_dirty(bh)) {
1162 if (buffer_dirty(bh))
1166 if (!test_set_buffer_dirty(bh)) {
1167 struct page *page = bh->b_page;
1168 if (!TestSetPageDirty(page)) {
1169 struct address_space *mapping = page_mapping(page);
1171 __set_page_dirty(page, mapping, 0);
1175 EXPORT_SYMBOL(mark_buffer_dirty);
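/*
 * Illustrative sketch, not part of the original file: the common
 * modify-then-dirty sequence for metadata, assuming @data covers the
 * whole block (otherwise the buffer must already be uptodate).
 * example_update_block() is a hypothetical name.
 */
static void example_update_block(struct buffer_head *bh,
				 const void *data, size_t len)
{
	lock_buffer(bh);		/* exclude concurrent I/O on the bh */
	memcpy(bh->b_data, data, len);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* dirties bh, page, radix tree, inode */
}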
1178 * Decrement a buffer_head's reference count. If all buffers against a page
1179 * have zero reference count, are clean and unlocked, and if the page is clean
1180 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1181 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1182 * a page but it ends up not being freed, and buffers may later be reattached).
1184 void __brelse(struct buffer_head * buf)
1186 if (atomic_read(&buf->b_count)) {
1190 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1192 EXPORT_SYMBOL(__brelse);
1195 * bforget() is like brelse(), except it discards any
1196 * potentially dirty data.
1198 void __bforget(struct buffer_head *bh)
1200 clear_buffer_dirty(bh);
1201 if (bh->b_assoc_map) {
1202 struct address_space *buffer_mapping = bh->b_page->mapping;
1204 spin_lock(&buffer_mapping->private_lock);
1205 list_del_init(&bh->b_assoc_buffers);
1206 bh->b_assoc_map = NULL;
1207 spin_unlock(&buffer_mapping->private_lock);
1211 EXPORT_SYMBOL(__bforget);
1213 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1216 if (buffer_uptodate(bh)) {
1221 bh->b_end_io = end_buffer_read_sync;
1222 submit_bh(READ, bh);
1224 if (buffer_uptodate(bh))
1232 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1233 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1234 * refcount elevated by one when they're in an LRU. A buffer can only appear
1235 * once in a particular CPU's LRU. A single buffer can be present in multiple
1236 * CPU's LRUs at the same time.
1238 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1239 * sb_find_get_block().
1241 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1242 * a local interrupt disable for that.
1245 #define BH_LRU_SIZE 8
1248 struct buffer_head *bhs[BH_LRU_SIZE];
1251 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1254 #define bh_lru_lock() local_irq_disable()
1255 #define bh_lru_unlock() local_irq_enable()
1257 #define bh_lru_lock() preempt_disable()
1258 #define bh_lru_unlock() preempt_enable()
1261 static inline void check_irqs_on(void)
1263 #ifdef irqs_disabled
1264 BUG_ON(irqs_disabled());
1269 * The LRU management algorithm is dopey-but-simple. Sorry.
1271 static void bh_lru_install(struct buffer_head *bh)
1273 struct buffer_head *evictee = NULL;
1278 lru = &__get_cpu_var(bh_lrus);
1279 if (lru->bhs[0] != bh) {
1280 struct buffer_head *bhs[BH_LRU_SIZE];
1286 for (in = 0; in < BH_LRU_SIZE; in++) {
1287 struct buffer_head *bh2 = lru->bhs[in];
1292 if (out >= BH_LRU_SIZE) {
1293 BUG_ON(evictee != NULL);
1300 while (out < BH_LRU_SIZE)
1302 memcpy(lru->bhs, bhs, sizeof(bhs));
1311 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1313 static struct buffer_head *
1314 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1316 struct buffer_head *ret = NULL;
1322 lru = &__get_cpu_var(bh_lrus);
1323 for (i = 0; i < BH_LRU_SIZE; i++) {
1324 struct buffer_head *bh = lru->bhs[i];
1326 if (bh && bh->b_bdev == bdev &&
1327 bh->b_blocknr == block && bh->b_size == size) {
1330 lru->bhs[i] = lru->bhs[i - 1];
1345 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1346 * it in the LRU and mark it as accessed. If it is not present then return
1349 struct buffer_head *
1350 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1352 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1355 bh = __find_get_block_slow(bdev, block);
1363 EXPORT_SYMBOL(__find_get_block);
1366 * __getblk will locate (and, if necessary, create) the buffer_head
1367 * which corresponds to the passed block_device, block and size. The
1368 * returned buffer has its reference count incremented.
1370 * __getblk() cannot fail - it just keeps trying. If you pass it an
1371 * illegal block number, __getblk() will happily return a buffer_head
1372 * which represents the non-existent block. Very weird.
1374 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1375 * attempt is failing. FIXME, perhaps?
1377 struct buffer_head *
1378 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1380 struct buffer_head *bh = __find_get_block(bdev, block, size);
1384 bh = __getblk_slow(bdev, block, size);
1387 EXPORT_SYMBOL(__getblk);
1390 * Do async read-ahead on a buffer..
1392 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1394 struct buffer_head *bh = __getblk(bdev, block, size);
1396 ll_rw_block(READA, 1, &bh);
1400 EXPORT_SYMBOL(__breadahead);
1403 * __bread() - reads a specified block and returns the bh
1404 * @bdev: the block_device to read from
1405 * @block: number of block
1406 * @size: size (in bytes) to read
1408 * Reads a specified block, and returns buffer head that contains it.
1409 * It returns NULL if the block was unreadable.
1411 struct buffer_head *
1412 __bread(struct block_device *bdev, sector_t block, unsigned size)
1414 struct buffer_head *bh = __getblk(bdev, block, size);
1416 if (likely(bh) && !buffer_uptodate(bh))
1417 bh = __bread_slow(bh);
1420 EXPORT_SYMBOL(__bread);
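/*
 * Illustrative sketch, not part of the original file: the typical
 * read-inspect-release cycle built on __bread()/brelse().
 * example_peek_block() is a hypothetical name.
 */
static int example_peek_block(struct block_device *bdev, sector_t block,
			      unsigned size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;		/* block was unreadable */
	/* ... examine bh->b_data here ... */
	brelse(bh);			/* drop the ref __bread() returned */
	return 0;
}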
1423 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1424 * This doesn't race because it runs in each cpu either in irq
1425 * or with preempt disabled.
1427 static void invalidate_bh_lru(void *arg)
1429 struct bh_lru *b = &get_cpu_var(bh_lrus);
1432 for (i = 0; i < BH_LRU_SIZE; i++) {
1436 put_cpu_var(bh_lrus);
1439 void invalidate_bh_lrus(void)
1441 on_each_cpu(invalidate_bh_lru, NULL, 1);
1443 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1445 void set_bh_page(struct buffer_head *bh,
1446 struct page *page, unsigned long offset)
1449 BUG_ON(offset >= PAGE_SIZE);
1450 if (PageHighMem(page))
1452 * This catches illegal uses and preserves the offset:
1454 bh->b_data = (char *)(0 + offset);
1456 bh->b_data = page_address(page) + offset;
1458 EXPORT_SYMBOL(set_bh_page);
1461 * Called when truncating a buffer on a page completely.
1463 static void discard_buffer(struct buffer_head * bh)
1466 clear_buffer_dirty(bh);
1468 clear_buffer_mapped(bh);
1469 clear_buffer_req(bh);
1470 clear_buffer_new(bh);
1471 clear_buffer_delay(bh);
1472 clear_buffer_unwritten(bh);
1477 * block_invalidatepage - invalidate part or all of a buffer-backed page
1479 * @page: the page which is affected
1480 * @offset: the index of the truncation point
1482 * block_invalidatepage() is called when all or part of the page has become
1483 * invalidated by a truncate operation.
1485 * block_invalidatepage() does not have to release all buffers, but it must
1486 * ensure that no dirty buffer is left outside @offset and that no I/O
1487 * is underway against any of the blocks which are outside the truncation
1488 * point. Because the caller is about to free (and possibly reuse) those
1491 void block_invalidatepage(struct page *page, unsigned long offset)
1493 struct buffer_head *head, *bh, *next;
1494 unsigned int curr_off = 0;
1496 BUG_ON(!PageLocked(page));
1497 if (!page_has_buffers(page))
1500 head = page_buffers(page);
1503 unsigned int next_off = curr_off + bh->b_size;
1504 next = bh->b_this_page;
1507 * is this block fully invalidated?
1509 if (offset <= curr_off)
1511 curr_off = next_off;
1513 } while (bh != head);
1516 * We release buffers only if the entire page is being invalidated.
1517 * The get_block cached value has been unconditionally invalidated,
1518 * so real IO is not possible anymore.
1521 try_to_release_page(page, 0);
1525 EXPORT_SYMBOL(block_invalidatepage);
1528 * We attach and possibly dirty the buffers atomically wrt
1529 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1530 * is already excluded via the page lock.
1532 void create_empty_buffers(struct page *page,
1533 unsigned long blocksize, unsigned long b_state)
1535 struct buffer_head *bh, *head, *tail;
1537 head = alloc_page_buffers(page, blocksize, 1);
1540 bh->b_state |= b_state;
1542 bh = bh->b_this_page;
1544 tail->b_this_page = head;
1546 spin_lock(&page->mapping->private_lock);
1547 if (PageUptodate(page) || PageDirty(page)) {
1550 if (PageDirty(page))
1551 set_buffer_dirty(bh);
1552 if (PageUptodate(page))
1553 set_buffer_uptodate(bh);
1554 bh = bh->b_this_page;
1555 } while (bh != head);
1557 attach_page_buffers(page, head);
1558 spin_unlock(&page->mapping->private_lock);
1560 EXPORT_SYMBOL(create_empty_buffers);
1563 * We are taking a block for data and we don't want any output from any
1564 * buffer-cache aliases starting from return from that function and
1565 * until the moment when something will explicitly mark the buffer
1566 * dirty (hopefully that will not happen until we will free that block ;-)
1567 * We don't even need to mark it not-uptodate - nobody can expect
1568 * anything from a newly allocated buffer anyway. We used to use
1569 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1570 * don't want to mark the alias unmapped, for example - it would confuse
1571 * anyone who might pick it with bread() afterwards...
1573 * Also.. Note that bforget() doesn't lock the buffer. So there can
1574 * be writeout I/O going on against recently-freed buffers. We don't
1575 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1576 * only if we really need to. That happens here.
1578 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1580 struct buffer_head *old_bh;
1584 old_bh = __find_get_block_slow(bdev, block);
1586 clear_buffer_dirty(old_bh);
1587 wait_on_buffer(old_bh);
1588 clear_buffer_req(old_bh);
1592 EXPORT_SYMBOL(unmap_underlying_metadata);
1595 * NOTE! All mapped/uptodate combinations are valid:
1597 *	Mapped	Uptodate	Meaning
1599 *	No	No		"unknown" - must do get_block()
1600 *	No	Yes		"hole" - zero-filled
1601 *	Yes	No		"allocated" - allocated on disk, not read in
1602 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1604 * "Dirty" is valid only with the last case (mapped+uptodate).
1608 * While block_write_full_page is writing back the dirty buffers under
1609 * the page lock, whoever dirtied the buffers may decide to clean them
1610 * again at any time. We handle that by only looking at the buffer
1611 * state inside lock_buffer().
1613 * If block_write_full_page() is called for regular writeback
1614 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1615 * locked buffer. This only can happen if someone has written the buffer
1616 * directly, with submit_bh(). At the address_space level PageWriteback
1617 * prevents this contention from occurring.
1619 * If block_write_full_page() is called with wbc->sync_mode ==
1620 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1621 * causes the writes to be flagged as synchronous writes, but the
1622 * block device queue will NOT be unplugged, since usually many pages
1623 * will be pushed out before the higher-level caller actually
1624 * waits for the writes to be completed. The various wait functions,
1625 * such as wait_on_writeback_range() will ultimately call sync_page()
1626 * which will ultimately call blk_run_backing_dev(), which will end up
1627 * unplugging the device queue.
1629 static int __block_write_full_page(struct inode *inode, struct page *page,
1630 get_block_t *get_block, struct writeback_control *wbc,
1631 bh_end_io_t *handler)
1635 sector_t last_block;
1636 struct buffer_head *bh, *head;
1637 const unsigned blocksize = 1 << inode->i_blkbits;
1638 int nr_underway = 0;
1639 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1640 WRITE_SYNC_PLUG : WRITE);
1642 BUG_ON(!PageLocked(page));
1644 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1646 if (!page_has_buffers(page)) {
1647 create_empty_buffers(page, blocksize,
1648 (1 << BH_Dirty)|(1 << BH_Uptodate));
1652 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1653 * here, and the (potentially unmapped) buffers may become dirty at
1654 * any time. If a buffer becomes dirty here after we've inspected it
1655 * then we just miss that fact, and the page stays dirty.
1657 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1658 * handle that here by just cleaning them.
1661 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1662 head = page_buffers(page);
1666 * Get all the dirty buffers mapped to disk addresses and
1667 * handle any aliases from the underlying blockdev's mapping.
1670 if (block > last_block) {
1672 * mapped buffers outside i_size will occur, because
1673 * this page can be outside i_size when there is a
1674 * truncate in progress.
1677 * The buffer was zeroed by block_write_full_page()
1679 clear_buffer_dirty(bh);
1680 set_buffer_uptodate(bh);
1681 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1683 WARN_ON(bh->b_size != blocksize);
1684 err = get_block(inode, block, bh, 1);
1687 clear_buffer_delay(bh);
1688 if (buffer_new(bh)) {
1689 /* blockdev mappings never come here */
1690 clear_buffer_new(bh);
1691 unmap_underlying_metadata(bh->b_bdev,
1695 bh = bh->b_this_page;
1697 } while (bh != head);
1700 if (!buffer_mapped(bh))
1703 * If it's a fully non-blocking write attempt and we cannot
1704 * lock the buffer then redirty the page. Note that this can
1705 * potentially cause a busy-wait loop from writeback threads
1706 * and kswapd activity, but those code paths have their own
1707 * higher-level throttling.
1709 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1711 } else if (!trylock_buffer(bh)) {
1712 redirty_page_for_writepage(wbc, page);
1715 if (test_clear_buffer_dirty(bh)) {
1716 mark_buffer_async_write_endio(bh, handler);
1720 } while ((bh = bh->b_this_page) != head);
1723 * The page and its buffers are protected by PageWriteback(), so we can
1724 * drop the bh refcounts early.
1726 BUG_ON(PageWriteback(page));
1727 set_page_writeback(page);
1730 struct buffer_head *next = bh->b_this_page;
1731 if (buffer_async_write(bh)) {
1732 submit_bh(write_op, bh);
1736 } while (bh != head);
1741 if (nr_underway == 0) {
1743 * The page was marked dirty, but the buffers were
1744 * clean. Someone wrote them back by hand with
1745 * ll_rw_block/submit_bh. A rare case.
1747 end_page_writeback(page);
1750 * The page and buffer_heads can be released at any time from
1758 * ENOSPC, or some other error. We may already have added some
1759 * blocks to the file, so we need to write these out to avoid
1760 * exposing stale data.
1761 * The page is currently locked and not marked for writeback
1764 /* Recovery: lock and submit the mapped buffers */
1766 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1767 !buffer_delay(bh)) {
1769 mark_buffer_async_write_endio(bh, handler);
1772 * The buffer may have been set dirty during
1773 * attachment to a dirty page.
1775 clear_buffer_dirty(bh);
1777 } while ((bh = bh->b_this_page) != head);
1779 BUG_ON(PageWriteback(page));
1780 mapping_set_error(page->mapping, err);
1781 set_page_writeback(page);
1783 struct buffer_head *next = bh->b_this_page;
1784 if (buffer_async_write(bh)) {
1785 clear_buffer_dirty(bh);
1786 submit_bh(write_op, bh);
1790 } while (bh != head);
1796 * If a page has any new buffers, zero them out here, and mark them uptodate
1797 * and dirty so they'll be written out (in order to prevent uninitialised
1798 * block data from leaking). And clear the new bit.
1800 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1802 unsigned int block_start, block_end;
1803 struct buffer_head *head, *bh;
1805 BUG_ON(!PageLocked(page));
1806 if (!page_has_buffers(page))
1809 bh = head = page_buffers(page);
1812 block_end = block_start + bh->b_size;
1814 if (buffer_new(bh)) {
1815 if (block_end > from && block_start < to) {
1816 if (!PageUptodate(page)) {
1817 unsigned start, size;
1819 start = max(from, block_start);
1820 size = min(to, block_end) - start;
1822 zero_user(page, start, size);
1823 set_buffer_uptodate(bh);
1826 clear_buffer_new(bh);
1827 mark_buffer_dirty(bh);
1831 block_start = block_end;
1832 bh = bh->b_this_page;
1833 } while (bh != head);
1835 EXPORT_SYMBOL(page_zero_new_buffers);
1837 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1838 get_block_t *get_block)
1840 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1841 unsigned to = from + len;
1842 struct inode *inode = page->mapping->host;
1843 unsigned block_start, block_end;
1846 unsigned blocksize, bbits;
1847 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1849 BUG_ON(!PageLocked(page));
1850 BUG_ON(from > PAGE_CACHE_SIZE);
1851 BUG_ON(to > PAGE_CACHE_SIZE);
1854 blocksize = 1 << inode->i_blkbits;
1855 if (!page_has_buffers(page))
1856 create_empty_buffers(page, blocksize, 0);
1857 head = page_buffers(page);
1859 bbits = inode->i_blkbits;
1860 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1862 for(bh = head, block_start = 0; bh != head || !block_start;
1863 block++, block_start=block_end, bh = bh->b_this_page) {
1864 block_end = block_start + blocksize;
1865 if (block_end <= from || block_start >= to) {
1866 if (PageUptodate(page)) {
1867 if (!buffer_uptodate(bh))
1868 set_buffer_uptodate(bh);
1873 clear_buffer_new(bh);
1874 if (!buffer_mapped(bh)) {
1875 WARN_ON(bh->b_size != blocksize);
1876 err = get_block(inode, block, bh, 1);
1879 if (buffer_new(bh)) {
1880 unmap_underlying_metadata(bh->b_bdev,
1882 if (PageUptodate(page)) {
1883 clear_buffer_new(bh);
1884 set_buffer_uptodate(bh);
1885 mark_buffer_dirty(bh);
1888 if (block_end > to || block_start < from)
1889 zero_user_segments(page,
1895 if (PageUptodate(page)) {
1896 if (!buffer_uptodate(bh))
1897 set_buffer_uptodate(bh);
1900 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1901 !buffer_unwritten(bh) &&
1902 (block_start < from || block_end > to)) {
1903 ll_rw_block(READ, 1, &bh);
1908 * If we issued read requests - let them complete.
1910 while(wait_bh > wait) {
1911 wait_on_buffer(*--wait_bh);
1912 if (!buffer_uptodate(*wait_bh))
1915 if (unlikely(err)) {
1916 page_zero_new_buffers(page, from, to);
1917 ClearPageUptodate(page);
1921 EXPORT_SYMBOL(__block_write_begin);
1923 static int __block_commit_write(struct inode *inode, struct page *page,
1924 unsigned from, unsigned to)
1926 unsigned block_start, block_end;
1929 struct buffer_head *bh, *head;
1931 blocksize = 1 << inode->i_blkbits;
1933 for(bh = head = page_buffers(page), block_start = 0;
1934 bh != head || !block_start;
1935 block_start=block_end, bh = bh->b_this_page) {
1936 block_end = block_start + blocksize;
1937 if (block_end <= from || block_start >= to) {
1938 if (!buffer_uptodate(bh))
1941 set_buffer_uptodate(bh);
1942 mark_buffer_dirty(bh);
1944 clear_buffer_new(bh);
1948 * If this is a partial write which happened to make all buffers
1949 * uptodate then we can optimize away a bogus readpage() for
1950 * the next read(). Here we 'discover' whether the page went
1951 * uptodate as a result of this (potentially partial) write.
1954 SetPageUptodate(page);
1959 * block_write_begin takes care of the basic task of block allocation and
1960 * bringing partial write blocks uptodate first.
1962 * The filesystem needs to handle block truncation upon failure.
1964 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1965 unsigned flags, struct page **pagep, get_block_t *get_block)
1967 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1971 page = grab_cache_page_write_begin(mapping, index, flags);
1975 status = __block_write_begin(page, pos, len, get_block);
1976 if (unlikely(status)) {
1978 page_cache_release(page);
1985 EXPORT_SYMBOL(block_write_begin);
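/*
 * Illustrative sketch, not part of the original file: the minimal
 * ->write_begin most block-based filesystems end up with, delegating to
 * block_write_begin() with their own get_block. example_get_block() and
 * example_write_begin() are hypothetical names; per the comment above,
 * the filesystem must truncate any blocks instantiated beyond i_size if
 * this fails.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_write_begin(struct file *file,
			       struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 example_get_block);
}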
1987 int block_write_end(struct file *file, struct address_space *mapping,
1988 loff_t pos, unsigned len, unsigned copied,
1989 struct page *page, void *fsdata)
1991 struct inode *inode = mapping->host;
1994 start = pos & (PAGE_CACHE_SIZE - 1);
1996 if (unlikely(copied < len)) {
1998 * The buffers that were written will now be uptodate, so we
1999 * don't have to worry about a readpage reading them and
2000 * overwriting a partial write. However if we have encountered
2001 * a short write and only partially written into a buffer, it
2002 * will not be marked uptodate, so a readpage might come in and
2003 * destroy our partial write.
2005 * Do the simplest thing, and just treat any short write to a
2006 * non uptodate page as a zero-length write, and force the
2007 * caller to redo the whole thing.
2009 if (!PageUptodate(page))
2012 page_zero_new_buffers(page, start+copied, start+len);
2014 flush_dcache_page(page);
2016 /* This could be a short (even 0-length) commit */
2017 __block_commit_write(inode, page, start, start+copied);
2021 EXPORT_SYMBOL(block_write_end);
2023 int generic_write_end(struct file *file, struct address_space *mapping,
2024 loff_t pos, unsigned len, unsigned copied,
2025 struct page *page, void *fsdata)
2027 struct inode *inode = mapping->host;
2028 int i_size_changed = 0;
2030 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2033 * No need to use i_size_read() here, the i_size
2034 * cannot change under us because we hold i_mutex.
2036 * But it's important to update i_size while still holding page lock:
2037 * page writeout could otherwise come in and zero beyond i_size.
2039 if (pos+copied > inode->i_size) {
2040 i_size_write(inode, pos+copied);
2045 page_cache_release(page);
2048 * Don't mark the inode dirty under page lock. First, it unnecessarily
2049 * makes the holding time of page lock longer. Second, it forces lock
2050 * ordering of page lock and transaction start for journaling
2054 mark_inode_dirty(inode);
2058 EXPORT_SYMBOL(generic_write_end);
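/*
 * Illustrative sketch, not part of the original file: filesystems that
 * need no post-commit work can point ->write_end straight at
 * generic_write_end() in their address_space_operations; the names
 * below are hypothetical (example_write_begin() is sketched above).
 */
static const struct address_space_operations example_aops = {
	.write_begin	= example_write_begin,
	.write_end	= generic_write_end,
};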
2061 * block_is_partially_uptodate checks whether buffers within a page are
2064 * Returns true if all buffers which correspond to a file portion
2065 * we want to read are uptodate.
2067 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2070 struct inode *inode = page->mapping->host;
2071 unsigned block_start, block_end, blocksize;
2073 struct buffer_head *bh, *head;
2076 if (!page_has_buffers(page))
2079 blocksize = 1 << inode->i_blkbits;
2080 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2082 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2085 head = page_buffers(page);
2089 block_end = block_start + blocksize;
2090 if (block_end > from && block_start < to) {
2091 if (!buffer_uptodate(bh)) {
2095 if (block_end >= to)
2098 block_start = block_end;
2099 bh = bh->b_this_page;
2100 } while (bh != head);
2104 EXPORT_SYMBOL(block_is_partially_uptodate);
2107 * Generic "read page" function for block devices that have the normal
2108 * get_block functionality. This is most of the block device filesystems.
2109 * Reads the page asynchronously --- the unlock_buffer() and
2110 * set/clear_buffer_uptodate() functions propagate buffer state into the
2111 * page struct once IO has completed.
2113 int block_read_full_page(struct page *page, get_block_t *get_block)
2115 struct inode *inode = page->mapping->host;
2116 sector_t iblock, lblock;
2117 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2118 unsigned int blocksize;
2120 int fully_mapped = 1;
2122 BUG_ON(!PageLocked(page));
2123 blocksize = 1 << inode->i_blkbits;
2124 if (!page_has_buffers(page))
2125 create_empty_buffers(page, blocksize, 0);
2126 head = page_buffers(page);
2128 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2129 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2135 if (buffer_uptodate(bh))
2138 if (!buffer_mapped(bh)) {
2142 if (iblock < lblock) {
2143 WARN_ON(bh->b_size != blocksize);
2144 err = get_block(inode, iblock, bh, 0);
2148 if (!buffer_mapped(bh)) {
2149 zero_user(page, i * blocksize, blocksize);
2151 set_buffer_uptodate(bh);
2155 * get_block() might have updated the buffer
2158 if (buffer_uptodate(bh))
2162 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2165 SetPageMappedToDisk(page);
2169 * All buffers are uptodate - we can set the page uptodate
2170 * as well. But not if get_block() returned an error.
2172 if (!PageError(page))
2173 SetPageUptodate(page);
2178 /* Stage two: lock the buffers */
2179 for (i = 0; i < nr; i++) {
2182 mark_buffer_async_read(bh);
2186 * Stage 3: start the IO. Check for uptodateness
2187 * inside the buffer lock in case another process reading
2188 * the underlying blockdev brought it uptodate (the sct fix).
2190 for (i = 0; i < nr; i++) {
2192 if (buffer_uptodate(bh))
2193 end_buffer_async_read(bh, 1);
2195 submit_bh(READ, bh);
2199 EXPORT_SYMBOL(block_read_full_page);
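/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->readpage is typically a one-line wrapper around
 * block_read_full_page() with its own get_block (example_get_block(),
 * as sketched earlier, is a hypothetical name).
 */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}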
2201 /* utility function for filesystems that need to do work on expanding
2202 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2203 * deal with the hole.
2205 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2207 struct address_space *mapping = inode->i_mapping;
2212 err = inode_newsize_ok(inode, size);
2216 err = pagecache_write_begin(NULL, mapping, size, 0,
2217 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2222 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2228 EXPORT_SYMBOL(generic_cont_expand_simple);
2230 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2231 loff_t pos, loff_t *bytes)
2233 struct inode *inode = mapping->host;
2234 unsigned blocksize = 1 << inode->i_blkbits;
2237 pgoff_t index, curidx;
2239 unsigned zerofrom, offset, len;
2242 index = pos >> PAGE_CACHE_SHIFT;
2243 offset = pos & ~PAGE_CACHE_MASK;
2245 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2246 zerofrom = curpos & ~PAGE_CACHE_MASK;
2247 if (zerofrom & (blocksize-1)) {
2248 *bytes |= (blocksize-1);
2251 len = PAGE_CACHE_SIZE - zerofrom;
2253 err = pagecache_write_begin(file, mapping, curpos, len,
2254 AOP_FLAG_UNINTERRUPTIBLE,
2258 zero_user(page, zerofrom, len);
2259 err = pagecache_write_end(file, mapping, curpos, len, len,
2266 balance_dirty_pages_ratelimited(mapping);
2269 /* page covers the boundary, find the boundary offset */
2270 if (index == curidx) {
2271 zerofrom = curpos & ~PAGE_CACHE_MASK;
2272 /* if we will expand the thing last block will be filled */
2273 if (offset <= zerofrom) {
2276 if (zerofrom & (blocksize-1)) {
2277 *bytes |= (blocksize-1);
2280 len = offset - zerofrom;
2282 err = pagecache_write_begin(file, mapping, curpos, len,
2283 AOP_FLAG_UNINTERRUPTIBLE,
2287 zero_user(page, zerofrom, len);
2288 err = pagecache_write_end(file, mapping, curpos, len, len,
2300 * For moronic filesystems that do not allow holes in files.
2301 * We may have to extend the file.
2303 int cont_write_begin(struct file *file, struct address_space *mapping,
2304 loff_t pos, unsigned len, unsigned flags,
2305 struct page **pagep, void **fsdata,
2306 get_block_t *get_block, loff_t *bytes)
2308 struct inode *inode = mapping->host;
2309 unsigned blocksize = 1 << inode->i_blkbits;
2313 err = cont_expand_zero(file, mapping, pos, bytes);
2317 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2318 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2319 *bytes |= (blocksize-1);
2323 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2325 EXPORT_SYMBOL(cont_write_begin);
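/*
 * Sketch (editor's addition), loosely modelled on how FAT-like
 * filesystems use this helper: the caller hands cont_write_begin() a
 * pointer to its private byte counter (a loff_t recording how far the
 * file has been zero-filled), which the helper advances as it fills
 * the gap.  "MYFS_I", "allocated_bytes" and "myfs_get_block" are
 * hypothetical.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block,
				&MYFS_I(mapping->host)->allocated_bytes);
}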
2327 int block_commit_write(struct page *page, unsigned from, unsigned to)
2329 struct inode *inode = page->mapping->host;
2330 __block_commit_write(inode, page, from, to);
2333 EXPORT_SYMBOL(block_commit_write);
2336 * block_page_mkwrite() is not allowed to change the file size as it gets
2337 * called from a page fault handler when a page is first dirtied. Hence we must
2338 * be careful to check for EOF conditions here. We set the page up correctly
2339 * for a written page which means we get ENOSPC checking when writing into
2340 * holes and correct delalloc and unwritten extent mapping on filesystems that
2341 * support these features.
2343 * We are not allowed to take the i_mutex here so we have to play games to
2344 * protect against truncate races as the page could now be beyond EOF. Because
2345 * truncate writes the inode size before removing pages, once we have the
2346 * page lock we can determine safely if the page is beyond EOF. If it is not
2347 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
2351 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2352 get_block_t get_block)
2354 struct page *page = vmf->page;
2355 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2358 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2361 size = i_size_read(inode);
2362 if ((page->mapping != inode->i_mapping) ||
2363 (page_offset(page) > size)) {
2364 /* page got truncated out from underneath us */
2369 /* page is wholly or partially inside EOF */
2370 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2371 end = size & ~PAGE_CACHE_MASK;
2373 end = PAGE_CACHE_SIZE;
2375 ret = __block_write_begin(page, 0, end, get_block);
2377 ret = block_commit_write(page, 0, end);
2379 if (unlikely(ret)) {
2383 else /* -ENOSPC, -EIO, etc */
2384 ret = VM_FAULT_SIGBUS;
2386 ret = VM_FAULT_LOCKED;
2391 EXPORT_SYMBOL(block_page_mkwrite);
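/*
 * Editor's illustration (not in the original): block_page_mkwrite() is
 * normally exposed through a file's vm_operations, next to
 * filemap_fault().  "myfs_get_block" is hypothetical.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};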
2394 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2395 * immediately, while under the page lock. So it needs a special end_io
2396 * handler which does not touch the bh after unlocking it.
2398 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2400 __end_buffer_read_notouch(bh, uptodate);
2404 * Attach the singly-linked list of buffers created by nobh_write_begin to
2405 * the page (converting it to a circular list and taking care of page dirty races).
2408 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2410 struct buffer_head *bh;
2412 BUG_ON(!PageLocked(page));
2414 spin_lock(&page->mapping->private_lock);
2417 if (PageDirty(page))
2418 set_buffer_dirty(bh);
2419 if (!bh->b_this_page)
2420 bh->b_this_page = head;
2421 bh = bh->b_this_page;
2422 } while (bh != head);
2423 attach_page_buffers(page, head);
2424 spin_unlock(&page->mapping->private_lock);
2428 * On entry, the page is entirely not uptodate.
2429 * On exit, the page is fully uptodate in the areas outside (from,to).
2430 * The filesystem needs to handle block truncation upon failure.
2432 int nobh_write_begin(struct address_space *mapping,
2433 loff_t pos, unsigned len, unsigned flags,
2434 struct page **pagep, void **fsdata,
2435 get_block_t *get_block)
2437 struct inode *inode = mapping->host;
2438 const unsigned blkbits = inode->i_blkbits;
2439 const unsigned blocksize = 1 << blkbits;
2440 struct buffer_head *head, *bh;
2444 unsigned block_in_page;
2445 unsigned block_start, block_end;
2446 sector_t block_in_file;
2449 int is_mapped_to_disk = 1;
2451 index = pos >> PAGE_CACHE_SHIFT;
2452 from = pos & (PAGE_CACHE_SIZE - 1);
2455 page = grab_cache_page_write_begin(mapping, index, flags);
2461 if (page_has_buffers(page)) {
2463 page_cache_release(page);
2465 return block_write_begin(mapping, pos, len, flags, pagep,
2469 if (PageMappedToDisk(page))
2473 * Allocate buffers so that we can keep track of state, and potentially
2474 * attach them to the page if an error occurs. In the common case of
2475 * no error, they will just be freed again without ever being attached
2476 * to the page (which is all OK, because we're under the page lock).
2478 * Be careful: the buffer linked list is a NULL terminated one, rather
2479 * than the circular one we're used to.
2481 head = alloc_page_buffers(page, blocksize, 0);
2487 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2490 * We loop across all blocks in the page, whether or not they are
2491 * part of the affected region. This is so we can discover if the
2492 * page is fully mapped-to-disk.
2494 for (block_start = 0, block_in_page = 0, bh = head;
2495 block_start < PAGE_CACHE_SIZE;
2496 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2499 block_end = block_start + blocksize;
2502 if (block_start >= to)
2504 ret = get_block(inode, block_in_file + block_in_page,
2508 if (!buffer_mapped(bh))
2509 is_mapped_to_disk = 0;
2511 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2512 if (PageUptodate(page)) {
2513 set_buffer_uptodate(bh);
2516 if (buffer_new(bh) || !buffer_mapped(bh)) {
2517 zero_user_segments(page, block_start, from,
2521 if (buffer_uptodate(bh))
2522 continue; /* reiserfs does this */
2523 if (block_start < from || block_end > to) {
2525 bh->b_end_io = end_buffer_read_nobh;
2526 submit_bh(READ, bh);
2533 * The page is locked, so these buffers are protected from
2534 * any VM or truncate activity. Hence we don't need to care
2535 * for the buffer_head refcounts.
2537 for (bh = head; bh; bh = bh->b_this_page) {
2539 if (!buffer_uptodate(bh))
2546 if (is_mapped_to_disk)
2547 SetPageMappedToDisk(page);
2549 *fsdata = head; /* to be released by nobh_write_end */
2556 * Error recovery is a bit difficult. We need to zero out blocks that
2557 * were newly allocated, and dirty them to ensure they get written out.
2558 * Buffers need to be attached to the page at this point, otherwise
2559 * the handling of potential IO errors during writeout would be hard
2560 * (could try doing synchronous writeout, but what if that fails too?)
2562 attach_nobh_buffers(page, head);
2563 page_zero_new_buffers(page, from, to);
2567 page_cache_release(page);
2572 EXPORT_SYMBOL(nobh_write_begin);
2574 int nobh_write_end(struct file *file, struct address_space *mapping,
2575 loff_t pos, unsigned len, unsigned copied,
2576 struct page *page, void *fsdata)
2578 struct inode *inode = page->mapping->host;
2579 struct buffer_head *head = fsdata;
2580 struct buffer_head *bh;
2581 BUG_ON(fsdata != NULL && page_has_buffers(page));
2583 if (unlikely(copied < len) && head)
2584 attach_nobh_buffers(page, head);
2585 if (page_has_buffers(page))
2586 return generic_write_end(file, mapping, pos, len,
2587 copied, page, fsdata);
2589 SetPageUptodate(page);
2590 set_page_dirty(page);
2591 if (pos+copied > inode->i_size) {
2592 i_size_write(inode, pos+copied);
2593 mark_inode_dirty(inode);
2597 page_cache_release(page);
2601 head = head->b_this_page;
2602 free_buffer_head(bh);
2607 EXPORT_SYMBOL(nobh_write_end);
2610 * nobh_writepage() - based on block_write_full_page() except
2611 * that it tries to operate without attaching bufferheads to the page.
2614 int nobh_writepage(struct page *page, get_block_t *get_block,
2615 struct writeback_control *wbc)
2617 struct inode * const inode = page->mapping->host;
2618 loff_t i_size = i_size_read(inode);
2619 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2623 /* Is the page fully inside i_size? */
2624 if (page->index < end_index)
2627 /* Is the page fully outside i_size? (truncate in progress) */
2628 offset = i_size & (PAGE_CACHE_SIZE-1);
2629 if (page->index >= end_index+1 || !offset) {
2631 * The page may have dirty, unmapped buffers. For example,
2632 * they may have been added in ext3_writepage(). Make them
2633 * freeable here, so the page does not leak.
2636 /* Not really sure about this - do we need this? */
2637 if (page->mapping->a_ops->invalidatepage)
2638 page->mapping->a_ops->invalidatepage(page, offset);
2641 return 0; /* don't care */
2645 * The page straddles i_size. It must be zeroed out on each and every
2646 * writepage invocation because it may be mmapped. "A file is mapped
2647 * in multiples of the page size. For a file that is not a multiple of
2648 * the page size, the remaining memory is zeroed when mapped, and
2649 * writes to that region are not written out to the file."
2651 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2653 ret = mpage_writepage(page, get_block, wbc);
2655 ret = __block_write_full_page(inode, page, get_block, wbc,
2656 end_buffer_async_write);
2659 EXPORT_SYMBOL(nobh_writepage);
2661 int nobh_truncate_page(struct address_space *mapping,
2662 loff_t from, get_block_t *get_block)
2664 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2665 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2668 unsigned length, pos;
2669 struct inode *inode = mapping->host;
2671 struct buffer_head map_bh;
2674 blocksize = 1 << inode->i_blkbits;
2675 length = offset & (blocksize - 1);
2677 /* Block boundary? Nothing to do */
2681 length = blocksize - length;
2682 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2684 page = grab_cache_page(mapping, index);
2689 if (page_has_buffers(page)) {
2692 page_cache_release(page);
2693 return block_truncate_page(mapping, from, get_block);
2696 /* Find the buffer that contains "offset" */
2698 while (offset >= pos) {
2703 map_bh.b_size = blocksize;
2705 err = get_block(inode, iblock, &map_bh, 0);
2708 /* unmapped? It's a hole - nothing to do */
2709 if (!buffer_mapped(&map_bh))
2712 /* Ok, it's mapped. Make sure it's up-to-date */
2713 if (!PageUptodate(page)) {
2714 err = mapping->a_ops->readpage(NULL, page);
2716 page_cache_release(page);
2720 if (!PageUptodate(page)) {
2724 if (page_has_buffers(page))
2727 zero_user(page, offset, length);
2728 set_page_dirty(page);
2733 page_cache_release(page);
2737 EXPORT_SYMBOL(nobh_truncate_page);
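/*
 * Sketch (editor's addition): a filesystem that wants pages without
 * permanently attached buffer_heads plugs the nobh_* helpers into its
 * address_space operations as a matched set (nobh_write_begin,
 * nobh_write_end, nobh_writepage, plus nobh_truncate_page in its
 * truncate path).  "myfs_get_block" is hypothetical.
 */
static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}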
2739 int block_truncate_page(struct address_space *mapping,
2740 loff_t from, get_block_t *get_block)
2742 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2743 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2746 unsigned length, pos;
2747 struct inode *inode = mapping->host;
2749 struct buffer_head *bh;
2752 blocksize = 1 << inode->i_blkbits;
2753 length = offset & (blocksize - 1);
2755 /* Block boundary? Nothing to do */
2759 length = blocksize - length;
2760 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2762 page = grab_cache_page(mapping, index);
2767 if (!page_has_buffers(page))
2768 create_empty_buffers(page, blocksize, 0);
2770 /* Find the buffer that contains "offset" */
2771 bh = page_buffers(page);
2773 while (offset >= pos) {
2774 bh = bh->b_this_page;
2780 if (!buffer_mapped(bh)) {
2781 WARN_ON(bh->b_size != blocksize);
2782 err = get_block(inode, iblock, bh, 0);
2785 /* unmapped? It's a hole - nothing to do */
2786 if (!buffer_mapped(bh))
2790 /* Ok, it's mapped. Make sure it's up-to-date */
2791 if (PageUptodate(page))
2792 set_buffer_uptodate(bh);
2794 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2796 ll_rw_block(READ, 1, &bh);
2798 /* Uhhuh. Read error. Complain and punt. */
2799 if (!buffer_uptodate(bh))
2803 zero_user(page, offset, length);
2804 mark_buffer_dirty(bh);
2809 page_cache_release(page);
2813 EXPORT_SYMBOL(block_truncate_page);
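/*
 * Editor's illustration (not in the original): in a shrinking truncate
 * the filesystem zeroes the tail of the new last block before freeing
 * the blocks beyond it, so that a later extension cannot expose stale
 * data.  "myfs_get_block" is hypothetical; error handling is elided.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
			myfs_get_block);
	/* ...then free the on-disk blocks past the new i_size. */
}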
2816 * The generic ->writepage function for buffer-backed address_spaces
2817 * this form passes in the end_io handler used to finish the IO.
2819 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2820 struct writeback_control *wbc, bh_end_io_t *handler)
2822 struct inode * const inode = page->mapping->host;
2823 loff_t i_size = i_size_read(inode);
2824 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2827 /* Is the page fully inside i_size? */
2828 if (page->index < end_index)
2829 return __block_write_full_page(inode, page, get_block, wbc,
2832 /* Is the page fully outside i_size? (truncate in progress) */
2833 offset = i_size & (PAGE_CACHE_SIZE-1);
2834 if (page->index >= end_index+1 || !offset) {
2836 * The page may have dirty, unmapped buffers. For example,
2837 * they may have been added in ext3_writepage(). Make them
2838 * freeable here, so the page does not leak.
2840 do_invalidatepage(page, 0);
2842 return 0; /* don't care */
2846 * The page straddles i_size. It must be zeroed out on each and every
2847 * writepage invocation because it may be mmapped. "A file is mapped
2848 * in multiples of the page size. For a file that is not a multiple of
2849 * the page size, the remaining memory is zeroed when mapped, and
2850 * writes to that region are not written out to the file."
2852 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2853 return __block_write_full_page(inode, page, get_block, wbc, handler);
2855 EXPORT_SYMBOL(block_write_full_page_endio);
2858 * The generic ->writepage function for buffer-backed address_spaces
2860 int block_write_full_page(struct page *page, get_block_t *get_block,
2861 struct writeback_control *wbc)
2863 return block_write_full_page_endio(page, get_block, wbc,
2864 end_buffer_async_write);
2866 EXPORT_SYMBOL(block_write_full_page);
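/*
 * Editor's illustration (not in the original): the usual ->writepage
 * for a get_block-based filesystem is a one-line wrapper around
 * block_write_full_page().  "myfs_get_block" is hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}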
2868 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2869 get_block_t *get_block)
2871 struct buffer_head tmp;
2872 struct inode *inode = mapping->host;
2875 tmp.b_size = 1 << inode->i_blkbits;
2876 get_block(inode, block, &tmp, 0);
2877 return tmp.b_blocknr;
2879 EXPORT_SYMBOL(generic_block_bmap);
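/*
 * Editor's illustration (not in the original): ->bmap, queried by the
 * FIBMAP ioctl and the swap code, is likewise a thin wrapper.
 * "myfs_get_block" is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}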
2881 static void end_bio_bh_io_sync(struct bio *bio, int err)
2883 struct buffer_head *bh = bio->bi_private;
2885 if (err == -EOPNOTSUPP) {
2886 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2889 if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
2890 set_bit(BH_Quiet, &bh->b_state);
2892 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2896 int submit_bh(int rw, struct buffer_head * bh)
2901 BUG_ON(!buffer_locked(bh));
2902 BUG_ON(!buffer_mapped(bh));
2903 BUG_ON(!bh->b_end_io);
2904 BUG_ON(buffer_delay(bh));
2905 BUG_ON(buffer_unwritten(bh));
2908 * Only clear out a write error when rewriting
2910 if (test_set_buffer_req(bh) && (rw & WRITE))
2911 clear_buffer_write_io_error(bh);
2914 * from here on down, it's all bio -- do the initial mapping,
2915 * submit_bio -> generic_make_request may further map this bio around
2917 bio = bio_alloc(GFP_NOIO, 1);
2919 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2920 bio->bi_bdev = bh->b_bdev;
2921 bio->bi_io_vec[0].bv_page = bh->b_page;
2922 bio->bi_io_vec[0].bv_len = bh->b_size;
2923 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2927 bio->bi_size = bh->b_size;
2929 bio->bi_end_io = end_bio_bh_io_sync;
2930 bio->bi_private = bh;
2933 submit_bio(rw, bio);
2935 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2941 EXPORT_SYMBOL(submit_bh);
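/*
 * Sketch (editor's addition) of the canonical synchronous read built
 * on submit_bh().  end_buffer_read_sync() drops the reference taken
 * here, so the buffer_head stays pinned for the duration of the I/O.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}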
2944 * ll_rw_block: low-level access to block devices (DEPRECATED)
2945 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2946 * @nr: number of &struct buffer_heads in the array
2947 * @bhs: array of pointers to &struct buffer_head
2949 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2950 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2951 * %READA option is described in the documentation for generic_make_request()
2952 * which ll_rw_block() calls.
2954 * This function drops any buffer that it cannot get a lock on (with the
2955 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2956 * request, and any buffer that appears to be up-to-date when doing a read
2957 * request. Further, it marks as clean any buffers that are processed for
2958 * writing (the buffer cache won't assume that they are actually clean
2959 * until the buffer gets unlocked).
2961 * ll_rw_block sets b_end_io to a simple completion handler that marks
2962 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes any waiters.
2965 * All of the buffers must be for the same device, and must also be a
2966 * multiple of the current approved size for the device.
2968 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2972 for (i = 0; i < nr; i++) {
2973 struct buffer_head *bh = bhs[i];
2975 if (!trylock_buffer(bh))
2978 if (test_clear_buffer_dirty(bh)) {
2979 bh->b_end_io = end_buffer_write_sync;
2981 submit_bh(WRITE, bh);
2985 if (!buffer_uptodate(bh)) {
2986 bh->b_end_io = end_buffer_read_sync;
2995 EXPORT_SYMBOL(ll_rw_block);
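/*
 * Sketch (editor's addition): because ll_rw_block() silently skips
 * buffers it cannot lock, callers that need the data must wait and
 * then re-check uptodateness themselves.
 */
static int example_read_pair(struct buffer_head *bh1, struct buffer_head *bh2)
{
	struct buffer_head *bhs[2] = { bh1, bh2 };

	ll_rw_block(READ, 2, bhs);
	wait_on_buffer(bh1);
	wait_on_buffer(bh2);
	if (!buffer_uptodate(bh1) || !buffer_uptodate(bh2))
		return -EIO;
	return 0;
}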
2997 void write_dirty_buffer(struct buffer_head *bh, int rw)
3000 if (!test_clear_buffer_dirty(bh)) {
3004 bh->b_end_io = end_buffer_write_sync;
3008 EXPORT_SYMBOL(write_dirty_buffer);
3011 * For a data-integrity writeout, we need to wait upon any in-progress I/O,
3012 * then start new I/O and then wait upon it. The caller must have a ref on the buffer.
3015 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3019 WARN_ON(atomic_read(&bh->b_count) < 1);
3021 if (test_clear_buffer_dirty(bh)) {
3023 bh->b_end_io = end_buffer_write_sync;
3024 ret = submit_bh(rw, bh);
3026 if (!ret && !buffer_uptodate(bh))
3033 EXPORT_SYMBOL(__sync_dirty_buffer);
3035 int sync_dirty_buffer(struct buffer_head *bh)
3037 return __sync_dirty_buffer(bh, WRITE_SYNC);
3039 EXPORT_SYMBOL(sync_dirty_buffer);
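/*
 * Sketch (editor's addition): forcing a piece of metadata to stable
 * storage is a matter of dirtying the buffer and waiting on the write.
 */
static int example_write_bh_sync(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0 on success, -EIO on error */
}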
3042 * try_to_free_buffers() checks if all the buffers on this particular page
3043 * are unused, and releases them if so.
3045 * Exclusion against try_to_free_buffers may be obtained by either
3046 * locking the page or by holding its mapping's private_lock.
3048 * If the page is dirty but all the buffers are clean then we need to
3049 * be sure to mark the page clean as well. This is because the page
3050 * may be against a block device, and a later reattachment of buffers
3051 * to a dirty page will set *all* buffers dirty, which would corrupt
3052 * filesystem data on the same device.
3054 * The same applies to regular filesystem pages: if all the buffers are
3055 * clean then we set the page clean and proceed. To do that, we require
3056 * total exclusion from __set_page_dirty_buffers(). That is obtained with private_lock.
3059 * try_to_free_buffers() is non-blocking.
3061 static inline int buffer_busy(struct buffer_head *bh)
3063 return atomic_read(&bh->b_count) |
3064 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3068 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3070 struct buffer_head *head = page_buffers(page);
3071 struct buffer_head *bh;
3075 if (buffer_write_io_error(bh) && page->mapping)
3076 set_bit(AS_EIO, &page->mapping->flags);
3077 if (buffer_busy(bh))
3079 bh = bh->b_this_page;
3080 } while (bh != head);
3083 struct buffer_head *next = bh->b_this_page;
3085 if (bh->b_assoc_map)
3086 __remove_assoc_queue(bh);
3088 } while (bh != head);
3089 *buffers_to_free = head;
3090 __clear_page_buffers(page);
3096 int try_to_free_buffers(struct page *page)
3098 struct address_space * const mapping = page->mapping;
3099 struct buffer_head *buffers_to_free = NULL;
3102 BUG_ON(!PageLocked(page));
3103 if (PageWriteback(page))
3106 if (mapping == NULL) { /* can this still happen? */
3107 ret = drop_buffers(page, &buffers_to_free);
3111 spin_lock(&mapping->private_lock);
3112 ret = drop_buffers(page, &buffers_to_free);
3115 * If the filesystem writes its buffers by hand (eg ext3)
3116 * then we can have clean buffers against a dirty page. We
3117 * clean the page here; otherwise the VM will never notice
3118 * that the filesystem did any IO at all.
3120 * Also, during truncate, discard_buffer will have marked all
3121 * the page's buffers clean. We discover that here and clean the page also.
3124 * private_lock must be held over this entire operation in order
3125 * to synchronise against __set_page_dirty_buffers and prevent the
3126 * dirty bit from being lost.
3129 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3130 spin_unlock(&mapping->private_lock);
3132 if (buffers_to_free) {
3133 struct buffer_head *bh = buffers_to_free;
3136 struct buffer_head *next = bh->b_this_page;
3137 free_buffer_head(bh);
3139 } while (bh != buffers_to_free);
3143 EXPORT_SYMBOL(try_to_free_buffers);
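/*
 * Editor's illustration (not in the original): try_to_free_buffers()
 * is the natural backend for a buffer-backed ->releasepage when the
 * filesystem holds no private pins of its own.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}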
3145 void block_sync_page(struct page *page)
3147 struct address_space *mapping;
3150 mapping = page_mapping(page);
3152 blk_run_backing_dev(mapping->backing_dev_info, page);
3154 EXPORT_SYMBOL(block_sync_page);
3157 * There are no bdflush tunables left. But distributions are
3158 * still running obsolete flush daemons, so we terminate them here.
3160 * Use of bdflush() is deprecated and will be removed in a future kernel.
3161 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3163 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3165 static int msg_count;
3167 if (!capable(CAP_SYS_ADMIN))
3170 if (msg_count < 5) {
3173 "warning: process `%s' used the obsolete bdflush"
3174 " system call\n", current->comm);
3175 printk(KERN_INFO "Fix your initscripts?\n");
3184 * Buffer-head allocation
3186 static struct kmem_cache *bh_cachep;
3189 * Once the number of bh's in the machine exceeds this level, we start
3190 * stripping them in writeback.
3192 static int max_buffer_heads;
3194 int buffer_heads_over_limit;
3196 struct bh_accounting {
3197 int nr; /* Number of live bh's */
3198 int ratelimit; /* Limit cacheline bouncing */
3201 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3203 static void recalc_bh_state(void)
3208 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3210 __get_cpu_var(bh_accounting).ratelimit = 0;
3211 for_each_online_cpu(i)
3212 tot += per_cpu(bh_accounting, i).nr;
3213 buffer_heads_over_limit = (tot > max_buffer_heads);
3216 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3218 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3220 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3221 get_cpu_var(bh_accounting).nr++;
3223 put_cpu_var(bh_accounting);
3227 EXPORT_SYMBOL(alloc_buffer_head);
3229 void free_buffer_head(struct buffer_head *bh)
3231 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3232 kmem_cache_free(bh_cachep, bh);
3233 get_cpu_var(bh_accounting).nr--;
3235 put_cpu_var(bh_accounting);
3237 EXPORT_SYMBOL(free_buffer_head);
3239 static void buffer_exit_cpu(int cpu)
3242 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3244 for (i = 0; i < BH_LRU_SIZE; i++) {
3248 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3249 per_cpu(bh_accounting, cpu).nr = 0;
3250 put_cpu_var(bh_accounting);
3253 static int buffer_cpu_notify(struct notifier_block *self,
3254 unsigned long action, void *hcpu)
3256 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3257 buffer_exit_cpu((unsigned long)hcpu);
3262 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3263 * @bh: struct buffer_head
3265 * Returns true if the buffer is uptodate, and false (with the
3266 * buffer locked) if not.
3268 int bh_uptodate_or_lock(struct buffer_head *bh)
3270 if (!buffer_uptodate(bh)) {
3272 if (!buffer_uptodate(bh))
3278 EXPORT_SYMBOL(bh_uptodate_or_lock);
3281 * bh_submit_read - Submit a locked buffer for reading
3282 * @bh: struct buffer_head
3284 * Returns zero on success and -EIO on error.
3286 int bh_submit_read(struct buffer_head *bh)
3288 BUG_ON(!buffer_locked(bh));
3290 if (buffer_uptodate(bh)) {
3296 bh->b_end_io = end_buffer_read_sync;
3297 submit_bh(READ, bh);
3299 if (buffer_uptodate(bh))
3303 EXPORT_SYMBOL(bh_submit_read);
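/*
 * Sketch (editor's addition): bh_uptodate_or_lock() and
 * bh_submit_read() combine into a "read unless already cached" idiom.
 */
static int example_read_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* uptodate; buffer left unlocked */
	return bh_submit_read(bh);	/* consumes the lock; 0 or -EIO */
}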
3305 void __init buffer_init(void)
3309 bh_cachep = kmem_cache_create("buffer_head",
3310 sizeof(struct buffer_head), 0,
3311 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3316 * Limit the bh occupancy to 10% of ZONE_NORMAL
3318 nrpages = (nr_free_buffer_pages() * 10) / 100;
3319 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3320 hotcpu_notifier(buffer_cpu_notify, 0);