1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
70void fastcall __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
77void fastcall unlock_buffer(struct buffer_head *bh)
78{
79 smp_mb__before_clear_bit();
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
83}
84
85/*
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
89 */
90void __wait_on_buffer(struct buffer_head * bh)
91{
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93}
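/*
 * Illustrative sketch (not part of this file): the usual locking protocol for
 * a synchronous read of a single buffer, mirroring what __bread_slow() does
 * further down.  The bh is assumed to have come from __getblk()/sb_getblk().
 */
static void example_read_buffer_sync(struct buffer_head *bh)
{
        lock_buffer(bh);                /* serialise against other I/O on this bh */
        if (!buffer_uptodate(bh)) {
                get_bh(bh);             /* reference dropped by end_buffer_read_sync() */
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);     /* sleeps via __wait_on_buffer() above */
        } else {
                unlock_buffer(bh);
        }
}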
94
95static void
96__clear_page_buffers(struct page *page)
97{
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
101}
102
103static void buffer_io_error(struct buffer_head *bh)
104{
105 char b[BDEVNAME_SIZE];
106
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
110}
111
112/*
113 * End-of-IO handler helper function which does not touch the bh after
114 * unlocking it.
115 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116 * a race there is benign: unlock_buffer() only uses the bh's address for
117 * hashing after unlocking the buffer, so it doesn't actually touch the bh
118 * itself.
119 */
120static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
121{
122 if (uptodate) {
123 set_buffer_uptodate(bh);
124 } else {
125 /* This happens, due to failed READA attempts. */
126 clear_buffer_uptodate(bh);
127 }
128 unlock_buffer(bh);
129}
130
131/*
132 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
133 * unlock the buffer. This is what ll_rw_block uses too.
134 */
135void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
136{
137 __end_buffer_read_notouch(bh, uptodate);
138 put_bh(bh);
139}
140
141void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
142{
143 char b[BDEVNAME_SIZE];
144
145 if (uptodate) {
146 set_buffer_uptodate(bh);
147 } else {
148 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
149 buffer_io_error(bh);
150 printk(KERN_WARNING "lost page write due to "
151 "I/O error on %s\n",
152 bdevname(bh->b_bdev, b));
153 }
154 set_buffer_write_io_error(bh);
155 clear_buffer_uptodate(bh);
156 }
157 unlock_buffer(bh);
158 put_bh(bh);
159}
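/*
 * Illustrative sketch (not part of this file): writing one dirty buffer
 * synchronously, with end_buffer_write_sync() as the completion handler.
 * This is essentially the sync_dirty_buffer() pattern.
 */
static int example_write_buffer_sync(struct buffer_head *bh)
{
        int ret = 0;

        lock_buffer(bh);
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);             /* dropped by end_buffer_write_sync() */
                bh->b_end_io = end_buffer_write_sync;
                ret = submit_bh(WRITE, bh);
                wait_on_buffer(bh);
                if (!ret && !buffer_uptodate(bh))
                        ret = -EIO;
        } else {
                unlock_buffer(bh);
        }
        return ret;
}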
160
161/*
162 * Write out and wait upon all the dirty data associated with a block
163 * device via its mapping. Does not take the superblock lock.
164 */
165int sync_blockdev(struct block_device *bdev)
166{
167 int ret = 0;
168
169 if (bdev)
170 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
171 return ret;
172}
173EXPORT_SYMBOL(sync_blockdev);
174
175/*
176 * Write out and wait upon all dirty data associated with this
177 * device. Filesystem data as well as the underlying block
178 * device. Takes the superblock lock.
179 */
180int fsync_bdev(struct block_device *bdev)
181{
182 struct super_block *sb = get_super(bdev);
183 if (sb) {
184 int res = fsync_super(sb);
185 drop_super(sb);
186 return res;
187 }
188 return sync_blockdev(bdev);
189}
190
191/**
192 * freeze_bdev -- lock a filesystem and force it into a consistent state
193 * @bdev: blockdevice to lock
194 *
195 * This takes the block device bd_mount_sem to make sure no new mounts
196 * happen on bdev until thaw_bdev() is called.
197 * If a superblock is found on this device, we take the s_umount semaphore
198 * on it to make sure nobody unmounts until the snapshot creation is done.
199 */
200struct super_block *freeze_bdev(struct block_device *bdev)
201{
202 struct super_block *sb;
203
204 down(&bdev->bd_mount_sem);
205 sb = get_super(bdev);
206 if (sb && !(sb->s_flags & MS_RDONLY)) {
207 sb->s_frozen = SB_FREEZE_WRITE;
208 smp_wmb();
209
210 __fsync_super(sb);
211
212 sb->s_frozen = SB_FREEZE_TRANS;
213 smp_wmb();
214
215 sync_blockdev(sb->s_bdev);
216
217 if (sb->s_op->write_super_lockfs)
218 sb->s_op->write_super_lockfs(sb);
219 }
220
221 sync_blockdev(bdev);
222 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
223}
224EXPORT_SYMBOL(freeze_bdev);
225
226/**
227 * thaw_bdev -- unlock filesystem
228 * @bdev: blockdevice to unlock
229 * @sb: associated superblock
230 *
231 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
232 */
233void thaw_bdev(struct block_device *bdev, struct super_block *sb)
234{
235 if (sb) {
236 BUG_ON(sb->s_bdev != bdev);
237
238 if (sb->s_op->unlockfs)
239 sb->s_op->unlockfs(sb);
240 sb->s_frozen = SB_UNFROZEN;
241 smp_wmb();
242 wake_up(&sb->s_wait_unfrozen);
243 drop_super(sb);
244 }
245
246 up(&bdev->bd_mount_sem);
247}
248EXPORT_SYMBOL(thaw_bdev);
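/*
 * Illustrative sketch (not part of this file): how a snapshot driver might
 * bracket its work with freeze_bdev()/thaw_bdev().  The function name and the
 * "capture" step are assumptions made up for the example.
 */
static void example_snapshot(struct block_device *bdev)
{
        struct super_block *sb;

        sb = freeze_bdev(bdev);         /* quiesce writes, sync the filesystem */
        /* ... capture the now-consistent on-disk state here ... */
        thaw_bdev(bdev, sb);            /* allow writes again */
}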
249
250/*
251 * Various filesystems appear to want __find_get_block to be non-blocking.
252 * But it's the page lock which protects the buffers. To get around this,
253 * we get exclusion from try_to_free_buffers with the blockdev mapping's
254 * private_lock.
255 *
256 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257 * may be quite high. This code could TryLock the page, and if that
258 * succeeds, there is no need to take private_lock. (But if
259 * private_lock is contended then so is mapping->tree_lock).
260 */
261static struct buffer_head *
262__find_get_block_slow(struct block_device *bdev, sector_t block)
263{
264 struct inode *bd_inode = bdev->bd_inode;
265 struct address_space *bd_mapping = bd_inode->i_mapping;
266 struct buffer_head *ret = NULL;
267 pgoff_t index;
268 struct buffer_head *bh;
269 struct buffer_head *head;
270 struct page *page;
271 int all_mapped = 1;
272
273 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 page = find_get_page(bd_mapping, index);
275 if (!page)
276 goto out;
277
278 spin_lock(&bd_mapping->private_lock);
279 if (!page_has_buffers(page))
280 goto out_unlock;
281 head = page_buffers(page);
282 bh = head;
283 do {
284 if (bh->b_blocknr == block) {
285 ret = bh;
286 get_bh(bh);
287 goto out_unlock;
288 }
289 if (!buffer_mapped(bh))
290 all_mapped = 0;
291 bh = bh->b_this_page;
292 } while (bh != head);
293
294 /* we might be here because some of the buffers on this page are
295 * not mapped. This is due to various races between
296 * file io on the block device and getblk. It gets dealt with
297 * elsewhere, don't buffer_error if we had some unmapped buffers
298 */
299 if (all_mapped) {
300 printk("__find_get_block_slow() failed. "
301 "block=%llu, b_blocknr=%llu\n",
302 (unsigned long long)block,
303 (unsigned long long)bh->b_blocknr);
304 printk("b_state=0x%08lx, b_size=%zu\n",
305 bh->b_state, bh->b_size);
306 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
307 }
308out_unlock:
309 spin_unlock(&bd_mapping->private_lock);
310 page_cache_release(page);
311out:
312 return ret;
313}
314
315/* If invalidate_buffers() will trash dirty buffers, it means some kind
316 of fs corruption is going on. Trashing dirty data always imply losing
317 information that was supposed to be just stored on the physical layer
318 by the user.
319
320 Thus invalidate_buffers in general usage is not allowed to trash
321 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
322 be preserved. These buffers are simply skipped.
323
324 We also skip buffers which are still in use. For example this can
325 happen if a userspace program is reading the block device.
326
327 NOTE: In the case where the user removed a removable-media disk while
328 there was still dirty data not synced to disk (due to a bug in the device
329 driver or to an error of the user), by not destroying the dirty buffers we
330 could generate corruption also on the next media inserted; thus a parameter
331 is necessary to handle this case in the safest way possible (trying
332 not to corrupt also the newly inserted disk with the data belonging to
333 the old, now corrupted, disk). Also for the ramdisk the natural thing
334 to do in order to release the ramdisk memory is to destroy dirty buffers.
335
336 These are two special cases. Normal usage implies that the device driver
337 issues a sync on the device (without waiting for I/O completion) and
338 then an invalidate_buffers call that doesn't trash dirty buffers.
339
340 For handling cache coherency with the blkdev pagecache the 'update' case
341 has been introduced. It is needed to re-read from disk any pinned
342 buffer. NOTE: re-reading from disk is destructive so we can do it only
343 when we assume nobody is changing the buffercache under our I/O and when
344 we think the disk contains more recent information than the buffercache.
345 The update == 1 pass marks the buffers we need to update, the update == 2
346 pass does the actual I/O. */
347void invalidate_bdev(struct block_device *bdev)
348{
349 struct address_space *mapping = bdev->bd_inode->i_mapping;
350
351 if (mapping->nrpages == 0)
352 return;
353
354 invalidate_bh_lrus();
355 invalidate_mapping_pages(mapping, 0, -1);
356}
357
358/*
359 * Kick pdflush then try to free up some ZONE_NORMAL memory.
360 */
361static void free_more_memory(void)
362{
363 struct zone **zones;
364 pg_data_t *pgdat;
365
366 wakeup_pdflush(1024);
367 yield();
368
369 for_each_online_pgdat(pgdat) {
370 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
371 if (*zones)
372 try_to_free_pages(zones, 0, GFP_NOFS);
373 }
374}
375
376/*
377 * I/O completion handler for block_read_full_page() - pages
378 * which come unlocked at the end of I/O.
379 */
380static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
381{
382 unsigned long flags;
383 struct buffer_head *first;
384 struct buffer_head *tmp;
385 struct page *page;
386 int page_uptodate = 1;
387
388 BUG_ON(!buffer_async_read(bh));
389
390 page = bh->b_page;
391 if (uptodate) {
392 set_buffer_uptodate(bh);
393 } else {
394 clear_buffer_uptodate(bh);
395 if (printk_ratelimit())
396 buffer_io_error(bh);
397 SetPageError(page);
398 }
399
400 /*
401 * Be _very_ careful from here on. Bad things can happen if
402 * two buffer heads end IO at almost the same time and both
403 * decide that the page is now completely done.
404 */
405 first = page_buffers(page);
406 local_irq_save(flags);
407 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
408 clear_buffer_async_read(bh);
409 unlock_buffer(bh);
410 tmp = bh;
411 do {
412 if (!buffer_uptodate(tmp))
413 page_uptodate = 0;
414 if (buffer_async_read(tmp)) {
415 BUG_ON(!buffer_locked(tmp));
416 goto still_busy;
417 }
418 tmp = tmp->b_this_page;
419 } while (tmp != bh);
420 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 local_irq_restore(flags);
422
423 /*
424 * If none of the buffers had errors and they are all
425 * uptodate then we can set the page uptodate.
426 */
427 if (page_uptodate && !PageError(page))
428 SetPageUptodate(page);
429 unlock_page(page);
430 return;
431
432still_busy:
433 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
434 local_irq_restore(flags);
435 return;
436}
437
438/*
439 * Completion handler for block_write_full_page() - pages which are unlocked
440 * during I/O, and which have PageWriteback cleared upon I/O completion.
441 */
442static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
443{
444 char b[BDEVNAME_SIZE];
445 unsigned long flags;
446 struct buffer_head *first;
447 struct buffer_head *tmp;
448 struct page *page;
449
450 BUG_ON(!buffer_async_write(bh));
451
452 page = bh->b_page;
453 if (uptodate) {
454 set_buffer_uptodate(bh);
455 } else {
456 if (printk_ratelimit()) {
457 buffer_io_error(bh);
458 printk(KERN_WARNING "lost page write due to "
459 "I/O error on %s\n",
460 bdevname(bh->b_bdev, b));
461 }
462 set_bit(AS_EIO, &page->mapping->flags);
463 set_buffer_write_io_error(bh);
464 clear_buffer_uptodate(bh);
465 SetPageError(page);
466 }
467
468 first = page_buffers(page);
469 local_irq_save(flags);
470 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
471
472 clear_buffer_async_write(bh);
473 unlock_buffer(bh);
474 tmp = bh->b_this_page;
475 while (tmp != bh) {
476 if (buffer_async_write(tmp)) {
477 BUG_ON(!buffer_locked(tmp));
478 goto still_busy;
479 }
480 tmp = tmp->b_this_page;
481 }
482 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
483 local_irq_restore(flags);
484 end_page_writeback(page);
485 return;
486
487still_busy:
488 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
489 local_irq_restore(flags);
490 return;
491}
492
493/*
494 * If a page's buffers are under async read-in (end_buffer_async_read
495 * completion) then there is a possibility that another thread of
496 * control could lock one of the buffers after it has completed
497 * but while some of the other buffers have not completed. This
498 * locked buffer would confuse end_buffer_async_read() into not unlocking
499 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
500 * that this buffer is not under async I/O.
501 *
502 * The page comes unlocked when it has no locked buffer_async buffers
503 * left.
504 *
505 * PageLocked prevents anyone starting new async I/O reads any of
506 * the buffers.
507 *
508 * PageWriteback is used to prevent simultaneous writeout of the same
509 * page.
510 *
511 * PageLocked prevents anyone from starting writeback of a page which is
512 * under read I/O (PageWriteback is only ever set against a locked page).
513 */
514static void mark_buffer_async_read(struct buffer_head *bh)
515{
516 bh->b_end_io = end_buffer_async_read;
517 set_buffer_async_read(bh);
518}
519
520void mark_buffer_async_write(struct buffer_head *bh)
521{
522 bh->b_end_io = end_buffer_async_write;
523 set_buffer_async_write(bh);
524}
525EXPORT_SYMBOL(mark_buffer_async_write);
526
527
528/*
529 * fs/buffer.c contains helper functions for buffer-backed address space's
530 * fsync functions. A common requirement for buffer-based filesystems is
531 * that certain data from the backing blockdev needs to be written out for
532 * a successful fsync(). For example, ext2 indirect blocks need to be
533 * written back and waited upon before fsync() returns.
534 *
535 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
536 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
537 * management of a list of dependent buffers at ->i_mapping->private_list.
538 *
539 * Locking is a little subtle: try_to_free_buffers() will remove buffers
540 * from their controlling inode's queue when they are being freed. But
541 * try_to_free_buffers() will be operating against the *blockdev* mapping
542 * at the time, not against the S_ISREG file which depends on those buffers.
543 * So the locking for private_list is via the private_lock in the address_space
544 * which backs the buffers. Which is different from the address_space
545 * against which the buffers are listed. So for a particular address_space,
546 * mapping->private_lock does *not* protect mapping->private_list! In fact,
547 * mapping->private_list will always be protected by the backing blockdev's
548 * ->private_lock.
549 *
550 * Which introduces a requirement: all buffers on an address_space's
551 * ->private_list must be from the same address_space: the blockdev's.
552 *
553 * address_spaces which do not place buffers at ->private_list via these
554 * utility functions are free to use private_lock and private_list for
555 * whatever they want. The only requirement is that list_empty(private_list)
556 * be true at clear_inode() time.
557 *
558 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
559 * filesystems should do that. invalidate_inode_buffers() should just go
560 * BUG_ON(!list_empty).
561 *
562 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
563 * take an address_space, not an inode. And it should be called
564 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
565 * queued up.
566 *
567 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
568 * list if it is already on a list. Because if the buffer is on a list,
569 * it *must* already be on the right one. If not, the filesystem is being
570 * silly. This will save a ton of locking. But first we have to ensure
571 * that buffers are taken *off* the old inode's list when they are freed
572 * (presumably in truncate). That requires careful auditing of all
573 * filesystems (do it inside bforget()). It could also be done by bringing
574 * b_inode back.
575 */
576
577/*
578 * The buffer's backing address_space's private_lock must be held
579 */
580static inline void __remove_assoc_queue(struct buffer_head *bh)
581{
582 list_del_init(&bh->b_assoc_buffers);
583 WARN_ON(!bh->b_assoc_map);
584 if (buffer_write_io_error(bh))
585 set_bit(AS_EIO, &bh->b_assoc_map->flags);
586 bh->b_assoc_map = NULL;
587}
588
589int inode_has_buffers(struct inode *inode)
590{
591 return !list_empty(&inode->i_data.private_list);
592}
593
594/*
595 * osync is designed to support O_SYNC io. It waits synchronously for
596 * all already-submitted IO to complete, but does not queue any new
597 * writes to the disk.
598 *
599 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
600 * you dirty the buffers, and then use osync_inode_buffers to wait for
601 * completion. Any other dirty buffers which are not yet queued for
602 * write will not be flushed to disk by the osync.
603 */
604static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
605{
606 struct buffer_head *bh;
607 struct list_head *p;
608 int err = 0;
609
610 spin_lock(lock);
611repeat:
612 list_for_each_prev(p, list) {
613 bh = BH_ENTRY(p);
614 if (buffer_locked(bh)) {
615 get_bh(bh);
616 spin_unlock(lock);
617 wait_on_buffer(bh);
618 if (!buffer_uptodate(bh))
619 err = -EIO;
620 brelse(bh);
621 spin_lock(lock);
622 goto repeat;
623 }
624 }
625 spin_unlock(lock);
626 return err;
627}
628
629/**
630 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
631 * buffers
632 * @mapping: the mapping which wants those buffers written
633 *
634 * Starts I/O against the buffers at mapping->private_list, and waits upon
635 * that I/O.
636 *
637 * Basically, this is a convenience function for fsync().
638 * @mapping is a file or directory which needs those buffers to be written for
639 * a successful fsync().
640 */
641int sync_mapping_buffers(struct address_space *mapping)
642{
643 struct address_space *buffer_mapping = mapping->assoc_mapping;
644
645 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
646 return 0;
647
648 return fsync_buffers_list(&buffer_mapping->private_lock,
649 &mapping->private_list);
650}
651EXPORT_SYMBOL(sync_mapping_buffers);
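/*
 * Illustrative sketch (not part of this file): a minimal ->fsync() for a
 * simple buffer-backed filesystem, in the style of ext2.  The function name
 * and the use of write_inode_now() are assumptions for the example.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct inode *inode = dentry->d_inode;
        int err, err2;

        /* write out and wait upon the buffers queued on ->private_list */
        err = sync_mapping_buffers(inode->i_mapping);
        if (!(inode->i_state & I_DIRTY))
                return err;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return err;
        err2 = write_inode_now(inode, 1);       /* flush the inode itself as well */
        return err ? err : err2;
}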
652
653/*
654 * Called when we've recently written block `bblock', and it is known that
655 * `bblock' was for a buffer_boundary() buffer. This means that the block at
656 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
657 * dirty, schedule it for IO. So that indirects merge nicely with their data.
658 */
659void write_boundary_block(struct block_device *bdev,
660 sector_t bblock, unsigned blocksize)
661{
662 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
663 if (bh) {
664 if (buffer_dirty(bh))
665 ll_rw_block(WRITE, 1, &bh);
666 put_bh(bh);
667 }
668}
669
670void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
671{
672 struct address_space *mapping = inode->i_mapping;
673 struct address_space *buffer_mapping = bh->b_page->mapping;
674
675 mark_buffer_dirty(bh);
676 if (!mapping->assoc_mapping) {
677 mapping->assoc_mapping = buffer_mapping;
678 } else {
679 BUG_ON(mapping->assoc_mapping != buffer_mapping);
680 }
681 if (list_empty(&bh->b_assoc_buffers)) {
682 spin_lock(&buffer_mapping->private_lock);
683 list_move_tail(&bh->b_assoc_buffers,
684 &mapping->private_list);
685 bh->b_assoc_map = mapping;
686 spin_unlock(&buffer_mapping->private_lock);
687 }
688}
689EXPORT_SYMBOL(mark_buffer_dirty_inode);
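/*
 * Illustrative sketch (not part of this file): how a filesystem queues an
 * indirect (metadata) block so that a later fsync() via sync_mapping_buffers()
 * will catch it.  "ind_block" and the helper name are assumptions.
 */
static void example_dirty_indirect(struct inode *inode, sector_t ind_block)
{
        struct buffer_head *bh = sb_getblk(inode->i_sb, ind_block);

        if (bh) {
                /* ... modify the indirect block's contents here ... */
                mark_buffer_dirty_inode(bh, inode);     /* dirty + add to ->private_list */
                brelse(bh);
        }
}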
690
691/*
692 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
693 * dirty.
694 *
695 * If warn is true, then emit a warning if the page is not uptodate and has
696 * not been truncated.
697 */
698static int __set_page_dirty(struct page *page,
699 struct address_space *mapping, int warn)
700{
701 if (unlikely(!mapping))
702 return !TestSetPageDirty(page);
703
704 if (TestSetPageDirty(page))
705 return 0;
706
707 write_lock_irq(&mapping->tree_lock);
708 if (page->mapping) { /* Race with truncate? */
709 WARN_ON_ONCE(warn && !PageUptodate(page));
710
711 if (mapping_cap_account_dirty(mapping)) {
712 __inc_zone_page_state(page, NR_FILE_DIRTY);
713 task_io_account_write(PAGE_CACHE_SIZE);
714 }
715 radix_tree_tag_set(&mapping->page_tree,
716 page_index(page), PAGECACHE_TAG_DIRTY);
717 }
718 write_unlock_irq(&mapping->tree_lock);
719 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
720
721 return 1;
722}
723
724/*
725 * Add a page to the dirty page list.
726 *
727 * It is a sad fact of life that this function is called from several places
728 * deeply under spinlocking. It may not sleep.
729 *
730 * If the page has buffers, the uptodate buffers are set dirty, to preserve
731 * dirty-state coherency between the page and the buffers. If the page does
732 * not have buffers then when they are later attached they will all be set
733 * dirty.
734 *
735 * The buffers are dirtied before the page is dirtied. There's a small race
736 * window in which a writepage caller may see the page cleanness but not the
737 * buffer dirtiness. That's fine. If this code were to set the page dirty
738 * before the buffers, a concurrent writepage caller could clear the page dirty
739 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
740 * page on the dirty page list.
741 *
742 * We use private_lock to lock against try_to_free_buffers while using the
743 * page's buffer list. Also use this to protect against clean buffers being
744 * added to the page after it was set dirty.
745 *
746 * FIXME: may need to call ->reservepage here as well. That's rather up to the
747 * address_space though.
748 */
749int __set_page_dirty_buffers(struct page *page)
750{
751 struct address_space *mapping = page_mapping(page);
752
753 if (unlikely(!mapping))
754 return !TestSetPageDirty(page);
755
756 spin_lock(&mapping->private_lock);
757 if (page_has_buffers(page)) {
758 struct buffer_head *head = page_buffers(page);
759 struct buffer_head *bh = head;
760
761 do {
762 set_buffer_dirty(bh);
763 bh = bh->b_this_page;
764 } while (bh != head);
765 }
766 spin_unlock(&mapping->private_lock);
767
768 return __set_page_dirty(page, mapping, 1);
769}
770EXPORT_SYMBOL(__set_page_dirty_buffers);
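/*
 * Illustrative sketch (not part of this file): buffer-backed filesystems
 * normally just plug this helper into their address_space_operations.
 */
static const struct address_space_operations example_aops = {
        .set_page_dirty = __set_page_dirty_buffers,
        /* .readpage, .writepage, .prepare_write, ... */
};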
771
772/*
773 * Write out and wait upon a list of buffers.
774 *
775 * We have conflicting pressures: we want to make sure that all
776 * initially dirty buffers get waited on, but that any subsequently
777 * dirtied buffers don't. After all, we don't want fsync to last
778 * forever if somebody is actively writing to the file.
779 *
780 * Do this in two main stages: first we copy dirty buffers to a
781 * temporary inode list, queueing the writes as we go. Then we clean
782 * up, waiting for those writes to complete.
783 *
784 * During this second stage, any subsequent updates to the file may end
785 * up refiling the buffer on the original inode's dirty list again, so
786 * there is a chance we will end up with a buffer queued for write but
787 * not yet completed on that list. So, as a final cleanup we go through
788 * the osync code to catch these locked, dirty buffers without requeuing
789 * any newly dirty buffers for write.
790 */
791static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
792{
793 struct buffer_head *bh;
794 struct list_head tmp;
795 int err = 0, err2;
796
797 INIT_LIST_HEAD(&tmp);
798
799 spin_lock(lock);
800 while (!list_empty(list)) {
801 bh = BH_ENTRY(list->next);
802 __remove_assoc_queue(bh);
803 if (buffer_dirty(bh) || buffer_locked(bh)) {
804 list_add(&bh->b_assoc_buffers, &tmp);
805 if (buffer_dirty(bh)) {
806 get_bh(bh);
807 spin_unlock(lock);
808 /*
809 * Ensure any pending I/O completes so that
810 * ll_rw_block() actually writes the current
811 * contents - it is a noop if I/O is still in
812 * flight on potentially older contents.
813 */
814 ll_rw_block(SWRITE, 1, &bh);
815 brelse(bh);
816 spin_lock(lock);
817 }
818 }
819 }
820
821 while (!list_empty(&tmp)) {
822 bh = BH_ENTRY(tmp.prev);
823 list_del_init(&bh->b_assoc_buffers);
824 get_bh(bh);
825 spin_unlock(lock);
826 wait_on_buffer(bh);
827 if (!buffer_uptodate(bh))
828 err = -EIO;
829 brelse(bh);
830 spin_lock(lock);
831 }
832
833 spin_unlock(lock);
834 err2 = osync_buffers_list(lock, list);
835 if (err)
836 return err;
837 else
838 return err2;
839}
840
841/*
842 * Invalidate any and all dirty buffers on a given inode. We are
843 * probably unmounting the fs, but that doesn't mean we have already
844 * done a sync(). Just drop the buffers from the inode list.
845 *
846 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
847 * assumes that all the buffers are against the blockdev. Not true
848 * for reiserfs.
849 */
850void invalidate_inode_buffers(struct inode *inode)
851{
852 if (inode_has_buffers(inode)) {
853 struct address_space *mapping = &inode->i_data;
854 struct list_head *list = &mapping->private_list;
855 struct address_space *buffer_mapping = mapping->assoc_mapping;
856
857 spin_lock(&buffer_mapping->private_lock);
858 while (!list_empty(list))
859 __remove_assoc_queue(BH_ENTRY(list->next));
860 spin_unlock(&buffer_mapping->private_lock);
861 }
862}
863
864/*
865 * Remove any clean buffers from the inode's buffer list. This is called
866 * when we're trying to free the inode itself. Those buffers can pin it.
867 *
868 * Returns true if all buffers were removed.
869 */
870int remove_inode_buffers(struct inode *inode)
871{
872 int ret = 1;
873
874 if (inode_has_buffers(inode)) {
875 struct address_space *mapping = &inode->i_data;
876 struct list_head *list = &mapping->private_list;
877 struct address_space *buffer_mapping = mapping->assoc_mapping;
878
879 spin_lock(&buffer_mapping->private_lock);
880 while (!list_empty(list)) {
881 struct buffer_head *bh = BH_ENTRY(list->next);
882 if (buffer_dirty(bh)) {
883 ret = 0;
884 break;
885 }
886 __remove_assoc_queue(bh);
887 }
888 spin_unlock(&buffer_mapping->private_lock);
889 }
890 return ret;
891}
892
893/*
894 * Create the appropriate buffers when given a page for data area and
895 * the size of each buffer. Use the bh->b_this_page linked list to
896 * follow the buffers created. Return NULL if unable to create more
897 * buffers.
898 *
899 * The retry flag is used to differentiate async IO (paging, swapping)
900 * which may not fail from ordinary buffer allocations.
901 */
902struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
903 int retry)
904{
905 struct buffer_head *bh, *head;
906 long offset;
907
908try_again:
909 head = NULL;
910 offset = PAGE_SIZE;
911 while ((offset -= size) >= 0) {
912 bh = alloc_buffer_head(GFP_NOFS);
913 if (!bh)
914 goto no_grow;
915
916 bh->b_bdev = NULL;
917 bh->b_this_page = head;
918 bh->b_blocknr = -1;
919 head = bh;
920
921 bh->b_state = 0;
922 atomic_set(&bh->b_count, 0);
923 bh->b_private = NULL;
924 bh->b_size = size;
925
926 /* Link the buffer to its page */
927 set_bh_page(bh, page, offset);
928
929 init_buffer(bh, NULL, NULL);
930 }
931 return head;
932/*
933 * In case anything failed, we just free everything we got.
934 */
935no_grow:
936 if (head) {
937 do {
938 bh = head;
939 head = head->b_this_page;
940 free_buffer_head(bh);
941 } while (head);
942 }
943
944 /*
945 * Return failure for non-async IO requests. Async IO requests
946 * are not allowed to fail, so we have to wait until buffer heads
947 * become available. But we don't want tasks sleeping with
948 * partially complete buffers, so all were released above.
949 */
950 if (!retry)
951 return NULL;
952
953 /* We're _really_ low on memory. Now we just
954 * wait for old buffer heads to become free due to
955 * finishing IO. Since this is an async request and
956 * the reserve list is empty, we're sure there are
957 * async buffer heads in use.
958 */
959 free_more_memory();
960 goto try_again;
961}
962EXPORT_SYMBOL_GPL(alloc_page_buffers);
963
964static inline void
965link_dev_buffers(struct page *page, struct buffer_head *head)
966{
967 struct buffer_head *bh, *tail;
968
969 bh = head;
970 do {
971 tail = bh;
972 bh = bh->b_this_page;
973 } while (bh);
974 tail->b_this_page = head;
975 attach_page_buffers(page, head);
976}
977
978/*
979 * Initialise the state of a blockdev page's buffers.
980 */
981static void
982init_page_buffers(struct page *page, struct block_device *bdev,
983 sector_t block, int size)
984{
985 struct buffer_head *head = page_buffers(page);
986 struct buffer_head *bh = head;
987 int uptodate = PageUptodate(page);
988
989 do {
990 if (!buffer_mapped(bh)) {
991 init_buffer(bh, NULL, NULL);
992 bh->b_bdev = bdev;
993 bh->b_blocknr = block;
994 if (uptodate)
995 set_buffer_uptodate(bh);
996 set_buffer_mapped(bh);
997 }
998 block++;
999 bh = bh->b_this_page;
1000 } while (bh != head);
1001}
1002
1003/*
1004 * Create the page-cache page that contains the requested block.
1005 *
1006 * This is used purely for blockdev mappings.
1007 */
1008static struct page *
1009grow_dev_page(struct block_device *bdev, sector_t block,
1010 pgoff_t index, int size)
1011{
1012 struct inode *inode = bdev->bd_inode;
1013 struct page *page;
1014 struct buffer_head *bh;
1015
1016 page = find_or_create_page(inode->i_mapping, index,
1017 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1018 if (!page)
1019 return NULL;
1020
1021 BUG_ON(!PageLocked(page));
1022
1023 if (page_has_buffers(page)) {
1024 bh = page_buffers(page);
1025 if (bh->b_size == size) {
1026 init_page_buffers(page, bdev, block, size);
1027 return page;
1028 }
1029 if (!try_to_free_buffers(page))
1030 goto failed;
1031 }
1032
1033 /*
1034 * Allocate some buffers for this page
1035 */
1036 bh = alloc_page_buffers(page, size, 0);
1037 if (!bh)
1038 goto failed;
1039
1040 /*
1041 * Link the page to the buffers and initialise them. Take the
1042 * lock to be atomic wrt __find_get_block(), which does not
1043 * run under the page lock.
1044 */
1045 spin_lock(&inode->i_mapping->private_lock);
1046 link_dev_buffers(page, bh);
1047 init_page_buffers(page, bdev, block, size);
1048 spin_unlock(&inode->i_mapping->private_lock);
1049 return page;
1050
1051failed:
1052 BUG();
1053 unlock_page(page);
1054 page_cache_release(page);
1055 return NULL;
1056}
1057
1058/*
1059 * Create buffers for the specified block device block's page. If
1060 * that page was dirty, the buffers are set dirty also.
1061 */
1062static int
1063grow_buffers(struct block_device *bdev, sector_t block, int size)
1064{
1065 struct page *page;
1066 pgoff_t index;
1067 int sizebits;
1068
1069 sizebits = -1;
1070 do {
1071 sizebits++;
1072 } while ((size << sizebits) < PAGE_SIZE);
1073
1074 index = block >> sizebits;
1075
1076 /*
1077 * Check for a block which wants to lie outside our maximum possible
1078 * pagecache index. (this comparison is done using sector_t types).
1079 */
1080 if (unlikely(index != block >> sizebits)) {
1081 char b[BDEVNAME_SIZE];
1082
1083 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1084 "device %s\n",
1085 __FUNCTION__, (unsigned long long)block,
1086 bdevname(bdev, b));
1087 return -EIO;
1088 }
1089 block = index << sizebits;
1090 /* Create a page with the proper size buffers.. */
1091 page = grow_dev_page(bdev, block, index, size);
1092 if (!page)
1093 return 0;
1094 unlock_page(page);
1095 page_cache_release(page);
1096 return 1;
1097}
1098
1099static struct buffer_head *
1100__getblk_slow(struct block_device *bdev, sector_t block, int size)
1101{
1102 /* Size must be multiple of hard sectorsize */
1103 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1104 (size < 512 || size > PAGE_SIZE))) {
1105 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1106 size);
1107 printk(KERN_ERR "hardsect size: %d\n",
1108 bdev_hardsect_size(bdev));
1109
1110 dump_stack();
1111 return NULL;
1112 }
1113
1114 for (;;) {
1115 struct buffer_head * bh;
1116 int ret;
1117
1118 bh = __find_get_block(bdev, block, size);
1119 if (bh)
1120 return bh;
1121
1122 ret = grow_buffers(bdev, block, size);
1123 if (ret < 0)
1124 return NULL;
1125 if (ret == 0)
1126 free_more_memory();
1127 }
1128}
1129
1130/*
1131 * The relationship between dirty buffers and dirty pages:
1132 *
1133 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1134 * the page is tagged dirty in its radix tree.
1135 *
1136 * At all times, the dirtiness of the buffers represents the dirtiness of
1137 * subsections of the page. If the page has buffers, the page dirty bit is
1138 * merely a hint about the true dirty state.
1139 *
1140 * When a page is set dirty in its entirety, all its buffers are marked dirty
1141 * (if the page has buffers).
1142 *
1143 * When a buffer is marked dirty, its page is dirtied, but the page's other
1144 * buffers are not.
1145 *
1146 * Also. When blockdev buffers are explicitly read with bread(), they
1147 * individually become uptodate. But their backing page remains not
1148 * uptodate - even if all of its buffers are uptodate. A subsequent
1149 * block_read_full_page() against that page will discover all the uptodate
1150 * buffers, will set the page uptodate and will perform no I/O.
1151 */
1152
1153/**
1154 * mark_buffer_dirty - mark a buffer_head as needing writeout
1155 * @bh: the buffer_head to mark dirty
1156 *
1157 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1158 * backing page dirty, then tag the page as dirty in its address_space's radix
1159 * tree and then attach the address_space's inode to its superblock's dirty
1160 * inode list.
1161 *
1162 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1163 * mapping->tree_lock and the global inode_lock.
1164 */
1165void fastcall mark_buffer_dirty(struct buffer_head *bh)
1166{
1167 WARN_ON_ONCE(!buffer_uptodate(bh));
1168 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1169 __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1170}
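/*
 * Illustrative sketch (not part of this file): the common read-modify-write
 * pattern for a metadata block.  The block number and the zeroing are made up
 * for the example.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr)
{
        struct buffer_head *bh = sb_bread(sb, blocknr);

        if (!bh)
                return -EIO;
        memset(bh->b_data, 0, bh->b_size);      /* modify the block's contents */
        mark_buffer_dirty(bh);                  /* bh, its page and the inode become dirty */
        brelse(bh);
        return 0;
}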
1171
1172/*
1173 * Decrement a buffer_head's reference count. If all buffers against a page
1174 * have zero reference count, are clean and unlocked, and if the page is clean
1175 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1176 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1177 * a page but it ends up not being freed, and buffers may later be reattached).
1178 */
1179void __brelse(struct buffer_head * buf)
1180{
1181 if (atomic_read(&buf->b_count)) {
1182 put_bh(buf);
1183 return;
1184 }
1185 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1186 WARN_ON(1);
1187}
1188
1189/*
1190 * bforget() is like brelse(), except it discards any
1191 * potentially dirty data.
1192 */
1193void __bforget(struct buffer_head *bh)
1194{
1195 clear_buffer_dirty(bh);
1196 if (!list_empty(&bh->b_assoc_buffers)) {
1197 struct address_space *buffer_mapping = bh->b_page->mapping;
1198
1199 spin_lock(&buffer_mapping->private_lock);
1200 list_del_init(&bh->b_assoc_buffers);
1201 bh->b_assoc_map = NULL;
1202 spin_unlock(&buffer_mapping->private_lock);
1203 }
1204 __brelse(bh);
1205}
1206
1207static struct buffer_head *__bread_slow(struct buffer_head *bh)
1208{
1209 lock_buffer(bh);
1210 if (buffer_uptodate(bh)) {
1211 unlock_buffer(bh);
1212 return bh;
1213 } else {
1214 get_bh(bh);
1215 bh->b_end_io = end_buffer_read_sync;
1216 submit_bh(READ, bh);
1217 wait_on_buffer(bh);
1218 if (buffer_uptodate(bh))
1219 return bh;
1220 }
1221 brelse(bh);
1222 return NULL;
1223}
1224
1225/*
1226 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1227 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1228 * refcount elevated by one when they're in an LRU. A buffer can only appear
1229 * once in a particular CPU's LRU. A single buffer can be present in multiple
1230 * CPU's LRUs at the same time.
1231 *
1232 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1233 * sb_find_get_block().
1234 *
1235 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1236 * a local interrupt disable for that.
1237 */
1238
1239#define BH_LRU_SIZE 8
1240
1241struct bh_lru {
1242 struct buffer_head *bhs[BH_LRU_SIZE];
1243};
1244
1245static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1246
1247#ifdef CONFIG_SMP
1248#define bh_lru_lock() local_irq_disable()
1249#define bh_lru_unlock() local_irq_enable()
1250#else
1251#define bh_lru_lock() preempt_disable()
1252#define bh_lru_unlock() preempt_enable()
1253#endif
1254
1255static inline void check_irqs_on(void)
1256{
1257#ifdef irqs_disabled
1258 BUG_ON(irqs_disabled());
1259#endif
1260}
1261
1262/*
1263 * The LRU management algorithm is dopey-but-simple. Sorry.
1264 */
1265static void bh_lru_install(struct buffer_head *bh)
1266{
1267 struct buffer_head *evictee = NULL;
1268 struct bh_lru *lru;
1269
1270 check_irqs_on();
1271 bh_lru_lock();
1272 lru = &__get_cpu_var(bh_lrus);
1273 if (lru->bhs[0] != bh) {
1274 struct buffer_head *bhs[BH_LRU_SIZE];
1275 int in;
1276 int out = 0;
1277
1278 get_bh(bh);
1279 bhs[out++] = bh;
1280 for (in = 0; in < BH_LRU_SIZE; in++) {
1281 struct buffer_head *bh2 = lru->bhs[in];
1282
1283 if (bh2 == bh) {
1284 __brelse(bh2);
1285 } else {
1286 if (out >= BH_LRU_SIZE) {
1287 BUG_ON(evictee != NULL);
1288 evictee = bh2;
1289 } else {
1290 bhs[out++] = bh2;
1291 }
1292 }
1293 }
1294 while (out < BH_LRU_SIZE)
1295 bhs[out++] = NULL;
1296 memcpy(lru->bhs, bhs, sizeof(bhs));
1297 }
1298 bh_lru_unlock();
1299
1300 if (evictee)
1301 __brelse(evictee);
1302}
1303
1304/*
1305 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1306 */
1307static struct buffer_head *
1308lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1309{
1310 struct buffer_head *ret = NULL;
1311 struct bh_lru *lru;
1312 unsigned int i;
1313
1314 check_irqs_on();
1315 bh_lru_lock();
1316 lru = &__get_cpu_var(bh_lrus);
1317 for (i = 0; i < BH_LRU_SIZE; i++) {
1318 struct buffer_head *bh = lru->bhs[i];
1319
1320 if (bh && bh->b_bdev == bdev &&
1321 bh->b_blocknr == block && bh->b_size == size) {
1322 if (i) {
1323 while (i) {
1324 lru->bhs[i] = lru->bhs[i - 1];
1325 i--;
1326 }
1327 lru->bhs[0] = bh;
1328 }
1329 get_bh(bh);
1330 ret = bh;
1331 break;
1332 }
1333 }
1334 bh_lru_unlock();
1335 return ret;
1336}
1337
1338/*
1339 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1340 * it in the LRU and mark it as accessed. If it is not present then return
1341 * NULL
1342 */
1343struct buffer_head *
1344__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1345{
1346 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1347
1348 if (bh == NULL) {
1349 bh = __find_get_block_slow(bdev, block);
1350 if (bh)
1351 bh_lru_install(bh);
1352 }
1353 if (bh)
1354 touch_buffer(bh);
1355 return bh;
1356}
1357EXPORT_SYMBOL(__find_get_block);
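/*
 * Illustrative sketch (not part of this file): probing the cache without
 * doing any I/O.  sb_find_get_block() is the superblock wrapper around
 * __find_get_block(); a NULL return simply means "not cached".
 */
static int example_block_is_cached(struct super_block *sb, sector_t block)
{
        struct buffer_head *bh = sb_find_get_block(sb, block);

        if (!bh)
                return 0;
        brelse(bh);
        return 1;
}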
1358
1359/*
1360 * __getblk will locate (and, if necessary, create) the buffer_head
1361 * which corresponds to the passed block_device, block and size. The
1362 * returned buffer has its reference count incremented.
1363 *
1364 * __getblk() cannot fail - it just keeps trying. If you pass it an
1365 * illegal block number, __getblk() will happily return a buffer_head
1366 * which represents the non-existent block. Very weird.
1367 *
1368 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1369 * attempt is failing. FIXME, perhaps?
1370 */
1371struct buffer_head *
1372__getblk(struct block_device *bdev, sector_t block, unsigned size)
1373{
1374 struct buffer_head *bh = __find_get_block(bdev, block, size);
1375
1376 might_sleep();
1377 if (bh == NULL)
1378 bh = __getblk_slow(bdev, block, size);
1379 return bh;
1380}
1381EXPORT_SYMBOL(__getblk);
1382
1383/*
1384 * Do async read-ahead on a buffer..
1385 */
1386void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1387{
1388 struct buffer_head *bh = __getblk(bdev, block, size);
1389 if (likely(bh)) {
1390 ll_rw_block(READA, 1, &bh);
1391 brelse(bh);
1392 }
1393}
1394EXPORT_SYMBOL(__breadahead);
1395
1396/**
1397 * __bread() - reads a specified block and returns the bh
1398 * @bdev: the block_device to read from
1399 * @block: number of block
1400 * @size: size (in bytes) to read
1401 *
1402 * Reads a specified block, and returns buffer head that contains it.
1403 * It returns NULL if the block was unreadable.
1404 */
1405struct buffer_head *
1406__bread(struct block_device *bdev, sector_t block, unsigned size)
1407{
1408 struct buffer_head *bh = __getblk(bdev, block, size);
1409
1410 if (likely(bh) && !buffer_uptodate(bh))
1411 bh = __bread_slow(bh);
1412 return bh;
1413}
1414EXPORT_SYMBOL(__bread);
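/*
 * Illustrative sketch (not part of this file): kicking off read-ahead for a
 * few neighbouring blocks before synchronously reading the one we need, so
 * the requests can be merged.  The count of 3 and the block layout are
 * assumptions made up for the example.
 */
static struct buffer_head *example_read_with_readahead(struct block_device *bdev,
                                        sector_t block, unsigned size)
{
        int i;

        for (i = 1; i <= 3; i++)
                __breadahead(bdev, block + i, size);    /* async READA, ref dropped */
        return __bread(bdev, block, size);              /* NULL if unreadable */
}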
1415
1416/*
1417 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1418 * This doesn't race because it runs in each cpu either in irq
1419 * or with preempt disabled.
1420 */
1421static void invalidate_bh_lru(void *arg)
1422{
1423 struct bh_lru *b = &get_cpu_var(bh_lrus);
1424 int i;
1425
1426 for (i = 0; i < BH_LRU_SIZE; i++) {
1427 brelse(b->bhs[i]);
1428 b->bhs[i] = NULL;
1429 }
1430 put_cpu_var(bh_lrus);
1431}
1432
1433void invalidate_bh_lrus(void)
1434{
1435 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1436}
1437
1438void set_bh_page(struct buffer_head *bh,
1439 struct page *page, unsigned long offset)
1440{
1441 bh->b_page = page;
1442 BUG_ON(offset >= PAGE_SIZE);
1443 if (PageHighMem(page))
1444 /*
1445 * This catches illegal uses and preserves the offset:
1446 */
1447 bh->b_data = (char *)(0 + offset);
1448 else
1449 bh->b_data = page_address(page) + offset;
1450}
1451EXPORT_SYMBOL(set_bh_page);
1452
1453/*
1454 * Called when truncating a buffer on a page completely.
1455 */
1456static void discard_buffer(struct buffer_head * bh)
1457{
1458 lock_buffer(bh);
1459 clear_buffer_dirty(bh);
1460 bh->b_bdev = NULL;
1461 clear_buffer_mapped(bh);
1462 clear_buffer_req(bh);
1463 clear_buffer_new(bh);
1464 clear_buffer_delay(bh);
1465 clear_buffer_unwritten(bh);
1466 unlock_buffer(bh);
1467}
1468
1469/**
1470 * block_invalidatepage - invalidate part or all of a buffer-backed page
1471 *
1472 * @page: the page which is affected
1473 * @offset: the index of the truncation point
1474 *
1475 * block_invalidatepage() is called when all or part of the page has become
1476 * invalidated by a truncate operation.
1477 *
1478 * block_invalidatepage() does not have to release all buffers, but it must
1479 * ensure that no dirty buffer is left outside @offset and that no I/O
1480 * is underway against any of the blocks which are outside the truncation
1481 * point. Because the caller is about to free (and possibly reuse) those
1482 * blocks on-disk.
1483 */
1484void block_invalidatepage(struct page *page, unsigned long offset)
1485{
1486 struct buffer_head *head, *bh, *next;
1487 unsigned int curr_off = 0;
1488
1489 BUG_ON(!PageLocked(page));
1490 if (!page_has_buffers(page))
1491 goto out;
1492
1493 head = page_buffers(page);
1494 bh = head;
1495 do {
1496 unsigned int next_off = curr_off + bh->b_size;
1497 next = bh->b_this_page;
1498
1499 /*
1500 * is this block fully invalidated?
1501 */
1502 if (offset <= curr_off)
1503 discard_buffer(bh);
1504 curr_off = next_off;
1505 bh = next;
1506 } while (bh != head);
1507
1508 /*
1509 * We release buffers only if the entire page is being invalidated.
1510 * The get_block cached value has been unconditionally invalidated,
1511 * so real IO is not possible anymore.
1512 */
1513 if (offset == 0)
1514 try_to_release_page(page, 0);
1515out:
1516 return;
1517}
1518EXPORT_SYMBOL(block_invalidatepage);
1519
1520/*
1521 * We attach and possibly dirty the buffers atomically wrt
1522 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1523 * is already excluded via the page lock.
1524 */
1525void create_empty_buffers(struct page *page,
1526 unsigned long blocksize, unsigned long b_state)
1527{
1528 struct buffer_head *bh, *head, *tail;
1529
1530 head = alloc_page_buffers(page, blocksize, 1);
1531 bh = head;
1532 do {
1533 bh->b_state |= b_state;
1534 tail = bh;
1535 bh = bh->b_this_page;
1536 } while (bh);
1537 tail->b_this_page = head;
1538
1539 spin_lock(&page->mapping->private_lock);
1540 if (PageUptodate(page) || PageDirty(page)) {
1541 bh = head;
1542 do {
1543 if (PageDirty(page))
1544 set_buffer_dirty(bh);
1545 if (PageUptodate(page))
1546 set_buffer_uptodate(bh);
1547 bh = bh->b_this_page;
1548 } while (bh != head);
1549 }
1550 attach_page_buffers(page, head);
1551 spin_unlock(&page->mapping->private_lock);
1552}
1553EXPORT_SYMBOL(create_empty_buffers);
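/*
 * Illustrative sketch (not part of this file): the usual caller-side pattern
 * before walking a page's buffers, as __block_prepare_write() does below.
 */
static void example_ensure_buffers(struct page *page, struct inode *inode)
{
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
}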
1554
1555/*
1556 * We are taking a block for data and we don't want any output from any
1557 * buffer-cache aliases starting from return from that function and
1558 * until the moment when something will explicitly mark the buffer
1559 * dirty (hopefully that will not happen until we will free that block ;-)
1560 * We don't even need to mark it not-uptodate - nobody can expect
1561 * anything from a newly allocated buffer anyway. We used to use
1562 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1563 * don't want to mark the alias unmapped, for example - it would confuse
1564 * anyone who might pick it with bread() afterwards...
1565 *
1566 * Also.. Note that bforget() doesn't lock the buffer. So there can
1567 * be writeout I/O going on against recently-freed buffers. We don't
1568 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1569 * only if we really need to. That happens here.
1570 */
1571void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1572{
1573 struct buffer_head *old_bh;
1574
1575 might_sleep();
1576
1577 old_bh = __find_get_block_slow(bdev, block);
1578 if (old_bh) {
1579 clear_buffer_dirty(old_bh);
1580 wait_on_buffer(old_bh);
1581 clear_buffer_req(old_bh);
1582 __brelse(old_bh);
1583 }
1584}
1585EXPORT_SYMBOL(unmap_underlying_metadata);
1586
1587/*
1588 * NOTE! All mapped/uptodate combinations are valid:
1589 *
1590 * Mapped Uptodate Meaning
1591 *
1592 * No No "unknown" - must do get_block()
1593 * No Yes "hole" - zero-filled
1594 * Yes No "allocated" - allocated on disk, not read in
1595 * Yes Yes "valid" - allocated and up-to-date in memory.
1596 *
1597 * "Dirty" is valid only with the last case (mapped+uptodate).
1598 */
1599
1600/*
1601 * While block_write_full_page is writing back the dirty buffers under
1602 * the page lock, whoever dirtied the buffers may decide to clean them
1603 * again at any time. We handle that by only looking at the buffer
1604 * state inside lock_buffer().
1605 *
1606 * If block_write_full_page() is called for regular writeback
1607 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1608 * locked buffer. This only can happen if someone has written the buffer
1609 * directly, with submit_bh(). At the address_space level PageWriteback
1610 * prevents this contention from occurring.
1611 */
1612static int __block_write_full_page(struct inode *inode, struct page *page,
1613 get_block_t *get_block, struct writeback_control *wbc)
1614{
1615 int err;
1616 sector_t block;
1617 sector_t last_block;
1618 struct buffer_head *bh, *head;
1619 const unsigned blocksize = 1 << inode->i_blkbits;
1620 int nr_underway = 0;
1621
1622 BUG_ON(!PageLocked(page));
1623
1624 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1625
1626 if (!page_has_buffers(page)) {
1627 create_empty_buffers(page, blocksize,
1628 (1 << BH_Dirty)|(1 << BH_Uptodate));
1629 }
1630
1631 /*
1632 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1633 * here, and the (potentially unmapped) buffers may become dirty at
1634 * any time. If a buffer becomes dirty here after we've inspected it
1635 * then we just miss that fact, and the page stays dirty.
1636 *
1637 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1638 * handle that here by just cleaning them.
1639 */
1640
1641 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1642 head = page_buffers(page);
1643 bh = head;
1644
1645 /*
1646 * Get all the dirty buffers mapped to disk addresses and
1647 * handle any aliases from the underlying blockdev's mapping.
1648 */
1649 do {
1650 if (block > last_block) {
1651 /*
1652 * mapped buffers outside i_size will occur, because
1653 * this page can be outside i_size when there is a
1654 * truncate in progress.
1655 */
1656 /*
1657 * The buffer was zeroed by block_write_full_page()
1658 */
1659 clear_buffer_dirty(bh);
1660 set_buffer_uptodate(bh);
1661 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1662 WARN_ON(bh->b_size != blocksize);
1663 err = get_block(inode, block, bh, 1);
1664 if (err)
1665 goto recover;
1666 if (buffer_new(bh)) {
1667 /* blockdev mappings never come here */
1668 clear_buffer_new(bh);
1669 unmap_underlying_metadata(bh->b_bdev,
1670 bh->b_blocknr);
1671 }
1672 }
1673 bh = bh->b_this_page;
1674 block++;
1675 } while (bh != head);
1676
1677 do {
1678 if (!buffer_mapped(bh))
1679 continue;
1680 /*
1681 * If it's a fully non-blocking write attempt and we cannot
1682 * lock the buffer then redirty the page. Note that this can
1683 * potentially cause a busy-wait loop from pdflush and kswapd
1684 * activity, but those code paths have their own higher-level
1685 * throttling.
1686 */
1687 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1688 lock_buffer(bh);
1689 } else if (test_set_buffer_locked(bh)) {
1690 redirty_page_for_writepage(wbc, page);
1691 continue;
1692 }
1693 if (test_clear_buffer_dirty(bh)) {
1694 mark_buffer_async_write(bh);
1695 } else {
1696 unlock_buffer(bh);
1697 }
1698 } while ((bh = bh->b_this_page) != head);
1699
1700 /*
1701 * The page and its buffers are protected by PageWriteback(), so we can
1702 * drop the bh refcounts early.
1703 */
1704 BUG_ON(PageWriteback(page));
1705 set_page_writeback(page);
1706
1707 do {
1708 struct buffer_head *next = bh->b_this_page;
1709 if (buffer_async_write(bh)) {
1710 submit_bh(WRITE, bh);
1711 nr_underway++;
1712 }
1713 bh = next;
1714 } while (bh != head);
1715 unlock_page(page);
1716
1717 err = 0;
1718done:
1719 if (nr_underway == 0) {
1720 /*
1721 * The page was marked dirty, but the buffers were
1722 * clean. Someone wrote them back by hand with
1723 * ll_rw_block/submit_bh. A rare case.
1724 */
1725 end_page_writeback(page);
1726
1727 /*
1728 * The page and buffer_heads can be released at any time from
1729 * here on.
1730 */
1731 wbc->pages_skipped++; /* We didn't write this page */
1732 }
1733 return err;
1734
1735recover:
1736 /*
1737 * ENOSPC, or some other error. We may already have added some
1738 * blocks to the file, so we need to write these out to avoid
1739 * exposing stale data.
1740 * The page is currently locked and not marked for writeback
1741 */
1742 bh = head;
1743 /* Recovery: lock and submit the mapped buffers */
1744 do {
1745 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1746 lock_buffer(bh);
1747 mark_buffer_async_write(bh);
1748 } else {
1749 /*
1750 * The buffer may have been set dirty during
1751 * attachment to a dirty page.
1752 */
1753 clear_buffer_dirty(bh);
1754 }
1755 } while ((bh = bh->b_this_page) != head);
1756 SetPageError(page);
1757 BUG_ON(PageWriteback(page));
1758 mapping_set_error(page->mapping, err);
1759 set_page_writeback(page);
1760 do {
1761 struct buffer_head *next = bh->b_this_page;
1762 if (buffer_async_write(bh)) {
1763 clear_buffer_dirty(bh);
1764 submit_bh(WRITE, bh);
1765 nr_underway++;
1766 }
1767 bh = next;
1768 } while (bh != head);
1769 unlock_page(page);
1770 goto done;
1771}
1772
1773static int __block_prepare_write(struct inode *inode, struct page *page,
1774 unsigned from, unsigned to, get_block_t *get_block)
1775{
1776 unsigned block_start, block_end;
1777 sector_t block;
1778 int err = 0;
1779 unsigned blocksize, bbits;
1780 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1781
1782 BUG_ON(!PageLocked(page));
1783 BUG_ON(from > PAGE_CACHE_SIZE);
1784 BUG_ON(to > PAGE_CACHE_SIZE);
1785 BUG_ON(from > to);
1786
1787 blocksize = 1 << inode->i_blkbits;
1788 if (!page_has_buffers(page))
1789 create_empty_buffers(page, blocksize, 0);
1790 head = page_buffers(page);
1791
1792 bbits = inode->i_blkbits;
1793 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1794
1795 for(bh = head, block_start = 0; bh != head || !block_start;
1796 block++, block_start=block_end, bh = bh->b_this_page) {
1797 block_end = block_start + blocksize;
1798 if (block_end <= from || block_start >= to) {
1799 if (PageUptodate(page)) {
1800 if (!buffer_uptodate(bh))
1801 set_buffer_uptodate(bh);
1802 }
1803 continue;
1804 }
1805 if (buffer_new(bh))
1806 clear_buffer_new(bh);
1807 if (!buffer_mapped(bh)) {
b0cf2321 1808 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1809 err = get_block(inode, block, bh, 1);
1810 if (err)
f3ddbdc6 1811 break;
1da177e4 1812 if (buffer_new(bh)) {
1da177e4
LT
1813 unmap_underlying_metadata(bh->b_bdev,
1814 bh->b_blocknr);
1815 if (PageUptodate(page)) {
1816 set_buffer_uptodate(bh);
1817 continue;
1818 }
1819 if (block_end > to || block_start < from) {
1820 void *kaddr;
1821
1822 kaddr = kmap_atomic(page, KM_USER0);
1823 if (block_end > to)
1824 memset(kaddr+to, 0,
1825 block_end-to);
1826 if (block_start < from)
1827 memset(kaddr+block_start,
1828 0, from-block_start);
1829 flush_dcache_page(page);
1830 kunmap_atomic(kaddr, KM_USER0);
1831 }
1832 continue;
1833 }
1834 }
1835 if (PageUptodate(page)) {
1836 if (!buffer_uptodate(bh))
1837 set_buffer_uptodate(bh);
1838 continue;
1839 }
1840 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1841 !buffer_unwritten(bh) &&
1da177e4
LT
1842 (block_start < from || block_end > to)) {
1843 ll_rw_block(READ, 1, &bh);
1844 *wait_bh++=bh;
1845 }
1846 }
1847 /*
1848 * If we issued read requests - let them complete.
1849 */
1850 while(wait_bh > wait) {
1851 wait_on_buffer(*--wait_bh);
1852 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1853 err = -EIO;
1da177e4 1854 }
152becd2
AA
1855 if (!err) {
1856 bh = head;
1857 do {
1858 if (buffer_new(bh))
1859 clear_buffer_new(bh);
1860 } while ((bh = bh->b_this_page) != head);
1861 return 0;
1862 }
f3ddbdc6 1863 /* Error case: */
1da177e4
LT
1864 /*
1865 * Zero out any newly allocated blocks to avoid exposing stale
1866 * data. If BH_New is set, we know that the block was newly
1867 * allocated in the above loop.
1868 */
1869 bh = head;
1870 block_start = 0;
1871 do {
1872 block_end = block_start+blocksize;
1873 if (block_end <= from)
1874 goto next_bh;
1875 if (block_start >= to)
1876 break;
1877 if (buffer_new(bh)) {
1da177e4 1878 clear_buffer_new(bh);
01f2705d 1879 zero_user_page(page, block_start, bh->b_size, KM_USER0);
1da177e4
LT
1880 set_buffer_uptodate(bh);
1881 mark_buffer_dirty(bh);
1882 }
1883next_bh:
1884 block_start = block_end;
1885 bh = bh->b_this_page;
1886 } while (bh != head);
1887 return err;
1888}
1889
1890static int __block_commit_write(struct inode *inode, struct page *page,
1891 unsigned from, unsigned to)
1892{
1893 unsigned block_start, block_end;
1894 int partial = 0;
1895 unsigned blocksize;
1896 struct buffer_head *bh, *head;
1897
1898 blocksize = 1 << inode->i_blkbits;
1899
1900 for(bh = head = page_buffers(page), block_start = 0;
1901 bh != head || !block_start;
1902 block_start=block_end, bh = bh->b_this_page) {
1903 block_end = block_start + blocksize;
1904 if (block_end <= from || block_start >= to) {
1905 if (!buffer_uptodate(bh))
1906 partial = 1;
1907 } else {
1908 set_buffer_uptodate(bh);
1909 mark_buffer_dirty(bh);
1910 }
1911 }
1912
1913 /*
1914 * If this is a partial write which happened to make all buffers
1915 * uptodate then we can optimize away a bogus readpage() for
1916 * the next read(). Here we 'discover' whether the page went
1917 * uptodate as a result of this (potentially partial) write.
1918 */
1919 if (!partial)
1920 SetPageUptodate(page);
1921 return 0;
1922}
1923
1924/*
1925 * Generic "read page" function for block devices that have the normal
1926 * get_block functionality. This is most of the block device filesystems.
1927 * Reads the page asynchronously --- the unlock_buffer() and
1928 * set/clear_buffer_uptodate() functions propagate buffer state into the
1929 * page struct once IO has completed.
1930 */
1931int block_read_full_page(struct page *page, get_block_t *get_block)
1932{
1933 struct inode *inode = page->mapping->host;
1934 sector_t iblock, lblock;
1935 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1936 unsigned int blocksize;
1937 int nr, i;
1938 int fully_mapped = 1;
1939
cd7619d6 1940 BUG_ON(!PageLocked(page));
1da177e4
LT
1941 blocksize = 1 << inode->i_blkbits;
1942 if (!page_has_buffers(page))
1943 create_empty_buffers(page, blocksize, 0);
1944 head = page_buffers(page);
1945
1946 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1947 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1948 bh = head;
1949 nr = 0;
1950 i = 0;
1951
1952 do {
1953 if (buffer_uptodate(bh))
1954 continue;
1955
1956 if (!buffer_mapped(bh)) {
c64610ba
AM
1957 int err = 0;
1958
1da177e4
LT
1959 fully_mapped = 0;
1960 if (iblock < lblock) {
b0cf2321 1961 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
1962 err = get_block(inode, iblock, bh, 0);
1963 if (err)
1da177e4
LT
1964 SetPageError(page);
1965 }
1966 if (!buffer_mapped(bh)) {
01f2705d
ND
1967 zero_user_page(page, i * blocksize, blocksize,
1968 KM_USER0);
c64610ba
AM
1969 if (!err)
1970 set_buffer_uptodate(bh);
1da177e4
LT
1971 continue;
1972 }
1973 /*
1974 * get_block() might have updated the buffer
1975 * synchronously
1976 */
1977 if (buffer_uptodate(bh))
1978 continue;
1979 }
1980 arr[nr++] = bh;
1981 } while (i++, iblock++, (bh = bh->b_this_page) != head);
1982
1983 if (fully_mapped)
1984 SetPageMappedToDisk(page);
1985
1986 if (!nr) {
1987 /*
1988 * All buffers are uptodate - we can set the page uptodate
1989 * as well. But not if get_block() returned an error.
1990 */
1991 if (!PageError(page))
1992 SetPageUptodate(page);
1993 unlock_page(page);
1994 return 0;
1995 }
1996
1997 /* Stage two: lock the buffers */
1998 for (i = 0; i < nr; i++) {
1999 bh = arr[i];
2000 lock_buffer(bh);
2001 mark_buffer_async_read(bh);
2002 }
2003
2004 /*
2005 * Stage 3: start the IO. Check for uptodateness
2006 * inside the buffer lock in case another process reading
2007 * the underlying blockdev brought it uptodate (the sct fix).
2008 */
2009 for (i = 0; i < nr; i++) {
2010 bh = arr[i];
2011 if (buffer_uptodate(bh))
2012 end_buffer_async_read(bh, 1);
2013 else
2014 submit_bh(READ, bh);
2015 }
2016 return 0;
2017}
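/*
 * Illustrative sketch: how a filesystem would typically wire its ->readpage
 * to block_read_full_page().  "example_get_block" and "example_readpage" are
 * hypothetical names; a real filesystem supplies its own block-mapping
 * callback that consults on-disk metadata.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	/*
	 * Purely for illustration: map file blocks 1:1 onto disk blocks.
	 * A real get_block looks up (and, when "create" is set, allocates)
	 * the on-disk block for "iblock".
	 */
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}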
2018
2019/* utility function for filesystems that need to do work on expanding
2020 * truncates. Uses prepare/commit_write to allow the filesystem to
2021 * deal with the hole.
2022 */
05eb0b51
OH
2023static int __generic_cont_expand(struct inode *inode, loff_t size,
2024 pgoff_t index, unsigned int offset)
1da177e4
LT
2025{
2026 struct address_space *mapping = inode->i_mapping;
2027 struct page *page;
05eb0b51 2028 unsigned long limit;
1da177e4
LT
2029 int err;
2030
2031 err = -EFBIG;
2032 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2033 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2034 send_sig(SIGXFSZ, current, 0);
2035 goto out;
2036 }
2037 if (size > inode->i_sb->s_maxbytes)
2038 goto out;
2039
1da177e4
LT
2040 err = -ENOMEM;
2041 page = grab_cache_page(mapping, index);
2042 if (!page)
2043 goto out;
2044 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
05eb0b51
OH
2045 if (err) {
2046 /*
2047 * ->prepare_write() may have instantiated a few blocks
2048 * outside i_size. Trim these off again.
2049 */
2050 unlock_page(page);
2051 page_cache_release(page);
2052 vmtruncate(inode, inode->i_size);
2053 goto out;
1da177e4 2054 }
05eb0b51
OH
2055
2056 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2057
1da177e4
LT
2058 unlock_page(page);
2059 page_cache_release(page);
2060 if (err > 0)
2061 err = 0;
2062out:
2063 return err;
2064}
2065
05eb0b51
OH
2066int generic_cont_expand(struct inode *inode, loff_t size)
2067{
2068 pgoff_t index;
2069 unsigned int offset;
2070
2071 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2072
2073 /* ugh. in prepare/commit_write, if from==to==start of block, we
2074 ** skip the prepare. make sure we never send an offset for the start
2075 ** of a block
2076 */
2077 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2078 /* caller must handle this extra byte. */
2079 offset++;
2080 }
2081 index = size >> PAGE_CACHE_SHIFT;
2082
2083 return __generic_cont_expand(inode, size, index, offset);
2084}
2085
2086int generic_cont_expand_simple(struct inode *inode, loff_t size)
2087{
2088 loff_t pos = size - 1;
2089 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2090 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2091
2092 /* prepare/commit_write can handle even if from==to==start of block. */
2093 return __generic_cont_expand(inode, size, index, offset);
2094}
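/*
 * Illustrative sketch: extending a file before a write that starts beyond
 * the current EOF, as a filesystem without holes would do from its write or
 * setattr path.  "inode" and "new_size" are assumed to be supplied by the
 * caller.
 */
static int example_expand_to(struct inode *inode, loff_t new_size)
{
	if (new_size <= i_size_read(inode))
		return 0;
	/*
	 * generic_cont_expand_simple() zero-fills and instantiates the block
	 * containing the last byte; generic_cont_expand() is the older
	 * variant with the off-by-one offset convention described above.
	 */
	return generic_cont_expand_simple(inode, new_size);
}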
2095
1da177e4
LT
2096/*
2097 * For moronic filesystems that do not allow holes in files.
2098 * We may have to extend the file.
2099 */
2100
2101int cont_prepare_write(struct page *page, unsigned offset,
2102 unsigned to, get_block_t *get_block, loff_t *bytes)
2103{
2104 struct address_space *mapping = page->mapping;
2105 struct inode *inode = mapping->host;
2106 struct page *new_page;
2107 pgoff_t pgpos;
2108 long status;
2109 unsigned zerofrom;
2110 unsigned blocksize = 1 << inode->i_blkbits;
1da177e4
LT
2111
2112 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2113 status = -ENOMEM;
2114 new_page = grab_cache_page(mapping, pgpos);
2115 if (!new_page)
2116 goto out;
2117 /* we might sleep */
2118 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2119 unlock_page(new_page);
2120 page_cache_release(new_page);
2121 continue;
2122 }
2123 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2124 if (zerofrom & (blocksize-1)) {
2125 *bytes |= (blocksize-1);
2126 (*bytes)++;
2127 }
2128 status = __block_prepare_write(inode, new_page, zerofrom,
2129 PAGE_CACHE_SIZE, get_block);
2130 if (status)
2131 goto out_unmap;
ff1be9ad 2132 zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
01f2705d 2133 KM_USER0);
1da177e4
LT
2134 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2135 unlock_page(new_page);
2136 page_cache_release(new_page);
2137 }
2138
2139 if (page->index < pgpos) {
2140 /* completely inside the area */
2141 zerofrom = offset;
2142 } else {
2143 /* page covers the boundary, find the boundary offset */
2144 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2145
2146 /* if we will expand the file, the last block will be filled */
2147 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2148 *bytes |= (blocksize-1);
2149 (*bytes)++;
2150 }
2151
2152 /* starting below the boundary? Nothing to zero out */
2153 if (offset <= zerofrom)
2154 zerofrom = offset;
2155 }
2156 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2157 if (status)
2158 goto out1;
2159 if (zerofrom < offset) {
01f2705d 2160 zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
1da177e4
LT
2161 __block_commit_write(inode, page, zerofrom, offset);
2162 }
2163 return 0;
2164out1:
2165 ClearPageUptodate(page);
2166 return status;
2167
2168out_unmap:
2169 ClearPageUptodate(new_page);
2170 unlock_page(new_page);
2171 page_cache_release(new_page);
2172out:
2173 return status;
2174}
2175
2176int block_prepare_write(struct page *page, unsigned from, unsigned to,
2177 get_block_t *get_block)
2178{
2179 struct inode *inode = page->mapping->host;
2180 int err = __block_prepare_write(inode, page, from, to, get_block);
2181 if (err)
2182 ClearPageUptodate(page);
2183 return err;
2184}
2185
2186int block_commit_write(struct page *page, unsigned from, unsigned to)
2187{
2188 struct inode *inode = page->mapping->host;
2189 __block_commit_write(inode,page,from,to);
2190 return 0;
2191}
2192
2193int generic_commit_write(struct file *file, struct page *page,
2194 unsigned from, unsigned to)
2195{
2196 struct inode *inode = page->mapping->host;
2197 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2198 __block_commit_write(inode,page,from,to);
2199 /*
2200 * No need to use i_size_read() here, the i_size
1b1dcc1b 2201 * cannot change under us because we hold i_mutex.
1da177e4
LT
2202 */
2203 if (pos > inode->i_size) {
2204 i_size_write(inode, pos);
2205 mark_inode_dirty(inode);
2206 }
2207 return 0;
2208}
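/*
 * Illustrative sketch: the classic prepare/commit pairing a block-based
 * filesystem uses for buffered writes.  ->prepare_write maps and reads in
 * the affected blocks, and generic_commit_write() marks them dirty and
 * updates i_size.  "example_get_block" and "example_readpage" are the
 * hypothetical helpers from the earlier sketch; the aops table is
 * abbreviated (a real filesystem also sets .writepage, .bmap, etc.).
 */
static int example_prepare_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.prepare_write	= example_prepare_write,
	.commit_write	= generic_commit_write,
};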
2209
54171690
DC
2210/*
2211 * block_page_mkwrite() is not allowed to change the file size as it gets
2212 * called from a page fault handler when a page is first dirtied. Hence we must
2213 * be careful to check for EOF conditions here. We set the page up correctly
2214 * for a written page which means we get ENOSPC checking when writing into
2215 * holes and correct delalloc and unwritten extent mapping on filesystems that
2216 * support these features.
2217 *
2218 * We are not allowed to take the i_mutex here so we have to play games to
2219 * protect against truncate races as the page could now be beyond EOF. Because
2220 * vmtruncate() writes the inode size before removing pages, once we have the
2221 * page lock we can determine safely if the page is beyond EOF. If it is not
2222 * beyond EOF, then the page is guaranteed safe against truncation until we
2223 * unlock the page.
2224 */
2225int
2226block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2227 get_block_t get_block)
2228{
2229 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2230 unsigned long end;
2231 loff_t size;
2232 int ret = -EINVAL;
2233
2234 lock_page(page);
2235 size = i_size_read(inode);
2236 if ((page->mapping != inode->i_mapping) ||
18336338 2237 (page_offset(page) > size)) {
54171690
DC
2238 /* page got truncated out from underneath us */
2239 goto out_unlock;
2240 }
2241
2242 /* page is wholly or partially inside EOF */
2243 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2244 end = size & ~PAGE_CACHE_MASK;
2245 else
2246 end = PAGE_CACHE_SIZE;
2247
2248 ret = block_prepare_write(page, 0, end, get_block);
2249 if (!ret)
2250 ret = block_commit_write(page, 0, end);
2251
2252out_unlock:
2253 unlock_page(page);
2254 return ret;
2255}
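/*
 * Illustrative sketch: a filesystem's ->page_mkwrite handler delegating to
 * block_page_mkwrite() so that writable mmap faults get the same block
 * allocation and ENOSPC checking as write(2).  The handler is assumed to be
 * installed in the file's vm_operations_struct by the filesystem's ->mmap;
 * "example_get_block" is the hypothetical mapping callback from above.
 */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, example_get_block);
}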
1da177e4
LT
2256
2257/*
2258 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2259 * immediately, while under the page lock. So it needs a special end_io
2260 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2261 */
2262static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2263{
68671f35 2264 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2265}
2266
2267/*
2268 * On entry, the page is fully not uptodate.
2269 * On exit the page is fully uptodate in the areas outside (from,to)
2270 */
2271int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2272 get_block_t *get_block)
2273{
2274 struct inode *inode = page->mapping->host;
2275 const unsigned blkbits = inode->i_blkbits;
2276 const unsigned blocksize = 1 << blkbits;
a4b0672d 2277 struct buffer_head *head, *bh;
1da177e4 2278 unsigned block_in_page;
a4b0672d 2279 unsigned block_start, block_end;
1da177e4
LT
2280 sector_t block_in_file;
2281 char *kaddr;
2282 int nr_reads = 0;
1da177e4
LT
2283 int ret = 0;
2284 int is_mapped_to_disk = 1;
1da177e4 2285
a4b0672d
NP
2286 if (page_has_buffers(page))
2287 return block_prepare_write(page, from, to, get_block);
2288
1da177e4
LT
2289 if (PageMappedToDisk(page))
2290 return 0;
2291
a4b0672d
NP
2292 /*
2293 * Allocate buffers so that we can keep track of state, and potentially
2294 * attach them to the page if an error occurs. In the common case of
2295 * no error, they will just be freed again without ever being attached
2296 * to the page (which is all OK, because we're under the page lock).
2297 *
2298 * Be careful: the buffer linked list is a NULL terminated one, rather
2299 * than the circular one we're used to.
2300 */
2301 head = alloc_page_buffers(page, blocksize, 0);
2302 if (!head)
2303 return -ENOMEM;
2304
1da177e4 2305 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2306
2307 /*
2308 * We loop across all blocks in the page, whether or not they are
2309 * part of the affected region. This is so we can discover if the
2310 * page is fully mapped-to-disk.
2311 */
a4b0672d 2312 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2313 block_start < PAGE_CACHE_SIZE;
a4b0672d 2314 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2315 int create;
2316
a4b0672d
NP
2317 block_end = block_start + blocksize;
2318 bh->b_state = 0;
1da177e4
LT
2319 create = 1;
2320 if (block_start >= to)
2321 create = 0;
2322 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2323 bh, create);
1da177e4
LT
2324 if (ret)
2325 goto failed;
a4b0672d 2326 if (!buffer_mapped(bh))
1da177e4 2327 is_mapped_to_disk = 0;
a4b0672d
NP
2328 if (buffer_new(bh))
2329 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2330 if (PageUptodate(page)) {
2331 set_buffer_uptodate(bh);
1da177e4 2332 continue;
a4b0672d
NP
2333 }
2334 if (buffer_new(bh) || !buffer_mapped(bh)) {
1da177e4 2335 kaddr = kmap_atomic(page, KM_USER0);
22c8ca78 2336 if (block_start < from)
1da177e4 2337 memset(kaddr+block_start, 0, from-block_start);
22c8ca78 2338 if (block_end > to)
1da177e4 2339 memset(kaddr + to, 0, block_end - to);
1da177e4
LT
2340 flush_dcache_page(page);
2341 kunmap_atomic(kaddr, KM_USER0);
2342 continue;
2343 }
a4b0672d 2344 if (buffer_uptodate(bh))
1da177e4
LT
2345 continue; /* reiserfs does this */
2346 if (block_start < from || block_end > to) {
a4b0672d
NP
2347 lock_buffer(bh);
2348 bh->b_end_io = end_buffer_read_nobh;
2349 submit_bh(READ, bh);
2350 nr_reads++;
1da177e4
LT
2351 }
2352 }
2353
2354 if (nr_reads) {
1da177e4
LT
2355 /*
2356 * The page is locked, so these buffers are protected from
2357 * any VM or truncate activity. Hence we don't need to care
2358 * for the buffer_head refcounts.
2359 */
a4b0672d 2360 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2361 wait_on_buffer(bh);
2362 if (!buffer_uptodate(bh))
2363 ret = -EIO;
1da177e4
LT
2364 }
2365 if (ret)
2366 goto failed;
2367 }
2368
2369 if (is_mapped_to_disk)
2370 SetPageMappedToDisk(page);
1da177e4 2371
a4b0672d
NP
2372 do {
2373 bh = head;
2374 head = head->b_this_page;
2375 free_buffer_head(bh);
2376 } while (head);
2377
1da177e4
LT
2378 return 0;
2379
2380failed:
1da177e4 2381 /*
a4b0672d
NP
2382 * Error recovery is a bit difficult. We need to zero out blocks that
2383 * were newly allocated, and dirty them to ensure they get written out.
2384 * Buffers need to be attached to the page at this point, otherwise
2385 * the handling of potential IO errors during writeout would be hard
2386 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2387 */
a4b0672d
NP
2388 spin_lock(&page->mapping->private_lock);
2389 bh = head;
2390 block_start = 0;
2391 do {
2392 if (PageUptodate(page))
2393 set_buffer_uptodate(bh);
2394 if (PageDirty(page))
2395 set_buffer_dirty(bh);
2396
2397 block_end = block_start+blocksize;
2398 if (block_end <= from)
2399 goto next;
2400 if (block_start >= to)
2401 goto next;
2402
2403 if (buffer_new(bh)) {
2404 clear_buffer_new(bh);
2405 if (!buffer_uptodate(bh)) {
2406 zero_user_page(page, block_start, bh->b_size, KM_USER0);
2407 set_buffer_uptodate(bh);
2408 }
2409 mark_buffer_dirty(bh);
2410 }
2411next:
2412 block_start = block_end;
2413 if (!bh->b_this_page)
2414 bh->b_this_page = head;
2415 bh = bh->b_this_page;
2416 } while (bh != head);
2417 attach_page_buffers(page, head);
2418 spin_unlock(&page->mapping->private_lock);
2419
1da177e4
LT
2420 return ret;
2421}
2422EXPORT_SYMBOL(nobh_prepare_write);
2423
57bf63d6
DK
2424/*
2425 * Make sure any changes to nobh_commit_write() are reflected in
2426 * nobh_truncate_page(), since it doesn't call commit_write().
2427 */
1da177e4
LT
2428int nobh_commit_write(struct file *file, struct page *page,
2429 unsigned from, unsigned to)
2430{
2431 struct inode *inode = page->mapping->host;
2432 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2433
a4b0672d
NP
2434 if (page_has_buffers(page))
2435 return generic_commit_write(file, page, from, to);
2436
22c8ca78 2437 SetPageUptodate(page);
1da177e4
LT
2438 set_page_dirty(page);
2439 if (pos > inode->i_size) {
2440 i_size_write(inode, pos);
2441 mark_inode_dirty(inode);
2442 }
2443 return 0;
2444}
2445EXPORT_SYMBOL(nobh_commit_write);
2446
2447/*
2448 * nobh_writepage() - based on block_write_full_page() except
2449 * that it tries to operate without attaching bufferheads to
2450 * the page.
2451 */
2452int nobh_writepage(struct page *page, get_block_t *get_block,
2453 struct writeback_control *wbc)
2454{
2455 struct inode * const inode = page->mapping->host;
2456 loff_t i_size = i_size_read(inode);
2457 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2458 unsigned offset;
1da177e4
LT
2459 int ret;
2460
2461 /* Is the page fully inside i_size? */
2462 if (page->index < end_index)
2463 goto out;
2464
2465 /* Is the page fully outside i_size? (truncate in progress) */
2466 offset = i_size & (PAGE_CACHE_SIZE-1);
2467 if (page->index >= end_index+1 || !offset) {
2468 /*
2469 * The page may have dirty, unmapped buffers. For example,
2470 * they may have been added in ext3_writepage(). Make them
2471 * freeable here, so the page does not leak.
2472 */
2473#if 0
2474 /* Not really sure about this - do we need this ? */
2475 if (page->mapping->a_ops->invalidatepage)
2476 page->mapping->a_ops->invalidatepage(page, offset);
2477#endif
2478 unlock_page(page);
2479 return 0; /* don't care */
2480 }
2481
2482 /*
2483 * The page straddles i_size. It must be zeroed out on each and every
2484 * writepage invocation because it may be mmapped. "A file is mapped
2485 * in multiples of the page size. For a file that is not a multiple of
2486 * the page size, the remaining memory is zeroed when mapped, and
2487 * writes to that region are not written out to the file."
2488 */
01f2705d 2489 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
1da177e4
LT
2490out:
2491 ret = mpage_writepage(page, get_block, wbc);
2492 if (ret == -EAGAIN)
2493 ret = __block_write_full_page(inode, page, get_block, wbc);
2494 return ret;
2495}
2496EXPORT_SYMBOL(nobh_writepage);
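/*
 * Illustrative sketch: wiring the nobh variants together.  A filesystem
 * that wants to avoid long-lived per-page buffer_heads pairs
 * nobh_prepare_write()/nobh_commit_write() for buffered writes and
 * nobh_writepage() for writeback, all driven by the same hypothetical
 * example_get_block callback.
 */
static int example_nobh_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, example_get_block);
}

static int example_nobh_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	return nobh_writepage(page, example_get_block, wbc);
}

static const struct address_space_operations example_nobh_aops = {
	.readpage	= example_readpage,
	.writepage	= example_nobh_writepage,
	.prepare_write	= example_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
};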
2497
2498/*
2499 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2500 */
2501int nobh_truncate_page(struct address_space *mapping, loff_t from)
2502{
2503 struct inode *inode = mapping->host;
2504 unsigned blocksize = 1 << inode->i_blkbits;
2505 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2506 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2507 unsigned to;
2508 struct page *page;
f5e54d6e 2509 const struct address_space_operations *a_ops = mapping->a_ops;
1da177e4
LT
2510 int ret = 0;
2511
2512 if ((offset & (blocksize - 1)) == 0)
2513 goto out;
2514
2515 ret = -ENOMEM;
2516 page = grab_cache_page(mapping, index);
2517 if (!page)
2518 goto out;
2519
2520 to = (offset + blocksize) & ~(blocksize - 1);
2521 ret = a_ops->prepare_write(NULL, page, offset, to);
2522 if (ret == 0) {
01f2705d
ND
2523 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
2524 KM_USER0);
57bf63d6
DK
2525 /*
2526 * It would be more correct to call aops->commit_write()
2527 * here, but this is more efficient.
2528 */
2529 SetPageUptodate(page);
1da177e4
LT
2530 set_page_dirty(page);
2531 }
2532 unlock_page(page);
2533 page_cache_release(page);
2534out:
2535 return ret;
2536}
2537EXPORT_SYMBOL(nobh_truncate_page);
2538
2539int block_truncate_page(struct address_space *mapping,
2540 loff_t from, get_block_t *get_block)
2541{
2542 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2543 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2544 unsigned blocksize;
54b21a79 2545 sector_t iblock;
1da177e4
LT
2546 unsigned length, pos;
2547 struct inode *inode = mapping->host;
2548 struct page *page;
2549 struct buffer_head *bh;
1da177e4
LT
2550 int err;
2551
2552 blocksize = 1 << inode->i_blkbits;
2553 length = offset & (blocksize - 1);
2554
2555 /* Block boundary? Nothing to do */
2556 if (!length)
2557 return 0;
2558
2559 length = blocksize - length;
54b21a79 2560 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2561
2562 page = grab_cache_page(mapping, index);
2563 err = -ENOMEM;
2564 if (!page)
2565 goto out;
2566
2567 if (!page_has_buffers(page))
2568 create_empty_buffers(page, blocksize, 0);
2569
2570 /* Find the buffer that contains "offset" */
2571 bh = page_buffers(page);
2572 pos = blocksize;
2573 while (offset >= pos) {
2574 bh = bh->b_this_page;
2575 iblock++;
2576 pos += blocksize;
2577 }
2578
2579 err = 0;
2580 if (!buffer_mapped(bh)) {
b0cf2321 2581 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2582 err = get_block(inode, iblock, bh, 0);
2583 if (err)
2584 goto unlock;
2585 /* unmapped? It's a hole - nothing to do */
2586 if (!buffer_mapped(bh))
2587 goto unlock;
2588 }
2589
2590 /* Ok, it's mapped. Make sure it's up-to-date */
2591 if (PageUptodate(page))
2592 set_buffer_uptodate(bh);
2593
33a266dd 2594 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2595 err = -EIO;
2596 ll_rw_block(READ, 1, &bh);
2597 wait_on_buffer(bh);
2598 /* Uhhuh. Read error. Complain and punt. */
2599 if (!buffer_uptodate(bh))
2600 goto unlock;
2601 }
2602
01f2705d 2603 zero_user_page(page, offset, length, KM_USER0);
1da177e4
LT
2604 mark_buffer_dirty(bh);
2605 err = 0;
2606
2607unlock:
2608 unlock_page(page);
2609 page_cache_release(page);
2610out:
2611 return err;
2612}
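/*
 * Illustrative sketch: zeroing the tail of the last block when a file is
 * truncated to a size that is not block-aligned, as a filesystem would do
 * from its truncate path before freeing the blocks beyond the new EOF.
 * "example_get_block" is the hypothetical mapping callback from above.
 */
static void example_truncate_blocks(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
			    example_get_block);
	/* ... then release the on-disk blocks past i_size ... */
}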
2613
2614/*
2615 * The generic ->writepage function for buffer-backed address_spaces
2616 */
2617int block_write_full_page(struct page *page, get_block_t *get_block,
2618 struct writeback_control *wbc)
2619{
2620 struct inode * const inode = page->mapping->host;
2621 loff_t i_size = i_size_read(inode);
2622 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2623 unsigned offset;
1da177e4
LT
2624
2625 /* Is the page fully inside i_size? */
2626 if (page->index < end_index)
2627 return __block_write_full_page(inode, page, get_block, wbc);
2628
2629 /* Is the page fully outside i_size? (truncate in progress) */
2630 offset = i_size & (PAGE_CACHE_SIZE-1);
2631 if (page->index >= end_index+1 || !offset) {
2632 /*
2633 * The page may have dirty, unmapped buffers. For example,
2634 * they may have been added in ext3_writepage(). Make them
2635 * freeable here, so the page does not leak.
2636 */
aaa4059b 2637 do_invalidatepage(page, 0);
1da177e4
LT
2638 unlock_page(page);
2639 return 0; /* don't care */
2640 }
2641
2642 /*
2643 * The page straddles i_size. It must be zeroed out on each and every
2644 * writepage invocation because it may be mmapped. "A file is mapped
2645 * in multiples of the page size. For a file that is not a multiple of
2646 * the page size, the remaining memory is zeroed when mapped, and
2647 * writes to that region are not written out to the file."
2648 */
01f2705d 2649 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
1da177e4
LT
2650 return __block_write_full_page(inode, page, get_block, wbc);
2651}
2652
2653sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2654 get_block_t *get_block)
2655{
2656 struct buffer_head tmp;
2657 struct inode *inode = mapping->host;
2658 tmp.b_state = 0;
2659 tmp.b_blocknr = 0;
b0cf2321 2660 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2661 get_block(inode, block, &tmp, 0);
2662 return tmp.b_blocknr;
2663}
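/*
 * Illustrative sketch: an ->bmap method built on generic_block_bmap().
 * FIBMAP and swap-file setup use this to translate a file block number into
 * a device block number through the same hypothetical get_block callback.
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}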
2664
6712ecf8 2665static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2666{
2667 struct buffer_head *bh = bio->bi_private;
2668
1da177e4
LT
2669 if (err == -EOPNOTSUPP) {
2670 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2671 set_bit(BH_Eopnotsupp, &bh->b_state);
2672 }
2673
2674 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2675 bio_put(bio);
1da177e4
LT
2676}
2677
2678int submit_bh(int rw, struct buffer_head * bh)
2679{
2680 struct bio *bio;
2681 int ret = 0;
2682
2683 BUG_ON(!buffer_locked(bh));
2684 BUG_ON(!buffer_mapped(bh));
2685 BUG_ON(!bh->b_end_io);
2686
2687 if (buffer_ordered(bh) && (rw == WRITE))
2688 rw = WRITE_BARRIER;
2689
2690 /*
2691 * Only clear out a write error when rewriting, should this
2692 * include WRITE_SYNC as well?
2693 */
2694 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2695 clear_buffer_write_io_error(bh);
2696
2697 /*
2698 * from here on down, it's all bio -- do the initial mapping,
2699 * submit_bio -> generic_make_request may further map this bio around
2700 */
2701 bio = bio_alloc(GFP_NOIO, 1);
2702
2703 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2704 bio->bi_bdev = bh->b_bdev;
2705 bio->bi_io_vec[0].bv_page = bh->b_page;
2706 bio->bi_io_vec[0].bv_len = bh->b_size;
2707 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2708
2709 bio->bi_vcnt = 1;
2710 bio->bi_idx = 0;
2711 bio->bi_size = bh->b_size;
2712
2713 bio->bi_end_io = end_bio_bh_io_sync;
2714 bio->bi_private = bh;
2715
2716 bio_get(bio);
2717 submit_bio(rw, bio);
2718
2719 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2720 ret = -EOPNOTSUPP;
2721
2722 bio_put(bio);
2723 return ret;
2724}
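/*
 * Illustrative sketch: reading one block synchronously with submit_bh().
 * This mirrors what __bread() does internally: lock the buffer, point
 * b_end_io at the generic read completion handler, submit, and wait.
 */
static struct buffer_head *example_read_block(struct block_device *bdev,
					      sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (bh && !buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh)) {
			get_bh(bh);
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh)) {
				brelse(bh);
				return NULL;
			}
		} else
			unlock_buffer(bh);
	}
	return bh;
}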
2725
2726/**
2727 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2728 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2729 * @nr: number of &struct buffer_heads in the array
2730 * @bhs: array of pointers to &struct buffer_head
2731 *
a7662236
JK
2732 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2733 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2734 * %SWRITE is like %WRITE, except that we make sure that the *current* data in buffers
2735 * are sent to disk. The fourth %READA option is described in the documentation
2736 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
2737 *
2738 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2739 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2740 * clean when doing a write request, and any buffer that appears to be
2741 * up-to-date when doing a read request. Further, it marks as clean the buffers that
2742 * are processed for writing (the buffer cache won't assume that they are
2743 * actually clean until the buffer gets unlocked).
1da177e4
LT
2744 *
2745 * ll_rw_block sets b_end_io to a simple completion handler that marks
2746 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2747 * any waiters.
2748 *
2749 * All of the buffers must be for the same device, and must also be a
2750 * multiple of the current approved size for the device.
2751 */
2752void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2753{
2754 int i;
2755
2756 for (i = 0; i < nr; i++) {
2757 struct buffer_head *bh = bhs[i];
2758
a7662236
JK
2759 if (rw == SWRITE)
2760 lock_buffer(bh);
2761 else if (test_set_buffer_locked(bh))
1da177e4
LT
2762 continue;
2763
a7662236 2764 if (rw == WRITE || rw == SWRITE) {
1da177e4 2765 if (test_clear_buffer_dirty(bh)) {
76c3073a 2766 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2767 get_bh(bh);
1da177e4
LT
2768 submit_bh(WRITE, bh);
2769 continue;
2770 }
2771 } else {
1da177e4 2772 if (!buffer_uptodate(bh)) {
76c3073a 2773 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2774 get_bh(bh);
1da177e4
LT
2775 submit_bh(rw, bh);
2776 continue;
2777 }
2778 }
2779 unlock_buffer(bh);
1da177e4
LT
2780 }
2781}
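/*
 * Illustrative sketch: the usual ll_rw_block() pattern - kick off reads on a
 * batch of buffers, then wait for completion, e.g. when a filesystem wants
 * several metadata blocks brought in together.
 */
static int example_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}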
2782
2783/*
2784 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2785 * and then start new I/O and then wait upon it. The caller must have a ref on
2786 * the buffer_head.
2787 */
2788int sync_dirty_buffer(struct buffer_head *bh)
2789{
2790 int ret = 0;
2791
2792 WARN_ON(atomic_read(&bh->b_count) < 1);
2793 lock_buffer(bh);
2794 if (test_clear_buffer_dirty(bh)) {
2795 get_bh(bh);
2796 bh->b_end_io = end_buffer_write_sync;
2797 ret = submit_bh(WRITE, bh);
2798 wait_on_buffer(bh);
2799 if (buffer_eopnotsupp(bh)) {
2800 clear_buffer_eopnotsupp(bh);
2801 ret = -EOPNOTSUPP;
2802 }
2803 if (!ret && !buffer_uptodate(bh))
2804 ret = -EIO;
2805 } else {
2806 unlock_buffer(bh);
2807 }
2808 return ret;
2809}
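/*
 * Illustrative sketch: the mark-dirty-then-sync pattern used for critical
 * metadata such as a superblock.  sb_bread() pulls the block in through the
 * block device mapping; sync_dirty_buffer() then gives the data-integrity
 * writeout described above.  The function name and parameters are
 * hypothetical.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr,
				const void *data, size_t len)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);
	int err;

	if (!bh)
		return -EIO;
	memcpy(bh->b_data, data, len);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}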
2810
2811/*
2812 * try_to_free_buffers() checks if all the buffers on this particular page
2813 * are unused, and releases them if so.
2814 *
2815 * Exclusion against try_to_free_buffers may be obtained by either
2816 * locking the page or by holding its mapping's private_lock.
2817 *
2818 * If the page is dirty but all the buffers are clean then we need to
2819 * be sure to mark the page clean as well. This is because the page
2820 * may be against a block device, and a later reattachment of buffers
2821 * to a dirty page will set *all* buffers dirty. Which would corrupt
2822 * filesystem data on the same device.
2823 *
2824 * The same applies to regular filesystem pages: if all the buffers are
2825 * clean then we set the page clean and proceed. To do that, we require
2826 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2827 * private_lock.
2828 *
2829 * try_to_free_buffers() is non-blocking.
2830 */
2831static inline int buffer_busy(struct buffer_head *bh)
2832{
2833 return atomic_read(&bh->b_count) |
2834 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2835}
2836
2837static int
2838drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2839{
2840 struct buffer_head *head = page_buffers(page);
2841 struct buffer_head *bh;
2842
2843 bh = head;
2844 do {
de7d5a3b 2845 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
2846 set_bit(AS_EIO, &page->mapping->flags);
2847 if (buffer_busy(bh))
2848 goto failed;
2849 bh = bh->b_this_page;
2850 } while (bh != head);
2851
2852 do {
2853 struct buffer_head *next = bh->b_this_page;
2854
2855 if (!list_empty(&bh->b_assoc_buffers))
2856 __remove_assoc_queue(bh);
2857 bh = next;
2858 } while (bh != head);
2859 *buffers_to_free = head;
2860 __clear_page_buffers(page);
2861 return 1;
2862failed:
2863 return 0;
2864}
2865
2866int try_to_free_buffers(struct page *page)
2867{
2868 struct address_space * const mapping = page->mapping;
2869 struct buffer_head *buffers_to_free = NULL;
2870 int ret = 0;
2871
2872 BUG_ON(!PageLocked(page));
ecdfc978 2873 if (PageWriteback(page))
1da177e4
LT
2874 return 0;
2875
2876 if (mapping == NULL) { /* can this still happen? */
2877 ret = drop_buffers(page, &buffers_to_free);
2878 goto out;
2879 }
2880
2881 spin_lock(&mapping->private_lock);
2882 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
2883
2884 /*
2885 * If the filesystem writes its buffers by hand (eg ext3)
2886 * then we can have clean buffers against a dirty page. We
2887 * clean the page here; otherwise the VM will never notice
2888 * that the filesystem did any IO at all.
2889 *
2890 * Also, during truncate, discard_buffer will have marked all
2891 * the page's buffers clean. We discover that here and clean
2892 * the page also.
87df7241
NP
2893 *
2894 * private_lock must be held over this entire operation in order
2895 * to synchronise against __set_page_dirty_buffers and prevent the
2896 * dirty bit from being lost.
ecdfc978
LT
2897 */
2898 if (ret)
2899 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 2900 spin_unlock(&mapping->private_lock);
1da177e4
LT
2901out:
2902 if (buffers_to_free) {
2903 struct buffer_head *bh = buffers_to_free;
2904
2905 do {
2906 struct buffer_head *next = bh->b_this_page;
2907 free_buffer_head(bh);
2908 bh = next;
2909 } while (bh != buffers_to_free);
2910 }
2911 return ret;
2912}
2913EXPORT_SYMBOL(try_to_free_buffers);
2914
3978d717 2915void block_sync_page(struct page *page)
1da177e4
LT
2916{
2917 struct address_space *mapping;
2918
2919 smp_mb();
2920 mapping = page_mapping(page);
2921 if (mapping)
2922 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
2923}
2924
2925/*
2926 * There are no bdflush tunables left. But distributions are
2927 * still running obsolete flush daemons, so we terminate them here.
2928 *
2929 * Use of bdflush() is deprecated and will be removed in a future kernel.
2930 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2931 */
2932asmlinkage long sys_bdflush(int func, long data)
2933{
2934 static int msg_count;
2935
2936 if (!capable(CAP_SYS_ADMIN))
2937 return -EPERM;
2938
2939 if (msg_count < 5) {
2940 msg_count++;
2941 printk(KERN_INFO
2942 "warning: process `%s' used the obsolete bdflush"
2943 " system call\n", current->comm);
2944 printk(KERN_INFO "Fix your initscripts?\n");
2945 }
2946
2947 if (func == 1)
2948 do_exit(0);
2949 return 0;
2950}
2951
2952/*
2953 * Buffer-head allocation
2954 */
e18b890b 2955static struct kmem_cache *bh_cachep;
1da177e4
LT
2956
2957/*
2958 * Once the number of bh's in the machine exceeds this level, we start
2959 * stripping them in writeback.
2960 */
2961static int max_buffer_heads;
2962
2963int buffer_heads_over_limit;
2964
2965struct bh_accounting {
2966 int nr; /* Number of live bh's */
2967 int ratelimit; /* Limit cacheline bouncing */
2968};
2969
2970static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2971
2972static void recalc_bh_state(void)
2973{
2974 int i;
2975 int tot = 0;
2976
2977 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2978 return;
2979 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 2980 for_each_online_cpu(i)
1da177e4
LT
2981 tot += per_cpu(bh_accounting, i).nr;
2982 buffer_heads_over_limit = (tot > max_buffer_heads);
2983}
2984
dd0fc66f 2985struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 2986{
a35afb83 2987 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
1da177e4 2988 if (ret) {
a35afb83 2989 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 2990 get_cpu_var(bh_accounting).nr++;
1da177e4 2991 recalc_bh_state();
736c7b80 2992 put_cpu_var(bh_accounting);
1da177e4
LT
2993 }
2994 return ret;
2995}
2996EXPORT_SYMBOL(alloc_buffer_head);
2997
2998void free_buffer_head(struct buffer_head *bh)
2999{
3000 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3001 kmem_cache_free(bh_cachep, bh);
736c7b80 3002 get_cpu_var(bh_accounting).nr--;
1da177e4 3003 recalc_bh_state();
736c7b80 3004 put_cpu_var(bh_accounting);
1da177e4
LT
3005}
3006EXPORT_SYMBOL(free_buffer_head);
3007
1da177e4
LT
3008static void buffer_exit_cpu(int cpu)
3009{
3010 int i;
3011 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3012
3013 for (i = 0; i < BH_LRU_SIZE; i++) {
3014 brelse(b->bhs[i]);
3015 b->bhs[i] = NULL;
3016 }
8a143426
ED
3017 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3018 per_cpu(bh_accounting, cpu).nr = 0;
3019 put_cpu_var(bh_accounting);
1da177e4
LT
3020}
3021
3022static int buffer_cpu_notify(struct notifier_block *self,
3023 unsigned long action, void *hcpu)
3024{
8bb78442 3025 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3026 buffer_exit_cpu((unsigned long)hcpu);
3027 return NOTIFY_OK;
3028}
1da177e4
LT
3029
3030void __init buffer_init(void)
3031{
3032 int nrpages;
3033
a35afb83
CL
3034 bh_cachep = KMEM_CACHE(buffer_head,
3035 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
1da177e4
LT
3036
3037 /*
3038 * Limit the bh occupancy to 10% of ZONE_NORMAL
3039 */
3040 nrpages = (nr_free_buffer_pages() * 10) / 100;
3041 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3042 hotcpu_notifier(buffer_cpu_notify, 0);
3043}
3044
3045EXPORT_SYMBOL(__bforget);
3046EXPORT_SYMBOL(__brelse);
3047EXPORT_SYMBOL(__wait_on_buffer);
3048EXPORT_SYMBOL(block_commit_write);
3049EXPORT_SYMBOL(block_prepare_write);
54171690 3050EXPORT_SYMBOL(block_page_mkwrite);
1da177e4
LT
3051EXPORT_SYMBOL(block_read_full_page);
3052EXPORT_SYMBOL(block_sync_page);
3053EXPORT_SYMBOL(block_truncate_page);
3054EXPORT_SYMBOL(block_write_full_page);
3055EXPORT_SYMBOL(cont_prepare_write);
1da177e4
LT
3056EXPORT_SYMBOL(end_buffer_read_sync);
3057EXPORT_SYMBOL(end_buffer_write_sync);
3058EXPORT_SYMBOL(file_fsync);
3059EXPORT_SYMBOL(fsync_bdev);
3060EXPORT_SYMBOL(generic_block_bmap);
3061EXPORT_SYMBOL(generic_commit_write);
3062EXPORT_SYMBOL(generic_cont_expand);
05eb0b51 3063EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3064EXPORT_SYMBOL(init_buffer);
3065EXPORT_SYMBOL(invalidate_bdev);
3066EXPORT_SYMBOL(ll_rw_block);
3067EXPORT_SYMBOL(mark_buffer_dirty);
3068EXPORT_SYMBOL(submit_bh);
3069EXPORT_SYMBOL(sync_dirty_buffer);
3070EXPORT_SYMBOL(unlock_buffer);