1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
70void fastcall __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
77void fastcall unlock_buffer(struct buffer_head *bh)
78{
79 smp_mb__before_clear_bit();
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
83}
84
85/*
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
89 */
90void __wait_on_buffer(struct buffer_head * bh)
91{
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93}
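/*
 * Illustrative sketch: the usual caller pattern once I/O has been
 * started on a buffer is to wait for it and then check the uptodate
 * bit to detect an I/O error, e.g.:
 *
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */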
94
95static void
96__clear_page_buffers(struct page *page)
97{
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
101}
102
103static void buffer_io_error(struct buffer_head *bh)
104{
105 char b[BDEVNAME_SIZE];
106
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
110}
111
112/*
113 * End-of-IO handler helper function which does not touch the bh after
114 * unlocking it.
115 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116 * a race there is benign: unlock_buffer() only uses the bh's address for
117 * hashing after unlocking the buffer, so it doesn't actually touch the bh
118 * itself.
119 */
120static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
121{
122 if (uptodate) {
123 set_buffer_uptodate(bh);
124 } else {
125 /* This happens due to failed READA attempts. */
126 clear_buffer_uptodate(bh);
127 }
128 unlock_buffer(bh);
129}
130
131/*
132 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
133 * unlock the buffer. This is what ll_rw_block uses too.
134 */
135void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
136{
137 __end_buffer_read_notouch(bh, uptodate);
138 put_bh(bh);
139}
140
141void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
142{
143 char b[BDEVNAME_SIZE];
144
145 if (uptodate) {
146 set_buffer_uptodate(bh);
147 } else {
148 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
149 buffer_io_error(bh);
150 printk(KERN_WARNING "lost page write due to "
151 "I/O error on %s\n",
152 bdevname(bh->b_bdev, b));
153 }
154 set_buffer_write_io_error(bh);
155 clear_buffer_uptodate(bh);
156 }
157 unlock_buffer(bh);
158 put_bh(bh);
159}
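/*
 * Illustrative sketch: a synchronous write of one buffer uses
 * end_buffer_write_sync() as the completion handler, roughly the way
 * sync_dirty_buffer() does:
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(WRITE, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */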
160
161/*
162 * Write out and wait upon all the dirty data associated with a block
163 * device via its mapping. Does not take the superblock lock.
164 */
165int sync_blockdev(struct block_device *bdev)
166{
167 int ret = 0;
168
169 if (bdev)
170 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
171 return ret;
172}
173EXPORT_SYMBOL(sync_blockdev);
174
175/*
176 * Write out and wait upon all dirty data associated with this
177 * device. Filesystem data as well as the underlying block
178 * device. Takes the superblock lock.
179 */
180int fsync_bdev(struct block_device *bdev)
181{
182 struct super_block *sb = get_super(bdev);
183 if (sb) {
184 int res = fsync_super(sb);
185 drop_super(sb);
186 return res;
187 }
188 return sync_blockdev(bdev);
189}
190
191/**
192 * freeze_bdev -- lock a filesystem and force it into a consistent state
193 * @bdev: blockdevice to lock
194 *
195 * This takes the block device bd_mount_sem to make sure no new mounts
196 * happen on bdev until thaw_bdev() is called.
197 * If a superblock is found on this device, we take the s_umount semaphore
198 * on it to make sure nobody unmounts until the snapshot creation is done.
199 */
200struct super_block *freeze_bdev(struct block_device *bdev)
201{
202 struct super_block *sb;
203
204 down(&bdev->bd_mount_sem);
205 sb = get_super(bdev);
206 if (sb && !(sb->s_flags & MS_RDONLY)) {
207 sb->s_frozen = SB_FREEZE_WRITE;
208 smp_wmb();
209
210 __fsync_super(sb);
211
212 sb->s_frozen = SB_FREEZE_TRANS;
213 smp_wmb();
214
215 sync_blockdev(sb->s_bdev);
216
217 if (sb->s_op->write_super_lockfs)
218 sb->s_op->write_super_lockfs(sb);
219 }
220
221 sync_blockdev(bdev);
222 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
223}
224EXPORT_SYMBOL(freeze_bdev);
225
226/**
227 * thaw_bdev -- unlock filesystem
228 * @bdev: blockdevice to unlock
229 * @sb: associated superblock
230 *
231 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
232 */
233void thaw_bdev(struct block_device *bdev, struct super_block *sb)
234{
235 if (sb) {
236 BUG_ON(sb->s_bdev != bdev);
237
238 if (sb->s_op->unlockfs)
239 sb->s_op->unlockfs(sb);
240 sb->s_frozen = SB_UNFROZEN;
241 smp_wmb();
242 wake_up(&sb->s_wait_unfrozen);
243 drop_super(sb);
244 }
245
246 up(&bdev->bd_mount_sem);
247}
248EXPORT_SYMBOL(thaw_bdev);
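/*
 * Illustrative sketch: a snapshot/backup path brackets its work with
 * freeze_bdev()/thaw_bdev(). take_snapshot() is a hypothetical helper
 * standing in for whatever the caller actually does while writes are
 * blocked:
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 */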
249
250/*
251 * Various filesystems appear to want __find_get_block to be non-blocking.
252 * But it's the page lock which protects the buffers. To get around this,
253 * we get exclusion from try_to_free_buffers with the blockdev mapping's
254 * private_lock.
255 *
256 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257 * may be quite high. This code could TryLock the page, and if that
258 * succeeds, there is no need to take private_lock. (But if
259 * private_lock is contended then so is mapping->tree_lock).
260 */
261static struct buffer_head *
262__find_get_block_slow(struct block_device *bdev, sector_t block)
263{
264 struct inode *bd_inode = bdev->bd_inode;
265 struct address_space *bd_mapping = bd_inode->i_mapping;
266 struct buffer_head *ret = NULL;
267 pgoff_t index;
268 struct buffer_head *bh;
269 struct buffer_head *head;
270 struct page *page;
271 int all_mapped = 1;
272
273 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 page = find_get_page(bd_mapping, index);
275 if (!page)
276 goto out;
277
278 spin_lock(&bd_mapping->private_lock);
279 if (!page_has_buffers(page))
280 goto out_unlock;
281 head = page_buffers(page);
282 bh = head;
283 do {
284 if (bh->b_blocknr == block) {
285 ret = bh;
286 get_bh(bh);
287 goto out_unlock;
288 }
289 if (!buffer_mapped(bh))
290 all_mapped = 0;
291 bh = bh->b_this_page;
292 } while (bh != head);
293
294 /* we might be here because some of the buffers on this page are
295 * not mapped. This is due to various races between
296 * file io on the block device and getblk. It gets dealt with
297 * elsewhere, don't buffer_error if we had some unmapped buffers
298 */
299 if (all_mapped) {
300 printk("__find_get_block_slow() failed. "
301 "block=%llu, b_blocknr=%llu\n",
302 (unsigned long long)block,
303 (unsigned long long)bh->b_blocknr);
304 printk("b_state=0x%08lx, b_size=%zu\n",
305 bh->b_state, bh->b_size);
306 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
307 }
308out_unlock:
309 spin_unlock(&bd_mapping->private_lock);
310 page_cache_release(page);
311out:
312 return ret;
313}
314
315/* If invalidate_buffers() will trash dirty buffers, it means some kind
316 of fs corruption is going on. Trashing dirty data always implies losing
317 information that was supposed to be just stored on the physical layer
318 by the user.
319
320 Thus invalidate_buffers in general usage is not allowed to trash
321 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
322 be preserved. These buffers are simply skipped.
323
324 We also skip buffers which are still in use. For example this can
325 happen if a userspace program is reading the block device.
326
327 NOTE: In the case where the user removed a removable-media-disk even if
328 there's still dirty data not synced on disk (due to a bug in the device driver
329 or an error by the user), by not destroying the dirty buffers we could
330 generate corruption also on the next media inserted, thus a parameter is
331 necessary to handle this case in the safest way possible (trying
332 not to corrupt the newly inserted disk with data belonging to
333 the old, now corrupted disk). Also for the ramdisk the natural thing
334 to do in order to release the ramdisk memory is to destroy dirty buffers.
335
336 These are two special cases. Normal usage implies that the device driver
337 issues a sync on the device (without waiting for I/O completion) and
338 then an invalidate_buffers call that doesn't trash dirty buffers.
339
340 For handling cache coherency with the blkdev pagecache the 'update' case
341 has been introduced. It is needed to re-read from disk any pinned
342 buffer. NOTE: re-reading from disk is destructive so we can do it only
343 when we assume nobody is changing the buffercache under our I/O and when
344 we think the disk contains more recent information than the buffercache.
345 The update == 1 pass marks the buffers we need to update, the update == 2
346 pass does the actual I/O. */
347void invalidate_bdev(struct block_device *bdev)
348{
349 struct address_space *mapping = bdev->bd_inode->i_mapping;
350
351 if (mapping->nrpages == 0)
352 return;
353
354 invalidate_bh_lrus();
355 invalidate_mapping_pages(mapping, 0, -1);
356}
357
358/*
359 * Kick pdflush then try to free up some ZONE_NORMAL memory.
360 */
361static void free_more_memory(void)
362{
363 struct zone **zones;
364 pg_data_t *pgdat;
365
366 wakeup_pdflush(1024);
367 yield();
368
369 for_each_online_pgdat(pgdat) {
370 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
371 if (*zones)
372 try_to_free_pages(zones, 0, GFP_NOFS);
373 }
374}
375
376/*
377 * I/O completion handler for block_read_full_page() - pages
378 * which come unlocked at the end of I/O.
379 */
380static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
381{
382 unsigned long flags;
383 struct buffer_head *first;
384 struct buffer_head *tmp;
385 struct page *page;
386 int page_uptodate = 1;
387
388 BUG_ON(!buffer_async_read(bh));
389
390 page = bh->b_page;
391 if (uptodate) {
392 set_buffer_uptodate(bh);
393 } else {
394 clear_buffer_uptodate(bh);
395 if (printk_ratelimit())
396 buffer_io_error(bh);
397 SetPageError(page);
398 }
399
400 /*
401 * Be _very_ careful from here on. Bad things can happen if
402 * two buffer heads end IO at almost the same time and both
403 * decide that the page is now completely done.
404 */
405 first = page_buffers(page);
406 local_irq_save(flags);
407 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
408 clear_buffer_async_read(bh);
409 unlock_buffer(bh);
410 tmp = bh;
411 do {
412 if (!buffer_uptodate(tmp))
413 page_uptodate = 0;
414 if (buffer_async_read(tmp)) {
415 BUG_ON(!buffer_locked(tmp));
416 goto still_busy;
417 }
418 tmp = tmp->b_this_page;
419 } while (tmp != bh);
420 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 local_irq_restore(flags);
422
423 /*
424 * If none of the buffers had errors and they are all
425 * uptodate then we can set the page uptodate.
426 */
427 if (page_uptodate && !PageError(page))
428 SetPageUptodate(page);
429 unlock_page(page);
430 return;
431
432still_busy:
433 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
434 local_irq_restore(flags);
435 return;
436}
437
438/*
439 * Completion handler for block_write_full_page() - pages which are unlocked
440 * during I/O, and which have PageWriteback cleared upon I/O completion.
441 */
442static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
443{
444 char b[BDEVNAME_SIZE];
445 unsigned long flags;
446 struct buffer_head *first;
447 struct buffer_head *tmp;
448 struct page *page;
449
450 BUG_ON(!buffer_async_write(bh));
451
452 page = bh->b_page;
453 if (uptodate) {
454 set_buffer_uptodate(bh);
455 } else {
456 if (printk_ratelimit()) {
457 buffer_io_error(bh);
458 printk(KERN_WARNING "lost page write due to "
459 "I/O error on %s\n",
460 bdevname(bh->b_bdev, b));
461 }
462 set_bit(AS_EIO, &page->mapping->flags);
463 set_buffer_write_io_error(bh);
464 clear_buffer_uptodate(bh);
465 SetPageError(page);
466 }
467
468 first = page_buffers(page);
469 local_irq_save(flags);
470 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
471
472 clear_buffer_async_write(bh);
473 unlock_buffer(bh);
474 tmp = bh->b_this_page;
475 while (tmp != bh) {
476 if (buffer_async_write(tmp)) {
477 BUG_ON(!buffer_locked(tmp));
478 goto still_busy;
479 }
480 tmp = tmp->b_this_page;
481 }
482 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
483 local_irq_restore(flags);
484 end_page_writeback(page);
485 return;
486
487still_busy:
488 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
489 local_irq_restore(flags);
490 return;
491}
492
493/*
494 * If a page's buffers are under async read-in (end_buffer_async_read
495 * completion) then there is a possibility that another thread of
496 * control could lock one of the buffers after it has completed
497 * but while some of the other buffers have not completed. This
498 * locked buffer would confuse end_buffer_async_read() into not unlocking
499 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
500 * that this buffer is not under async I/O.
501 *
502 * The page comes unlocked when it has no locked buffer_async buffers
503 * left.
504 *
505 * PageLocked prevents anyone starting new async I/O reads any of
506 * the buffers.
507 *
508 * PageWriteback is used to prevent simultaneous writeout of the same
509 * page.
510 *
511 * PageLocked prevents anyone from starting writeback of a page which is
512 * under read I/O (PageWriteback is only ever set against a locked page).
513 */
514static void mark_buffer_async_read(struct buffer_head *bh)
515{
516 bh->b_end_io = end_buffer_async_read;
517 set_buffer_async_read(bh);
518}
519
520void mark_buffer_async_write(struct buffer_head *bh)
521{
522 bh->b_end_io = end_buffer_async_write;
523 set_buffer_async_write(bh);
524}
525EXPORT_SYMBOL(mark_buffer_async_write);
526
527
528/*
529 * fs/buffer.c contains helper functions for buffer-backed address space's
530 * fsync functions. A common requirement for buffer-based filesystems is
531 * that certain data from the backing blockdev needs to be written out for
532 * a successful fsync(). For example, ext2 indirect blocks need to be
533 * written back and waited upon before fsync() returns.
534 *
535 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
536 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
537 * management of a list of dependent buffers at ->i_mapping->private_list.
538 *
539 * Locking is a little subtle: try_to_free_buffers() will remove buffers
540 * from their controlling inode's queue when they are being freed. But
541 * try_to_free_buffers() will be operating against the *blockdev* mapping
542 * at the time, not against the S_ISREG file which depends on those buffers.
543 * So the locking for private_list is via the private_lock in the address_space
544 * which backs the buffers. Which is different from the address_space
545 * against which the buffers are listed. So for a particular address_space,
546 * mapping->private_lock does *not* protect mapping->private_list! In fact,
547 * mapping->private_list will always be protected by the backing blockdev's
548 * ->private_lock.
549 *
550 * Which introduces a requirement: all buffers on an address_space's
551 * ->private_list must be from the same address_space: the blockdev's.
552 *
553 * address_spaces which do not place buffers at ->private_list via these
554 * utility functions are free to use private_lock and private_list for
555 * whatever they want. The only requirement is that list_empty(private_list)
556 * be true at clear_inode() time.
557 *
558 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
559 * filesystems should do that. invalidate_inode_buffers() should just go
560 * BUG_ON(!list_empty).
561 *
562 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
563 * take an address_space, not an inode. And it should be called
564 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
565 * queued up.
566 *
567 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
568 * list if it is already on a list. Because if the buffer is on a list,
569 * it *must* already be on the right one. If not, the filesystem is being
570 * silly. This will save a ton of locking. But first we have to ensure
571 * that buffers are taken *off* the old inode's list when they are freed
572 * (presumably in truncate). That requires careful auditing of all
573 * filesystems (do it inside bforget()). It could also be done by bringing
574 * b_inode back.
575 */
576
577/*
578 * The buffer's backing address_space's private_lock must be held
579 */
580static inline void __remove_assoc_queue(struct buffer_head *bh)
581{
582 list_del_init(&bh->b_assoc_buffers);
583 WARN_ON(!bh->b_assoc_map);
584 if (buffer_write_io_error(bh))
585 set_bit(AS_EIO, &bh->b_assoc_map->flags);
586 bh->b_assoc_map = NULL;
587}
588
589int inode_has_buffers(struct inode *inode)
590{
591 return !list_empty(&inode->i_data.private_list);
592}
593
594/*
595 * osync is designed to support O_SYNC io. It waits synchronously for
596 * all already-submitted IO to complete, but does not queue any new
597 * writes to the disk.
598 *
599 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
600 * you dirty the buffers, and then use osync_inode_buffers to wait for
601 * completion. Any other dirty buffers which are not yet queued for
602 * write will not be flushed to disk by the osync.
603 */
604static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
605{
606 struct buffer_head *bh;
607 struct list_head *p;
608 int err = 0;
609
610 spin_lock(lock);
611repeat:
612 list_for_each_prev(p, list) {
613 bh = BH_ENTRY(p);
614 if (buffer_locked(bh)) {
615 get_bh(bh);
616 spin_unlock(lock);
617 wait_on_buffer(bh);
618 if (!buffer_uptodate(bh))
619 err = -EIO;
620 brelse(bh);
621 spin_lock(lock);
622 goto repeat;
623 }
624 }
625 spin_unlock(lock);
626 return err;
627}
628
629/**
630 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
631 * buffers
632 * @mapping: the mapping which wants those buffers written
633 *
634 * Starts I/O against the buffers at mapping->private_list, and waits upon
635 * that I/O.
636 *
637 * Basically, this is a convenience function for fsync().
638 * @mapping is a file or directory which needs those buffers to be written for
639 * a successful fsync().
640 */
641int sync_mapping_buffers(struct address_space *mapping)
642{
643 struct address_space *buffer_mapping = mapping->assoc_mapping;
644
645 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
646 return 0;
647
648 return fsync_buffers_list(&buffer_mapping->private_lock,
649 &mapping->private_list);
650}
651EXPORT_SYMBOL(sync_mapping_buffers);
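/*
 * Illustrative sketch: a minimal ->fsync() for a buffer-backed
 * filesystem only has to push the inode's associated buffers, roughly
 * as ext2 does. example_fsync is a hypothetical name; a real
 * implementation would also write the inode itself:
 *
 *	static int example_fsync(struct file *file, struct dentry *dentry,
 *				 int datasync)
 *	{
 *		return sync_mapping_buffers(dentry->d_inode->i_mapping);
 *	}
 */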
652
653/*
654 * Called when we've recently written block `bblock', and it is known that
655 * `bblock' was for a buffer_boundary() buffer. This means that the block at
656 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
657 * dirty, schedule it for IO. So that indirects merge nicely with their data.
658 */
659void write_boundary_block(struct block_device *bdev,
660 sector_t bblock, unsigned blocksize)
661{
662 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
663 if (bh) {
664 if (buffer_dirty(bh))
665 ll_rw_block(WRITE, 1, &bh);
666 put_bh(bh);
667 }
668}
669
670void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
671{
672 struct address_space *mapping = inode->i_mapping;
673 struct address_space *buffer_mapping = bh->b_page->mapping;
674
675 mark_buffer_dirty(bh);
676 if (!mapping->assoc_mapping) {
677 mapping->assoc_mapping = buffer_mapping;
678 } else {
679 BUG_ON(mapping->assoc_mapping != buffer_mapping);
680 }
681 if (list_empty(&bh->b_assoc_buffers)) {
682 spin_lock(&buffer_mapping->private_lock);
683 list_move_tail(&bh->b_assoc_buffers,
684 &mapping->private_list);
685 bh->b_assoc_map = mapping;
686 spin_unlock(&buffer_mapping->private_lock);
687 }
688}
689EXPORT_SYMBOL(mark_buffer_dirty_inode);
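/*
 * Illustrative sketch: when a filesystem modifies a metadata block on
 * behalf of an inode (an indirect block, say), dirtying it with
 * mark_buffer_dirty_inode() puts it on ->i_mapping->private_list so
 * that sync_mapping_buffers() finds it at fsync() time. blocknr, slot
 * and new_block are hypothetical:
 *
 *	bh = sb_bread(inode->i_sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	((__le32 *)bh->b_data)[slot] = cpu_to_le32(new_block);
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */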
690
691/*
692 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
693 * dirty.
694 *
695 * If warn is true, then emit a warning if the page is not uptodate and has
696 * not been truncated.
697 */
698static int __set_page_dirty(struct page *page,
699 struct address_space *mapping, int warn)
700{
701 if (unlikely(!mapping))
702 return !TestSetPageDirty(page);
703
704 if (TestSetPageDirty(page))
705 return 0;
706
707 write_lock_irq(&mapping->tree_lock);
708 if (page->mapping) { /* Race with truncate? */
709 WARN_ON_ONCE(warn && !PageUptodate(page));
710
711 if (mapping_cap_account_dirty(mapping)) {
712 __inc_zone_page_state(page, NR_FILE_DIRTY);
713 task_io_account_write(PAGE_CACHE_SIZE);
714 }
715 radix_tree_tag_set(&mapping->page_tree,
716 page_index(page), PAGECACHE_TAG_DIRTY);
717 }
718 write_unlock_irq(&mapping->tree_lock);
719 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
720
721 return 1;
722}
723
724/*
725 * Add a page to the dirty page list.
726 *
727 * It is a sad fact of life that this function is called from several places
728 * deeply under spinlocking. It may not sleep.
729 *
730 * If the page has buffers, the uptodate buffers are set dirty, to preserve
731 * dirty-state coherency between the page and the buffers. If the page does
732 * not have buffers then when they are later attached they will all be set
733 * dirty.
734 *
735 * The buffers are dirtied before the page is dirtied. There's a small race
736 * window in which a writepage caller may see the page cleanness but not the
737 * buffer dirtiness. That's fine. If this code were to set the page dirty
738 * before the buffers, a concurrent writepage caller could clear the page dirty
739 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
740 * page on the dirty page list.
741 *
742 * We use private_lock to lock against try_to_free_buffers while using the
743 * page's buffer list. Also use this to protect against clean buffers being
744 * added to the page after it was set dirty.
745 *
746 * FIXME: may need to call ->reservepage here as well. That's rather up to the
747 * address_space though.
748 */
749int __set_page_dirty_buffers(struct page *page)
750{
751 struct address_space *mapping = page_mapping(page);
752
753 if (unlikely(!mapping))
754 return !TestSetPageDirty(page);
755
756 spin_lock(&mapping->private_lock);
757 if (page_has_buffers(page)) {
758 struct buffer_head *head = page_buffers(page);
759 struct buffer_head *bh = head;
760
761 do {
762 set_buffer_dirty(bh);
763 bh = bh->b_this_page;
764 } while (bh != head);
765 }
766 spin_unlock(&mapping->private_lock);
767
768 return __set_page_dirty(page, mapping, 1);
769}
770EXPORT_SYMBOL(__set_page_dirty_buffers);
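/*
 * Illustrative sketch: buffer-backed filesystems normally point the
 * ->set_page_dirty address_space operation at this helper (leaving it
 * NULL has the same effect, since set_page_dirty() falls back to
 * __set_page_dirty_buffers() when CONFIG_BLOCK is set):
 *
 *	static const struct address_space_operations example_aops = {
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *	};
 */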
771
772/*
773 * Write out and wait upon a list of buffers.
774 *
775 * We have conflicting pressures: we want to make sure that all
776 * initially dirty buffers get waited on, but that any subsequently
777 * dirtied buffers don't. After all, we don't want fsync to last
778 * forever if somebody is actively writing to the file.
779 *
780 * Do this in two main stages: first we copy dirty buffers to a
781 * temporary inode list, queueing the writes as we go. Then we clean
782 * up, waiting for those writes to complete.
783 *
784 * During this second stage, any subsequent updates to the file may end
785 * up refiling the buffer on the original inode's dirty list again, so
786 * there is a chance we will end up with a buffer queued for write but
787 * not yet completed on that list. So, as a final cleanup we go through
788 * the osync code to catch these locked, dirty buffers without requeuing
789 * any newly dirty buffers for write.
790 */
791static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
792{
793 struct buffer_head *bh;
794 struct list_head tmp;
795 int err = 0, err2;
796
797 INIT_LIST_HEAD(&tmp);
798
799 spin_lock(lock);
800 while (!list_empty(list)) {
801 bh = BH_ENTRY(list->next);
802 __remove_assoc_queue(bh);
803 if (buffer_dirty(bh) || buffer_locked(bh)) {
804 list_add(&bh->b_assoc_buffers, &tmp);
805 if (buffer_dirty(bh)) {
806 get_bh(bh);
807 spin_unlock(lock);
808 /*
809 * Ensure any pending I/O completes so that
810 * ll_rw_block() actually writes the current
811 * contents - it is a noop if I/O is still in
812 * flight on potentially older contents.
813 */
814 ll_rw_block(SWRITE, 1, &bh);
815 brelse(bh);
816 spin_lock(lock);
817 }
818 }
819 }
820
821 while (!list_empty(&tmp)) {
822 bh = BH_ENTRY(tmp.prev);
823 list_del_init(&bh->b_assoc_buffers);
824 get_bh(bh);
825 spin_unlock(lock);
826 wait_on_buffer(bh);
827 if (!buffer_uptodate(bh))
828 err = -EIO;
829 brelse(bh);
830 spin_lock(lock);
831 }
832
833 spin_unlock(lock);
834 err2 = osync_buffers_list(lock, list);
835 if (err)
836 return err;
837 else
838 return err2;
839}
840
841/*
842 * Invalidate any and all dirty buffers on a given inode. We are
843 * probably unmounting the fs, but that doesn't mean we have already
844 * done a sync(). Just drop the buffers from the inode list.
845 *
846 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
847 * assumes that all the buffers are against the blockdev. Not true
848 * for reiserfs.
849 */
850void invalidate_inode_buffers(struct inode *inode)
851{
852 if (inode_has_buffers(inode)) {
853 struct address_space *mapping = &inode->i_data;
854 struct list_head *list = &mapping->private_list;
855 struct address_space *buffer_mapping = mapping->assoc_mapping;
856
857 spin_lock(&buffer_mapping->private_lock);
858 while (!list_empty(list))
859 __remove_assoc_queue(BH_ENTRY(list->next));
860 spin_unlock(&buffer_mapping->private_lock);
861 }
862}
863
864/*
865 * Remove any clean buffers from the inode's buffer list. This is called
866 * when we're trying to free the inode itself. Those buffers can pin it.
867 *
868 * Returns true if all buffers were removed.
869 */
870int remove_inode_buffers(struct inode *inode)
871{
872 int ret = 1;
873
874 if (inode_has_buffers(inode)) {
875 struct address_space *mapping = &inode->i_data;
876 struct list_head *list = &mapping->private_list;
877 struct address_space *buffer_mapping = mapping->assoc_mapping;
878
879 spin_lock(&buffer_mapping->private_lock);
880 while (!list_empty(list)) {
881 struct buffer_head *bh = BH_ENTRY(list->next);
882 if (buffer_dirty(bh)) {
883 ret = 0;
884 break;
885 }
886 __remove_assoc_queue(bh);
887 }
888 spin_unlock(&buffer_mapping->private_lock);
889 }
890 return ret;
891}
892
893/*
894 * Create the appropriate buffers when given a page for data area and
895 * the size of each buffer.. Use the bh->b_this_page linked list to
896 * follow the buffers created. Return NULL if unable to create more
897 * buffers.
898 *
899 * The retry flag is used to differentiate async IO (paging, swapping)
900 * which may not fail from ordinary buffer allocations.
901 */
902struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
903 int retry)
904{
905 struct buffer_head *bh, *head;
906 long offset;
907
908try_again:
909 head = NULL;
910 offset = PAGE_SIZE;
911 while ((offset -= size) >= 0) {
912 bh = alloc_buffer_head(GFP_NOFS);
913 if (!bh)
914 goto no_grow;
915
916 bh->b_bdev = NULL;
917 bh->b_this_page = head;
918 bh->b_blocknr = -1;
919 head = bh;
920
921 bh->b_state = 0;
922 atomic_set(&bh->b_count, 0);
923 bh->b_private = NULL;
924 bh->b_size = size;
925
926 /* Link the buffer to its page */
927 set_bh_page(bh, page, offset);
928
929 init_buffer(bh, NULL, NULL);
930 }
931 return head;
932/*
933 * In case anything failed, we just free everything we got.
934 */
935no_grow:
936 if (head) {
937 do {
938 bh = head;
939 head = head->b_this_page;
940 free_buffer_head(bh);
941 } while (head);
942 }
943
944 /*
945 * Return failure for non-async IO requests. Async IO requests
946 * are not allowed to fail, so we have to wait until buffer heads
947 * become available. But we don't want tasks sleeping with
948 * partially complete buffers, so all were released above.
949 */
950 if (!retry)
951 return NULL;
952
953 /* We're _really_ low on memory. Now we just
954 * wait for old buffer heads to become free due to
955 * finishing IO. Since this is an async request and
956 * the reserve list is empty, we're sure there are
957 * async buffer heads in use.
958 */
959 free_more_memory();
960 goto try_again;
961}
962EXPORT_SYMBOL_GPL(alloc_page_buffers);
963
964static inline void
965link_dev_buffers(struct page *page, struct buffer_head *head)
966{
967 struct buffer_head *bh, *tail;
968
969 bh = head;
970 do {
971 tail = bh;
972 bh = bh->b_this_page;
973 } while (bh);
974 tail->b_this_page = head;
975 attach_page_buffers(page, head);
976}
977
978/*
979 * Initialise the state of a blockdev page's buffers.
980 */
981static void
982init_page_buffers(struct page *page, struct block_device *bdev,
983 sector_t block, int size)
984{
985 struct buffer_head *head = page_buffers(page);
986 struct buffer_head *bh = head;
987 int uptodate = PageUptodate(page);
988
989 do {
990 if (!buffer_mapped(bh)) {
991 init_buffer(bh, NULL, NULL);
992 bh->b_bdev = bdev;
993 bh->b_blocknr = block;
994 if (uptodate)
995 set_buffer_uptodate(bh);
996 set_buffer_mapped(bh);
997 }
998 block++;
999 bh = bh->b_this_page;
1000 } while (bh != head);
1001}
1002
1003/*
1004 * Create the page-cache page that contains the requested block.
1005 *
1006 * This is used purely for blockdev mappings.
1007 */
1008static struct page *
1009grow_dev_page(struct block_device *bdev, sector_t block,
1010 pgoff_t index, int size)
1011{
1012 struct inode *inode = bdev->bd_inode;
1013 struct page *page;
1014 struct buffer_head *bh;
1015
1016 page = find_or_create_page(inode->i_mapping, index,
1017 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1018 if (!page)
1019 return NULL;
1020
1021 BUG_ON(!PageLocked(page));
1022
1023 if (page_has_buffers(page)) {
1024 bh = page_buffers(page);
1025 if (bh->b_size == size) {
1026 init_page_buffers(page, bdev, block, size);
1027 return page;
1028 }
1029 if (!try_to_free_buffers(page))
1030 goto failed;
1031 }
1032
1033 /*
1034 * Allocate some buffers for this page
1035 */
1036 bh = alloc_page_buffers(page, size, 0);
1037 if (!bh)
1038 goto failed;
1039
1040 /*
1041 * Link the page to the buffers and initialise them. Take the
1042 * lock to be atomic wrt __find_get_block(), which does not
1043 * run under the page lock.
1044 */
1045 spin_lock(&inode->i_mapping->private_lock);
1046 link_dev_buffers(page, bh);
1047 init_page_buffers(page, bdev, block, size);
1048 spin_unlock(&inode->i_mapping->private_lock);
1049 return page;
1050
1051failed:
1052 BUG();
1053 unlock_page(page);
1054 page_cache_release(page);
1055 return NULL;
1056}
1057
1058/*
1059 * Create buffers for the specified block device block's page. If
1060 * that page was dirty, the buffers are set dirty also.
1061 */
1062static int
1063grow_buffers(struct block_device *bdev, sector_t block, int size)
1064{
1065 struct page *page;
1066 pgoff_t index;
1067 int sizebits;
1068
1069 sizebits = -1;
1070 do {
1071 sizebits++;
1072 } while ((size << sizebits) < PAGE_SIZE);
1073
1074 index = block >> sizebits;
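/*
 * For example, with size == 1024 and PAGE_SIZE == 4096 the loop above
 * gives sizebits == 2, so there are four blocks per page and the page
 * index is block >> 2.
 */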
1075
1076 /*
1077 * Check for a block which wants to lie outside our maximum possible
1078 * pagecache index. (this comparison is done using sector_t types).
1079 */
1080 if (unlikely(index != block >> sizebits)) {
1081 char b[BDEVNAME_SIZE];
1082
1083 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1084 "device %s\n",
1085 __FUNCTION__, (unsigned long long)block,
1086 bdevname(bdev, b));
1087 return -EIO;
1088 }
1089 block = index << sizebits;
1090 /* Create a page with the proper size buffers.. */
1091 page = grow_dev_page(bdev, block, index, size);
1092 if (!page)
1093 return 0;
1094 unlock_page(page);
1095 page_cache_release(page);
1096 return 1;
1097}
1098
1099static struct buffer_head *
1100__getblk_slow(struct block_device *bdev, sector_t block, int size)
1101{
1102 /* Size must be multiple of hard sectorsize */
1103 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1104 (size < 512 || size > PAGE_SIZE))) {
1105 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1106 size);
1107 printk(KERN_ERR "hardsect size: %d\n",
1108 bdev_hardsect_size(bdev));
1109
1110 dump_stack();
1111 return NULL;
1112 }
1113
1114 for (;;) {
1115 struct buffer_head * bh;
1116 int ret;
1117
1118 bh = __find_get_block(bdev, block, size);
1119 if (bh)
1120 return bh;
1121
1122 ret = grow_buffers(bdev, block, size);
1123 if (ret < 0)
1124 return NULL;
1125 if (ret == 0)
1126 free_more_memory();
1127 }
1128}
1129
1130/*
1131 * The relationship between dirty buffers and dirty pages:
1132 *
1133 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1134 * the page is tagged dirty in its radix tree.
1135 *
1136 * At all times, the dirtiness of the buffers represents the dirtiness of
1137 * subsections of the page. If the page has buffers, the page dirty bit is
1138 * merely a hint about the true dirty state.
1139 *
1140 * When a page is set dirty in its entirety, all its buffers are marked dirty
1141 * (if the page has buffers).
1142 *
1143 * When a buffer is marked dirty, its page is dirtied, but the page's other
1144 * buffers are not.
1145 *
1146 * Also. When blockdev buffers are explicitly read with bread(), they
1147 * individually become uptodate. But their backing page remains not
1148 * uptodate - even if all of its buffers are uptodate. A subsequent
1149 * block_read_full_page() against that page will discover all the uptodate
1150 * buffers, will set the page uptodate and will perform no I/O.
1151 */
1152
1153/**
1154 * mark_buffer_dirty - mark a buffer_head as needing writeout
1155 * @bh: the buffer_head to mark dirty
1156 *
1157 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1158 * backing page dirty, then tag the page as dirty in its address_space's radix
1159 * tree and then attach the address_space's inode to its superblock's dirty
1160 * inode list.
1161 *
1162 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1163 * mapping->tree_lock and the global inode_lock.
1164 */
1165void fastcall mark_buffer_dirty(struct buffer_head *bh)
1166{
1167 WARN_ON_ONCE(!buffer_uptodate(bh));
1168 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1169 __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1170}
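/*
 * Illustrative sketch: the classic read-modify-write cycle on a
 * metadata block, using the sb_bread()/brelse() wrappers; blocknr,
 * offset, data and len are hypothetical:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data + offset, data, len);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */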
1171
1172/*
1173 * Decrement a buffer_head's reference count. If all buffers against a page
1174 * have zero reference count, are clean and unlocked, and if the page is clean
1175 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1176 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1177 * a page but it ends up not being freed, and buffers may later be reattached).
1178 */
1179void __brelse(struct buffer_head * buf)
1180{
1181 if (atomic_read(&buf->b_count)) {
1182 put_bh(buf);
1183 return;
1184 }
1185 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1186 WARN_ON(1);
1187}
1188
1189/*
1190 * bforget() is like brelse(), except it discards any
1191 * potentially dirty data.
1192 */
1193void __bforget(struct buffer_head *bh)
1194{
1195 clear_buffer_dirty(bh);
1196 if (!list_empty(&bh->b_assoc_buffers)) {
1197 struct address_space *buffer_mapping = bh->b_page->mapping;
1198
1199 spin_lock(&buffer_mapping->private_lock);
1200 list_del_init(&bh->b_assoc_buffers);
1201 bh->b_assoc_map = NULL;
1202 spin_unlock(&buffer_mapping->private_lock);
1203 }
1204 __brelse(bh);
1205}
1206
1207static struct buffer_head *__bread_slow(struct buffer_head *bh)
1208{
1209 lock_buffer(bh);
1210 if (buffer_uptodate(bh)) {
1211 unlock_buffer(bh);
1212 return bh;
1213 } else {
1214 get_bh(bh);
1215 bh->b_end_io = end_buffer_read_sync;
1216 submit_bh(READ, bh);
1217 wait_on_buffer(bh);
1218 if (buffer_uptodate(bh))
1219 return bh;
1220 }
1221 brelse(bh);
1222 return NULL;
1223}
1224
1225/*
1226 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1227 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1228 * refcount elevated by one when they're in an LRU. A buffer can only appear
1229 * once in a particular CPU's LRU. A single buffer can be present in multiple
1230 * CPU's LRUs at the same time.
1231 *
1232 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1233 * sb_find_get_block().
1234 *
1235 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1236 * a local interrupt disable for that.
1237 */
1238
1239#define BH_LRU_SIZE 8
1240
1241struct bh_lru {
1242 struct buffer_head *bhs[BH_LRU_SIZE];
1243};
1244
1245static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1246
1247#ifdef CONFIG_SMP
1248#define bh_lru_lock() local_irq_disable()
1249#define bh_lru_unlock() local_irq_enable()
1250#else
1251#define bh_lru_lock() preempt_disable()
1252#define bh_lru_unlock() preempt_enable()
1253#endif
1254
1255static inline void check_irqs_on(void)
1256{
1257#ifdef irqs_disabled
1258 BUG_ON(irqs_disabled());
1259#endif
1260}
1261
1262/*
1263 * The LRU management algorithm is dopey-but-simple. Sorry.
1264 */
1265static void bh_lru_install(struct buffer_head *bh)
1266{
1267 struct buffer_head *evictee = NULL;
1268 struct bh_lru *lru;
1269
1270 check_irqs_on();
1271 bh_lru_lock();
1272 lru = &__get_cpu_var(bh_lrus);
1273 if (lru->bhs[0] != bh) {
1274 struct buffer_head *bhs[BH_LRU_SIZE];
1275 int in;
1276 int out = 0;
1277
1278 get_bh(bh);
1279 bhs[out++] = bh;
1280 for (in = 0; in < BH_LRU_SIZE; in++) {
1281 struct buffer_head *bh2 = lru->bhs[in];
1282
1283 if (bh2 == bh) {
1284 __brelse(bh2);
1285 } else {
1286 if (out >= BH_LRU_SIZE) {
1287 BUG_ON(evictee != NULL);
1288 evictee = bh2;
1289 } else {
1290 bhs[out++] = bh2;
1291 }
1292 }
1293 }
1294 while (out < BH_LRU_SIZE)
1295 bhs[out++] = NULL;
1296 memcpy(lru->bhs, bhs, sizeof(bhs));
1297 }
1298 bh_lru_unlock();
1299
1300 if (evictee)
1301 __brelse(evictee);
1302}
1303
1304/*
1305 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1306 */
1307static struct buffer_head *
1308lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1309{
1310 struct buffer_head *ret = NULL;
1311 struct bh_lru *lru;
1312 unsigned int i;
1313
1314 check_irqs_on();
1315 bh_lru_lock();
1316 lru = &__get_cpu_var(bh_lrus);
1317 for (i = 0; i < BH_LRU_SIZE; i++) {
1318 struct buffer_head *bh = lru->bhs[i];
1319
1320 if (bh && bh->b_bdev == bdev &&
1321 bh->b_blocknr == block && bh->b_size == size) {
1322 if (i) {
1323 while (i) {
1324 lru->bhs[i] = lru->bhs[i - 1];
1325 i--;
1326 }
1327 lru->bhs[0] = bh;
1328 }
1329 get_bh(bh);
1330 ret = bh;
1331 break;
1332 }
1333 }
1334 bh_lru_unlock();
1335 return ret;
1336}
1337
1338/*
1339 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1340 * it in the LRU and mark it as accessed. If it is not present then return
1341 * NULL
1342 */
1343struct buffer_head *
1344__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1345{
1346 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1347
1348 if (bh == NULL) {
1349 bh = __find_get_block_slow(bdev, block);
1350 if (bh)
1351 bh_lru_install(bh);
1352 }
1353 if (bh)
1354 touch_buffer(bh);
1355 return bh;
1356}
1357EXPORT_SYMBOL(__find_get_block);
1358
1359/*
1360 * __getblk will locate (and, if necessary, create) the buffer_head
1361 * which corresponds to the passed block_device, block and size. The
1362 * returned buffer has its reference count incremented.
1363 *
1364 * __getblk() cannot fail - it just keeps trying. If you pass it an
1365 * illegal block number, __getblk() will happily return a buffer_head
1366 * which represents the non-existent block. Very weird.
1367 *
1368 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1369 * attempt is failing. FIXME, perhaps?
1370 */
1371struct buffer_head *
1372__getblk(struct block_device *bdev, sector_t block, unsigned size)
1373{
1374 struct buffer_head *bh = __find_get_block(bdev, block, size);
1375
1376 might_sleep();
1377 if (bh == NULL)
1378 bh = __getblk_slow(bdev, block, size);
1379 return bh;
1380}
1381EXPORT_SYMBOL(__getblk);
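/*
 * Illustrative sketch: when a block is about to be completely
 * overwritten there is no need to read it first, so sb_getblk() (a
 * wrapper around __getblk()) is preferred over sb_bread(); blocknr is
 * hypothetical:
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */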
1382
1383/*
1384 * Do async read-ahead on a buffer..
1385 */
1386void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1387{
1388 struct buffer_head *bh = __getblk(bdev, block, size);
1389 if (likely(bh)) {
1390 ll_rw_block(READA, 1, &bh);
1391 brelse(bh);
1392 }
1393}
1394EXPORT_SYMBOL(__breadahead);
1395
1396/**
1397 * __bread() - reads a specified block and returns the bh
1398 * @bdev: the block_device to read from
1399 * @block: number of block
1400 * @size: size (in bytes) to read
1401 *
1402 * Reads a specified block, and returns buffer head that contains it.
1403 * It returns NULL if the block was unreadable.
1404 */
1405struct buffer_head *
1406__bread(struct block_device *bdev, sector_t block, unsigned size)
1407{
1408 struct buffer_head *bh = __getblk(bdev, block, size);
1409
1410 if (likely(bh) && !buffer_uptodate(bh))
1411 bh = __bread_slow(bh);
1412 return bh;
1413}
1414EXPORT_SYMBOL(__bread);
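/*
 * Illustrative sketch: reading and validating an on-disk structure.
 * struct example_super and EXAMPLE_MAGIC are hypothetical names:
 *
 *	struct buffer_head *bh = __bread(bdev, 1, 1024);
 *	struct example_super *es;
 *
 *	if (!bh)
 *		return -EIO;
 *	es = (struct example_super *)bh->b_data;
 *	if (le32_to_cpu(es->s_magic) != EXAMPLE_MAGIC)
 *		err = -EINVAL;
 *	brelse(bh);
 */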
1415
1416/*
1417 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1418 * This doesn't race because it runs in each cpu either in irq
1419 * or with preempt disabled.
1420 */
1421static void invalidate_bh_lru(void *arg)
1422{
1423 struct bh_lru *b = &get_cpu_var(bh_lrus);
1424 int i;
1425
1426 for (i = 0; i < BH_LRU_SIZE; i++) {
1427 brelse(b->bhs[i]);
1428 b->bhs[i] = NULL;
1429 }
1430 put_cpu_var(bh_lrus);
1431}
1432
1433void invalidate_bh_lrus(void)
1434{
1435 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1436}
1437
1438void set_bh_page(struct buffer_head *bh,
1439 struct page *page, unsigned long offset)
1440{
1441 bh->b_page = page;
1442 BUG_ON(offset >= PAGE_SIZE);
1443 if (PageHighMem(page))
1444 /*
1445 * This catches illegal uses and preserves the offset:
1446 */
1447 bh->b_data = (char *)(0 + offset);
1448 else
1449 bh->b_data = page_address(page) + offset;
1450}
1451EXPORT_SYMBOL(set_bh_page);
1452
1453/*
1454 * Called when truncating a buffer on a page completely.
1455 */
1456static void discard_buffer(struct buffer_head * bh)
1457{
1458 lock_buffer(bh);
1459 clear_buffer_dirty(bh);
1460 bh->b_bdev = NULL;
1461 clear_buffer_mapped(bh);
1462 clear_buffer_req(bh);
1463 clear_buffer_new(bh);
1464 clear_buffer_delay(bh);
1465 clear_buffer_unwritten(bh);
1466 unlock_buffer(bh);
1467}
1468
1469/**
1470 * block_invalidatepage - invalidate part or all of a buffer-backed page
1471 *
1472 * @page: the page which is affected
1473 * @offset: the index of the truncation point
1474 *
1475 * block_invalidatepage() is called when all or part of the page has become
1476 * invalidated by a truncate operation.
1477 *
1478 * block_invalidatepage() does not have to release all buffers, but it must
1479 * ensure that no dirty buffer is left outside @offset and that no I/O
1480 * is underway against any of the blocks which are outside the truncation
1481 * point. Because the caller is about to free (and possibly reuse) those
1482 * blocks on-disk.
1483 */
1484void block_invalidatepage(struct page *page, unsigned long offset)
1485{
1486 struct buffer_head *head, *bh, *next;
1487 unsigned int curr_off = 0;
1488
1489 BUG_ON(!PageLocked(page));
1490 if (!page_has_buffers(page))
1491 goto out;
1492
1493 head = page_buffers(page);
1494 bh = head;
1495 do {
1496 unsigned int next_off = curr_off + bh->b_size;
1497 next = bh->b_this_page;
1498
1499 /*
1500 * is this block fully invalidated?
1501 */
1502 if (offset <= curr_off)
1503 discard_buffer(bh);
1504 curr_off = next_off;
1505 bh = next;
1506 } while (bh != head);
1507
1508 /*
1509 * We release buffers only if the entire page is being invalidated.
1510 * The get_block cached value has been unconditionally invalidated,
1511 * so real IO is not possible anymore.
1512 */
1513 if (offset == 0)
1514 try_to_release_page(page, 0);
1515out:
1516 return;
1517}
1518EXPORT_SYMBOL(block_invalidatepage);
1519
1520/*
1521 * We attach and possibly dirty the buffers atomically wrt
1522 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1523 * is already excluded via the page lock.
1524 */
1525void create_empty_buffers(struct page *page,
1526 unsigned long blocksize, unsigned long b_state)
1527{
1528 struct buffer_head *bh, *head, *tail;
1529
1530 head = alloc_page_buffers(page, blocksize, 1);
1531 bh = head;
1532 do {
1533 bh->b_state |= b_state;
1534 tail = bh;
1535 bh = bh->b_this_page;
1536 } while (bh);
1537 tail->b_this_page = head;
1538
1539 spin_lock(&page->mapping->private_lock);
1540 if (PageUptodate(page) || PageDirty(page)) {
1541 bh = head;
1542 do {
1543 if (PageDirty(page))
1544 set_buffer_dirty(bh);
1545 if (PageUptodate(page))
1546 set_buffer_uptodate(bh);
1547 bh = bh->b_this_page;
1548 } while (bh != head);
1549 }
1550 attach_page_buffers(page, head);
1551 spin_unlock(&page->mapping->private_lock);
1552}
1553EXPORT_SYMBOL(create_empty_buffers);
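/*
 * Illustrative sketch: the per-page helpers below follow the same
 * pattern of attaching buffers on demand and then walking the ring:
 *
 *	if (!page_has_buffers(page))
 *		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *	head = bh = page_buffers(page);
 *	do {
 *		// per-block work goes here
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */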
1554
1555/*
1556 * We are taking a block for data and we don't want any output from any
1557 * buffer-cache aliases starting from return from that function and
1558 * until the moment when something will explicitly mark the buffer
1559 * dirty (hopefully that will not happen until we will free that block ;-)
1560 * We don't even need to mark it not-uptodate - nobody can expect
1561 * anything from a newly allocated buffer anyway. We used to use
1562 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1563 * don't want to mark the alias unmapped, for example - it would confuse
1564 * anyone who might pick it with bread() afterwards...
1565 *
1566 * Also.. Note that bforget() doesn't lock the buffer. So there can
1567 * be writeout I/O going on against recently-freed buffers. We don't
1568 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1569 * only if we really need to. That happens here.
1570 */
1571void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1572{
1573 struct buffer_head *old_bh;
1574
1575 might_sleep();
1576
1577 old_bh = __find_get_block_slow(bdev, block);
1578 if (old_bh) {
1579 clear_buffer_dirty(old_bh);
1580 wait_on_buffer(old_bh);
1581 clear_buffer_req(old_bh);
1582 __brelse(old_bh);
1583 }
1584}
1585EXPORT_SYMBOL(unmap_underlying_metadata);
1586
1587/*
1588 * NOTE! All mapped/uptodate combinations are valid:
1589 *
1590 * Mapped Uptodate Meaning
1591 *
1592 * No No "unknown" - must do get_block()
1593 * No Yes "hole" - zero-filled
1594 * Yes No "allocated" - allocated on disk, not read in
1595 * Yes Yes "valid" - allocated and up-to-date in memory.
1596 *
1597 * "Dirty" is valid only with the last case (mapped+uptodate).
1598 */
1599
1600/*
1601 * While block_write_full_page is writing back the dirty buffers under
1602 * the page lock, whoever dirtied the buffers may decide to clean them
1603 * again at any time. We handle that by only looking at the buffer
1604 * state inside lock_buffer().
1605 *
1606 * If block_write_full_page() is called for regular writeback
1607 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1608 * locked buffer. This only can happen if someone has written the buffer
1609 * directly, with submit_bh(). At the address_space level PageWriteback
1610 * prevents this contention from occurring.
1611 */
1612static int __block_write_full_page(struct inode *inode, struct page *page,
1613 get_block_t *get_block, struct writeback_control *wbc)
1614{
1615 int err;
1616 sector_t block;
1617 sector_t last_block;
1618 struct buffer_head *bh, *head;
1619 const unsigned blocksize = 1 << inode->i_blkbits;
1620 int nr_underway = 0;
1621
1622 BUG_ON(!PageLocked(page));
1623
1624 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1625
1626 if (!page_has_buffers(page)) {
1627 create_empty_buffers(page, blocksize,
1628 (1 << BH_Dirty)|(1 << BH_Uptodate));
1629 }
1630
1631 /*
1632 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1633 * here, and the (potentially unmapped) buffers may become dirty at
1634 * any time. If a buffer becomes dirty here after we've inspected it
1635 * then we just miss that fact, and the page stays dirty.
1636 *
1637 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1638 * handle that here by just cleaning them.
1639 */
1640
1641 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1642 head = page_buffers(page);
1643 bh = head;
1644
1645 /*
1646 * Get all the dirty buffers mapped to disk addresses and
1647 * handle any aliases from the underlying blockdev's mapping.
1648 */
1649 do {
1650 if (block > last_block) {
1651 /*
1652 * mapped buffers outside i_size will occur, because
1653 * this page can be outside i_size when there is a
1654 * truncate in progress.
1655 */
1656 /*
1657 * The buffer was zeroed by block_write_full_page()
1658 */
1659 clear_buffer_dirty(bh);
1660 set_buffer_uptodate(bh);
1661 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1662 WARN_ON(bh->b_size != blocksize);
1663 err = get_block(inode, block, bh, 1);
1664 if (err)
1665 goto recover;
1666 if (buffer_new(bh)) {
1667 /* blockdev mappings never come here */
1668 clear_buffer_new(bh);
1669 unmap_underlying_metadata(bh->b_bdev,
1670 bh->b_blocknr);
1671 }
1672 }
1673 bh = bh->b_this_page;
1674 block++;
1675 } while (bh != head);
1676
1677 do {
1678 if (!buffer_mapped(bh))
1679 continue;
1680 /*
1681 * If it's a fully non-blocking write attempt and we cannot
1682 * lock the buffer then redirty the page. Note that this can
1683 * potentially cause a busy-wait loop from pdflush and kswapd
1684 * activity, but those code paths have their own higher-level
1685 * throttling.
1686 */
1687 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1688 lock_buffer(bh);
1689 } else if (test_set_buffer_locked(bh)) {
1690 redirty_page_for_writepage(wbc, page);
1691 continue;
1692 }
1693 if (test_clear_buffer_dirty(bh)) {
1694 mark_buffer_async_write(bh);
1695 } else {
1696 unlock_buffer(bh);
1697 }
1698 } while ((bh = bh->b_this_page) != head);
1699
1700 /*
1701 * The page and its buffers are protected by PageWriteback(), so we can
1702 * drop the bh refcounts early.
1703 */
1704 BUG_ON(PageWriteback(page));
1705 set_page_writeback(page);
1706
1707 do {
1708 struct buffer_head *next = bh->b_this_page;
1709 if (buffer_async_write(bh)) {
1710 submit_bh(WRITE, bh);
1711 nr_underway++;
1712 }
1713 bh = next;
1714 } while (bh != head);
1715 unlock_page(page);
1716
1717 err = 0;
1718done:
1719 if (nr_underway == 0) {
1720 /*
1721 * The page was marked dirty, but the buffers were
1722 * clean. Someone wrote them back by hand with
1723 * ll_rw_block/submit_bh. A rare case.
1724 */
1725 end_page_writeback(page);
1726
1727 /*
1728 * The page and buffer_heads can be released at any time from
1729 * here on.
1730 */
1731 wbc->pages_skipped++; /* We didn't write this page */
1732 }
1733 return err;
1734
1735recover:
1736 /*
1737 * ENOSPC, or some other error. We may already have added some
1738 * blocks to the file, so we need to write these out to avoid
1739 * exposing stale data.
1740 * The page is currently locked and not marked for writeback
1741 */
1742 bh = head;
1743 /* Recovery: lock and submit the mapped buffers */
1744 do {
1745 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1746 lock_buffer(bh);
1747 mark_buffer_async_write(bh);
1748 } else {
1749 /*
1750 * The buffer may have been set dirty during
1751 * attachment to a dirty page.
1752 */
1753 clear_buffer_dirty(bh);
1754 }
1755 } while ((bh = bh->b_this_page) != head);
1756 SetPageError(page);
1757 BUG_ON(PageWriteback(page));
1758 mapping_set_error(page->mapping, err);
1759 set_page_writeback(page);
1760 do {
1761 struct buffer_head *next = bh->b_this_page;
1762 if (buffer_async_write(bh)) {
1763 clear_buffer_dirty(bh);
1764 submit_bh(WRITE, bh);
1765 nr_underway++;
1766 }
1767 bh = next;
1768 } while (bh != head);
1769 unlock_page(page);
1770 goto done;
1771}
1772
1773/*
1774 * If a page has any new buffers, zero them out here, and mark them uptodate
1775 * and dirty so they'll be written out (in order to prevent uninitialised
1776 * block data from leaking). And clear the new bit.
1777 */
1778void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1779{
1780 unsigned int block_start, block_end;
1781 struct buffer_head *head, *bh;
1782
1783 BUG_ON(!PageLocked(page));
1784 if (!page_has_buffers(page))
1785 return;
1786
1787 bh = head = page_buffers(page);
1788 block_start = 0;
1789 do {
1790 block_end = block_start + bh->b_size;
1791
1792 if (buffer_new(bh)) {
1793 if (block_end > from && block_start < to) {
1794 if (!PageUptodate(page)) {
1795 unsigned start, size;
1796
1797 start = max(from, block_start);
1798 size = min(to, block_end) - start;
1799
1800 zero_user_page(page, start, size, KM_USER0);
1801 set_buffer_uptodate(bh);
1802 }
1803
1804 clear_buffer_new(bh);
1805 mark_buffer_dirty(bh);
1806 }
1807 }
1808
1809 block_start = block_end;
1810 bh = bh->b_this_page;
1811 } while (bh != head);
1812}
1813EXPORT_SYMBOL(page_zero_new_buffers);
1814
1da177e4
LT
1815static int __block_prepare_write(struct inode *inode, struct page *page,
1816 unsigned from, unsigned to, get_block_t *get_block)
1817{
1818 unsigned block_start, block_end;
1819 sector_t block;
1820 int err = 0;
1821 unsigned blocksize, bbits;
1822 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1823
1824 BUG_ON(!PageLocked(page));
1825 BUG_ON(from > PAGE_CACHE_SIZE);
1826 BUG_ON(to > PAGE_CACHE_SIZE);
1827 BUG_ON(from > to);
1828
1829 blocksize = 1 << inode->i_blkbits;
1830 if (!page_has_buffers(page))
1831 create_empty_buffers(page, blocksize, 0);
1832 head = page_buffers(page);
1833
1834 bbits = inode->i_blkbits;
1835 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1836
1837 for(bh = head, block_start = 0; bh != head || !block_start;
1838 block++, block_start=block_end, bh = bh->b_this_page) {
1839 block_end = block_start + blocksize;
1840 if (block_end <= from || block_start >= to) {
1841 if (PageUptodate(page)) {
1842 if (!buffer_uptodate(bh))
1843 set_buffer_uptodate(bh);
1844 }
1845 continue;
1846 }
1847 if (buffer_new(bh))
1848 clear_buffer_new(bh);
1849 if (!buffer_mapped(bh)) {
b0cf2321 1850 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1851 err = get_block(inode, block, bh, 1);
1852 if (err)
f3ddbdc6 1853 break;
1da177e4 1854 if (buffer_new(bh)) {
1da177e4
LT
1855 unmap_underlying_metadata(bh->b_bdev,
1856 bh->b_blocknr);
1857 if (PageUptodate(page)) {
637aff46 1858 clear_buffer_new(bh);
1da177e4 1859 set_buffer_uptodate(bh);
637aff46 1860 mark_buffer_dirty(bh);
1da177e4
LT
1861 continue;
1862 }
1863 if (block_end > to || block_start < from) {
1864 void *kaddr;
1865
1866 kaddr = kmap_atomic(page, KM_USER0);
1867 if (block_end > to)
1868 memset(kaddr+to, 0,
1869 block_end-to);
1870 if (block_start < from)
1871 memset(kaddr+block_start,
1872 0, from-block_start);
1873 flush_dcache_page(page);
1874 kunmap_atomic(kaddr, KM_USER0);
1875 }
1876 continue;
1877 }
1878 }
1879 if (PageUptodate(page)) {
1880 if (!buffer_uptodate(bh))
1881 set_buffer_uptodate(bh);
1882 continue;
1883 }
1884 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1885 !buffer_unwritten(bh) &&
1da177e4
LT
1886 (block_start < from || block_end > to)) {
1887 ll_rw_block(READ, 1, &bh);
1888 *wait_bh++=bh;
1889 }
1890 }
1891 /*
1892 * If we issued read requests - let them complete.
1893 */
1894 while(wait_bh > wait) {
1895 wait_on_buffer(*--wait_bh);
1896 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1897 err = -EIO;
1da177e4 1898 }
afddba49
NP
1899 if (unlikely(err))
1900 page_zero_new_buffers(page, from, to);
1da177e4
LT
1901 return err;
1902}
1903
1904static int __block_commit_write(struct inode *inode, struct page *page,
1905 unsigned from, unsigned to)
1906{
1907 unsigned block_start, block_end;
1908 int partial = 0;
1909 unsigned blocksize;
1910 struct buffer_head *bh, *head;
1911
1912 blocksize = 1 << inode->i_blkbits;
1913
1914 for(bh = head = page_buffers(page), block_start = 0;
1915 bh != head || !block_start;
1916 block_start=block_end, bh = bh->b_this_page) {
1917 block_end = block_start + blocksize;
1918 if (block_end <= from || block_start >= to) {
1919 if (!buffer_uptodate(bh))
1920 partial = 1;
1921 } else {
1922 set_buffer_uptodate(bh);
1923 mark_buffer_dirty(bh);
1924 }
afddba49 1925 clear_buffer_new(bh);
1da177e4
LT
1926 }
1927
1928 /*
1929 * If this is a partial write which happened to make all buffers
1930 * uptodate then we can optimize away a bogus readpage() for
1931 * the next read(). Here we 'discover' whether the page went
1932 * uptodate as a result of this (potentially partial) write.
1933 */
1934 if (!partial)
1935 SetPageUptodate(page);
1936 return 0;
1937}
1938
afddba49
NP
1939/*
1940 * block_write_begin takes care of the basic task of block allocation and
1941 * bringing partial write blocks uptodate first.
1942 *
1943 * If *pagep is not NULL, then block_write_begin uses the locked page
1944 * at *pagep rather than allocating its own. In this case, the page will
1945 * not be unlocked or deallocated on failure.
1946 */
1947int block_write_begin(struct file *file, struct address_space *mapping,
1948 loff_t pos, unsigned len, unsigned flags,
1949 struct page **pagep, void **fsdata,
1950 get_block_t *get_block)
1951{
1952 struct inode *inode = mapping->host;
1953 int status = 0;
1954 struct page *page;
1955 pgoff_t index;
1956 unsigned start, end;
1957 int ownpage = 0;
1958
1959 index = pos >> PAGE_CACHE_SHIFT;
1960 start = pos & (PAGE_CACHE_SIZE - 1);
1961 end = start + len;
1962
1963 page = *pagep;
1964 if (page == NULL) {
1965 ownpage = 1;
1966 page = __grab_cache_page(mapping, index);
1967 if (!page) {
1968 status = -ENOMEM;
1969 goto out;
1970 }
1971 *pagep = page;
1972 } else
1973 BUG_ON(!PageLocked(page));
1974
1975 status = __block_prepare_write(inode, page, start, end, get_block);
1976 if (unlikely(status)) {
1977 ClearPageUptodate(page);
1978
1979 if (ownpage) {
1980 unlock_page(page);
1981 page_cache_release(page);
1982 *pagep = NULL;
1983
1984 /*
1985 * prepare_write() may have instantiated a few blocks
1986 * outside i_size. Trim these off again. Don't need
1987 * i_size_read because we hold i_mutex.
1988 */
1989 if (pos + len > inode->i_size)
1990 vmtruncate(inode, inode->i_size);
1991 }
1992 goto out;
1993 }
1994
1995out:
1996 return status;
1997}
1998EXPORT_SYMBOL(block_write_begin);
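/*
 * Illustrative sketch, not part of buffer.c: a filesystem's ->write_begin is
 * typically a thin wrapper that passes a NULL *pagep so block_write_begin()
 * grabs and locks the page itself.  The myfs_* names are hypothetical;
 * myfs_get_block stands in for the filesystem's get_block_t.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;		/* let block_write_begin allocate the page */
	return block_write_begin(file, mapping, pos, len, flags,
					pagep, fsdata, myfs_get_block);
}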
1999
2000int block_write_end(struct file *file, struct address_space *mapping,
2001 loff_t pos, unsigned len, unsigned copied,
2002 struct page *page, void *fsdata)
2003{
2004 struct inode *inode = mapping->host;
2005 unsigned start;
2006
2007 start = pos & (PAGE_CACHE_SIZE - 1);
2008
2009 if (unlikely(copied < len)) {
2010 /*
2011 * The buffers that were written will now be uptodate, so we
2012 * don't have to worry about a readpage reading them and
2013	 * overwriting a partial write. However, if we have encountered
2014	 * a short write and only partially written into a buffer, that buffer
2015	 * will not be marked uptodate, so a readpage might come in and
2016 * destroy our partial write.
2017 *
2018 * Do the simplest thing, and just treat any short write to a
2019 * non uptodate page as a zero-length write, and force the
2020 * caller to redo the whole thing.
2021 */
2022 if (!PageUptodate(page))
2023 copied = 0;
2024
2025 page_zero_new_buffers(page, start+copied, start+len);
2026 }
2027 flush_dcache_page(page);
2028
2029 /* This could be a short (even 0-length) commit */
2030 __block_commit_write(inode, page, start, start+copied);
2031
2032 return copied;
2033}
2034EXPORT_SYMBOL(block_write_end);
2035
2036int generic_write_end(struct file *file, struct address_space *mapping,
2037 loff_t pos, unsigned len, unsigned copied,
2038 struct page *page, void *fsdata)
2039{
2040 struct inode *inode = mapping->host;
2041
2042 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2043
2044 /*
2045 * No need to use i_size_read() here, the i_size
2046 * cannot change under us because we hold i_mutex.
2047 *
2048 * But it's important to update i_size while still holding page lock:
2049 * page writeout could otherwise come in and zero beyond i_size.
2050 */
2051 if (pos+copied > inode->i_size) {
2052 i_size_write(inode, pos+copied);
2053 mark_inode_dirty(inode);
2054 }
2055
2056 unlock_page(page);
2057 page_cache_release(page);
2058
2059 return copied;
2060}
2061EXPORT_SYMBOL(generic_write_end);
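/*
 * Illustrative sketch, not part of buffer.c: wiring the helpers above into
 * the new write_begin/write_end address_space_operations.  All myfs_* entries
 * are hypothetical wrappers (myfs_write_begin() is sketched earlier);
 * generic_write_end() can be used directly since it updates i_size and
 * releases the page itself.
 */
static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= myfs_bmap,
};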
2062
1da177e4
LT
2063/*
2064 * Generic "read page" function for block devices that have the normal
2065 * get_block functionality. This covers most block-device filesystems.
2066 * Reads the page asynchronously --- the unlock_buffer() and
2067 * set/clear_buffer_uptodate() functions propagate buffer state into the
2068 * page struct once IO has completed.
2069 */
2070int block_read_full_page(struct page *page, get_block_t *get_block)
2071{
2072 struct inode *inode = page->mapping->host;
2073 sector_t iblock, lblock;
2074 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2075 unsigned int blocksize;
2076 int nr, i;
2077 int fully_mapped = 1;
2078
cd7619d6 2079 BUG_ON(!PageLocked(page));
1da177e4
LT
2080 blocksize = 1 << inode->i_blkbits;
2081 if (!page_has_buffers(page))
2082 create_empty_buffers(page, blocksize, 0);
2083 head = page_buffers(page);
2084
2085 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2086 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2087 bh = head;
2088 nr = 0;
2089 i = 0;
2090
2091 do {
2092 if (buffer_uptodate(bh))
2093 continue;
2094
2095 if (!buffer_mapped(bh)) {
c64610ba
AM
2096 int err = 0;
2097
1da177e4
LT
2098 fully_mapped = 0;
2099 if (iblock < lblock) {
b0cf2321 2100 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2101 err = get_block(inode, iblock, bh, 0);
2102 if (err)
1da177e4
LT
2103 SetPageError(page);
2104 }
2105 if (!buffer_mapped(bh)) {
01f2705d
ND
2106 zero_user_page(page, i * blocksize, blocksize,
2107 KM_USER0);
c64610ba
AM
2108 if (!err)
2109 set_buffer_uptodate(bh);
1da177e4
LT
2110 continue;
2111 }
2112 /*
2113 * get_block() might have updated the buffer
2114 * synchronously
2115 */
2116 if (buffer_uptodate(bh))
2117 continue;
2118 }
2119 arr[nr++] = bh;
2120 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2121
2122 if (fully_mapped)
2123 SetPageMappedToDisk(page);
2124
2125 if (!nr) {
2126 /*
2127 * All buffers are uptodate - we can set the page uptodate
2128 * as well. But not if get_block() returned an error.
2129 */
2130 if (!PageError(page))
2131 SetPageUptodate(page);
2132 unlock_page(page);
2133 return 0;
2134 }
2135
2136 /* Stage two: lock the buffers */
2137 for (i = 0; i < nr; i++) {
2138 bh = arr[i];
2139 lock_buffer(bh);
2140 mark_buffer_async_read(bh);
2141 }
2142
2143 /*
2144 * Stage 3: start the IO. Check for uptodateness
2145 * inside the buffer lock in case another process reading
2146 * the underlying blockdev brought it uptodate (the sct fix).
2147 */
2148 for (i = 0; i < nr; i++) {
2149 bh = arr[i];
2150 if (buffer_uptodate(bh))
2151 end_buffer_async_read(bh, 1);
2152 else
2153 submit_bh(READ, bh);
2154 }
2155 return 0;
2156}
2157
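/*
 * Illustrative sketch, not part of buffer.c: a minimal ->readpage built on
 * block_read_full_page(), in the style used by simple block-backed
 * filesystems.  myfs_get_block is a hypothetical get_block_t.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}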
2158/* utility function for filesystems that need to do work on expanding
2159 * truncates. Uses prepare/commit_write to allow the filesystem to
2160 * deal with the hole.
2161 */
05eb0b51
OH
2162static int __generic_cont_expand(struct inode *inode, loff_t size,
2163 pgoff_t index, unsigned int offset)
1da177e4
LT
2164{
2165 struct address_space *mapping = inode->i_mapping;
2166 struct page *page;
05eb0b51 2167 unsigned long limit;
1da177e4
LT
2168 int err;
2169
2170 err = -EFBIG;
2171 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2172 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2173 send_sig(SIGXFSZ, current, 0);
2174 goto out;
2175 }
2176 if (size > inode->i_sb->s_maxbytes)
2177 goto out;
2178
1da177e4
LT
2179 err = -ENOMEM;
2180 page = grab_cache_page(mapping, index);
2181 if (!page)
2182 goto out;
2183 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
05eb0b51
OH
2184 if (err) {
2185 /*
2186 * ->prepare_write() may have instantiated a few blocks
2187 * outside i_size. Trim these off again.
2188 */
2189 unlock_page(page);
2190 page_cache_release(page);
2191 vmtruncate(inode, inode->i_size);
2192 goto out;
1da177e4 2193 }
05eb0b51
OH
2194
2195 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2196
1da177e4
LT
2197 unlock_page(page);
2198 page_cache_release(page);
2199 if (err > 0)
2200 err = 0;
2201out:
2202 return err;
2203}
2204
05eb0b51
OH
2205int generic_cont_expand(struct inode *inode, loff_t size)
2206{
2207 pgoff_t index;
2208 unsigned int offset;
2209
2210 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2211
2212 /* ugh. in prepare/commit_write, if from==to==start of block, we
2213 ** skip the prepare. make sure we never send an offset for the start
2214 ** of a block
2215 */
2216 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2217 /* caller must handle this extra byte. */
2218 offset++;
2219 }
2220 index = size >> PAGE_CACHE_SHIFT;
2221
2222 return __generic_cont_expand(inode, size, index, offset);
2223}
2224
2225int generic_cont_expand_simple(struct inode *inode, loff_t size)
2226{
2227 loff_t pos = size - 1;
2228 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2229 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2230
2231 /* prepare/commit_write can handle even if from==to==start of block. */
2232 return __generic_cont_expand(inode, size, index, offset);
2233}
2234
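/*
 * Illustrative sketch, not part of buffer.c: one way a filesystem can grow a
 * file from its setattr/truncate path is to let generic_cont_expand_simple()
 * instantiate and zero the block at the new EOF.  myfs_expand() is a
 * hypothetical helper; shrinking is assumed to be handled elsewhere.
 */
static int myfs_expand(struct inode *inode, loff_t newsize)
{
	if (newsize <= inode->i_size)
		return 0;
	return generic_cont_expand_simple(inode, newsize);
}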
1da177e4
LT
2235/*
2236 * For moronic filesystems that do not allow holes in a file.
2237 * We may have to extend the file.
2238 */
2239
2240int cont_prepare_write(struct page *page, unsigned offset,
2241 unsigned to, get_block_t *get_block, loff_t *bytes)
2242{
2243 struct address_space *mapping = page->mapping;
2244 struct inode *inode = mapping->host;
2245 struct page *new_page;
2246 pgoff_t pgpos;
2247 long status;
2248 unsigned zerofrom;
2249 unsigned blocksize = 1 << inode->i_blkbits;
1da177e4
LT
2250
2251 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2252 status = -ENOMEM;
2253 new_page = grab_cache_page(mapping, pgpos);
2254 if (!new_page)
2255 goto out;
2256 /* we might sleep */
2257 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2258 unlock_page(new_page);
2259 page_cache_release(new_page);
2260 continue;
2261 }
2262 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2263 if (zerofrom & (blocksize-1)) {
2264 *bytes |= (blocksize-1);
2265 (*bytes)++;
2266 }
2267 status = __block_prepare_write(inode, new_page, zerofrom,
2268 PAGE_CACHE_SIZE, get_block);
2269 if (status)
2270 goto out_unmap;
ff1be9ad 2271 zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
01f2705d 2272 KM_USER0);
1da177e4
LT
2273 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2274 unlock_page(new_page);
2275 page_cache_release(new_page);
2276 }
2277
2278 if (page->index < pgpos) {
2279 /* completely inside the area */
2280 zerofrom = offset;
2281 } else {
2282 /* page covers the boundary, find the boundary offset */
2283 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2284
2285 /* if we are going to expand the file, the last block will be filled */
2286 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2287 *bytes |= (blocksize-1);
2288 (*bytes)++;
2289 }
2290
2291 /* starting below the boundary? Nothing to zero out */
2292 if (offset <= zerofrom)
2293 zerofrom = offset;
2294 }
2295 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2296 if (status)
2297 goto out1;
2298 if (zerofrom < offset) {
01f2705d 2299 zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
1da177e4
LT
2300 __block_commit_write(inode, page, zerofrom, offset);
2301 }
2302 return 0;
2303out1:
2304 ClearPageUptodate(page);
2305 return status;
2306
2307out_unmap:
2308 ClearPageUptodate(new_page);
2309 unlock_page(new_page);
2310 page_cache_release(new_page);
2311out:
2312 return status;
2313}
2314
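/*
 * Illustrative sketch, not part of buffer.c: a FAT-style filesystem without
 * holes keeps a per-inode cursor recording how far the file has been
 * instantiated on disk and hands its address to cont_prepare_write() from
 * ->prepare_write.  The myfs_* names and the i_disksize field are
 * hypothetical.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return cont_prepare_write(page, from, to, myfs_get_block,
					&MYFS_I(inode)->i_disksize);
}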
2315int block_prepare_write(struct page *page, unsigned from, unsigned to,
2316 get_block_t *get_block)
2317{
2318 struct inode *inode = page->mapping->host;
2319 int err = __block_prepare_write(inode, page, from, to, get_block);
2320 if (err)
2321 ClearPageUptodate(page);
2322 return err;
2323}
2324
2325int block_commit_write(struct page *page, unsigned from, unsigned to)
2326{
2327 struct inode *inode = page->mapping->host;
2328 __block_commit_write(inode,page,from,to);
2329 return 0;
2330}
2331
2332int generic_commit_write(struct file *file, struct page *page,
2333 unsigned from, unsigned to)
2334{
2335 struct inode *inode = page->mapping->host;
2336 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2337 __block_commit_write(inode,page,from,to);
2338 /*
2339 * No need to use i_size_read() here, the i_size
1b1dcc1b 2340 * cannot change under us because we hold i_mutex.
1da177e4
LT
2341 */
2342 if (pos > inode->i_size) {
2343 i_size_write(inode, pos);
2344 mark_inode_dirty(inode);
2345 }
2346 return 0;
2347}
2348
54171690
DC
2349/*
2350 * block_page_mkwrite() is not allowed to change the file size as it gets
2351 * called from a page fault handler when a page is first dirtied. Hence we must
2352 * be careful to check for EOF conditions here. We set the page up correctly
2353 * for a written page which means we get ENOSPC checking when writing into
2354 * holes and correct delalloc and unwritten extent mapping on filesystems that
2355 * support these features.
2356 *
2357 * We are not allowed to take the i_mutex here so we have to play games to
2358 * protect against truncate races as the page could now be beyond EOF. Because
2359 * vmtruncate() writes the inode size before removing pages, once we have the
2360 * page lock we can determine safely if the page is beyond EOF. If it is not
2361 * beyond EOF, then the page is guaranteed safe against truncation until we
2362 * unlock the page.
2363 */
2364int
2365block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2366 get_block_t get_block)
2367{
2368 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2369 unsigned long end;
2370 loff_t size;
2371 int ret = -EINVAL;
2372
2373 lock_page(page);
2374 size = i_size_read(inode);
2375 if ((page->mapping != inode->i_mapping) ||
18336338 2376 (page_offset(page) > size)) {
54171690
DC
2377 /* page got truncated out from underneath us */
2378 goto out_unlock;
2379 }
2380
2381 /* page is wholly or partially inside EOF */
2382 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2383 end = size & ~PAGE_CACHE_MASK;
2384 else
2385 end = PAGE_CACHE_SIZE;
2386
2387 ret = block_prepare_write(page, 0, end, get_block);
2388 if (!ret)
2389 ret = block_commit_write(page, 0, end);
2390
2391out_unlock:
2392 unlock_page(page);
2393 return ret;
2394}
1da177e4
LT
2395
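/*
 * Illustrative sketch, not part of buffer.c: a filesystem that wants blocks
 * allocated when an mmapped page is first written points its
 * vm_operations_struct ->page_mkwrite at a wrapper like this.  The myfs_*
 * names are hypothetical.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}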
2396/*
2397 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2398 * immediately, while under the page lock. So it needs a special end_io
2399 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2400 */
2401static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2402{
68671f35 2403 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2404}
2405
2406/*
2407 * On entry, the page is fully not uptodate.
2408 * On exit the page is fully uptodate in the areas outside (from,to)
2409 */
2410int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2411 get_block_t *get_block)
2412{
2413 struct inode *inode = page->mapping->host;
2414 const unsigned blkbits = inode->i_blkbits;
2415 const unsigned blocksize = 1 << blkbits;
a4b0672d 2416 struct buffer_head *head, *bh;
1da177e4 2417 unsigned block_in_page;
a4b0672d 2418 unsigned block_start, block_end;
1da177e4
LT
2419 sector_t block_in_file;
2420 char *kaddr;
2421 int nr_reads = 0;
1da177e4
LT
2422 int ret = 0;
2423 int is_mapped_to_disk = 1;
1da177e4 2424
a4b0672d
NP
2425 if (page_has_buffers(page))
2426 return block_prepare_write(page, from, to, get_block);
2427
1da177e4
LT
2428 if (PageMappedToDisk(page))
2429 return 0;
2430
a4b0672d
NP
2431 /*
2432 * Allocate buffers so that we can keep track of state, and potentially
2433 * attach them to the page if an error occurs. In the common case of
2434 * no error, they will just be freed again without ever being attached
2435 * to the page (which is all OK, because we're under the page lock).
2436 *
2437 * Be careful: the buffer linked list is a NULL terminated one, rather
2438 * than the circular one we're used to.
2439 */
2440 head = alloc_page_buffers(page, blocksize, 0);
2441 if (!head)
2442 return -ENOMEM;
2443
1da177e4 2444 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2445
2446 /*
2447 * We loop across all blocks in the page, whether or not they are
2448 * part of the affected region. This is so we can discover if the
2449 * page is fully mapped-to-disk.
2450 */
a4b0672d 2451 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2452 block_start < PAGE_CACHE_SIZE;
a4b0672d 2453 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2454 int create;
2455
a4b0672d
NP
2456 block_end = block_start + blocksize;
2457 bh->b_state = 0;
1da177e4
LT
2458 create = 1;
2459 if (block_start >= to)
2460 create = 0;
2461 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2462 bh, create);
1da177e4
LT
2463 if (ret)
2464 goto failed;
a4b0672d 2465 if (!buffer_mapped(bh))
1da177e4 2466 is_mapped_to_disk = 0;
a4b0672d
NP
2467 if (buffer_new(bh))
2468 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2469 if (PageUptodate(page)) {
2470 set_buffer_uptodate(bh);
1da177e4 2471 continue;
a4b0672d
NP
2472 }
2473 if (buffer_new(bh) || !buffer_mapped(bh)) {
1da177e4 2474 kaddr = kmap_atomic(page, KM_USER0);
22c8ca78 2475 if (block_start < from)
1da177e4 2476 memset(kaddr+block_start, 0, from-block_start);
22c8ca78 2477 if (block_end > to)
1da177e4 2478 memset(kaddr + to, 0, block_end - to);
1da177e4
LT
2479 flush_dcache_page(page);
2480 kunmap_atomic(kaddr, KM_USER0);
2481 continue;
2482 }
a4b0672d 2483 if (buffer_uptodate(bh))
1da177e4
LT
2484 continue; /* reiserfs does this */
2485 if (block_start < from || block_end > to) {
a4b0672d
NP
2486 lock_buffer(bh);
2487 bh->b_end_io = end_buffer_read_nobh;
2488 submit_bh(READ, bh);
2489 nr_reads++;
1da177e4
LT
2490 }
2491 }
2492
2493 if (nr_reads) {
1da177e4
LT
2494 /*
2495 * The page is locked, so these buffers are protected from
2496 * any VM or truncate activity. Hence we don't need to care
2497 * for the buffer_head refcounts.
2498 */
a4b0672d 2499 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2500 wait_on_buffer(bh);
2501 if (!buffer_uptodate(bh))
2502 ret = -EIO;
1da177e4
LT
2503 }
2504 if (ret)
2505 goto failed;
2506 }
2507
2508 if (is_mapped_to_disk)
2509 SetPageMappedToDisk(page);
1da177e4 2510
a4b0672d
NP
2511 do {
2512 bh = head;
2513 head = head->b_this_page;
2514 free_buffer_head(bh);
2515 } while (head);
2516
1da177e4
LT
2517 return 0;
2518
2519failed:
1da177e4 2520 /*
a4b0672d
NP
2521 * Error recovery is a bit difficult. We need to zero out blocks that
2522 * were newly allocated, and dirty them to ensure they get written out.
2523 * Buffers need to be attached to the page at this point, otherwise
2524 * the handling of potential IO errors during writeout would be hard
2525 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2526 */
a4b0672d
NP
2527 spin_lock(&page->mapping->private_lock);
2528 bh = head;
2529 block_start = 0;
2530 do {
2531 if (PageUptodate(page))
2532 set_buffer_uptodate(bh);
2533 if (PageDirty(page))
2534 set_buffer_dirty(bh);
2535
2536 block_end = block_start+blocksize;
2537 if (block_end <= from)
2538 goto next;
2539 if (block_start >= to)
2540 goto next;
2541
2542 if (buffer_new(bh)) {
2543 clear_buffer_new(bh);
2544 if (!buffer_uptodate(bh)) {
2545 zero_user_page(page, block_start, bh->b_size, KM_USER0);
2546 set_buffer_uptodate(bh);
2547 }
2548 mark_buffer_dirty(bh);
2549 }
2550next:
2551 block_start = block_end;
2552 if (!bh->b_this_page)
2553 bh->b_this_page = head;
2554 bh = bh->b_this_page;
2555 } while (bh != head);
2556 attach_page_buffers(page, head);
2557 spin_unlock(&page->mapping->private_lock);
2558
1da177e4
LT
2559 return ret;
2560}
2561EXPORT_SYMBOL(nobh_prepare_write);
2562
57bf63d6
DK
2563/*
2564 * Make sure any changes to nobh_commit_write() are reflected in
2565 * nobh_truncate_page(), since it doesn't call commit_write().
2566 */
1da177e4
LT
2567int nobh_commit_write(struct file *file, struct page *page,
2568 unsigned from, unsigned to)
2569{
2570 struct inode *inode = page->mapping->host;
2571 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2572
a4b0672d
NP
2573 if (page_has_buffers(page))
2574 return generic_commit_write(file, page, from, to);
2575
22c8ca78 2576 SetPageUptodate(page);
1da177e4
LT
2577 set_page_dirty(page);
2578 if (pos > inode->i_size) {
2579 i_size_write(inode, pos);
2580 mark_inode_dirty(inode);
2581 }
2582 return 0;
2583}
2584EXPORT_SYMBOL(nobh_commit_write);
2585
2586/*
2587 * nobh_writepage() - based on block_write_full_page() except
2588 * that it tries to operate without attaching bufferheads to
2589 * the page.
2590 */
2591int nobh_writepage(struct page *page, get_block_t *get_block,
2592 struct writeback_control *wbc)
2593{
2594 struct inode * const inode = page->mapping->host;
2595 loff_t i_size = i_size_read(inode);
2596 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2597 unsigned offset;
1da177e4
LT
2598 int ret;
2599
2600 /* Is the page fully inside i_size? */
2601 if (page->index < end_index)
2602 goto out;
2603
2604 /* Is the page fully outside i_size? (truncate in progress) */
2605 offset = i_size & (PAGE_CACHE_SIZE-1);
2606 if (page->index >= end_index+1 || !offset) {
2607 /*
2608 * The page may have dirty, unmapped buffers. For example,
2609 * they may have been added in ext3_writepage(). Make them
2610 * freeable here, so the page does not leak.
2611 */
2612#if 0
2613 /* Not really sure about this - do we need this ? */
2614 if (page->mapping->a_ops->invalidatepage)
2615 page->mapping->a_ops->invalidatepage(page, offset);
2616#endif
2617 unlock_page(page);
2618 return 0; /* don't care */
2619 }
2620
2621 /*
2622 * The page straddles i_size. It must be zeroed out on each and every
2623 * writepage invocation because it may be mmapped. "A file is mapped
2624 * in multiples of the page size. For a file that is not a multiple of
2625 * the page size, the remaining memory is zeroed when mapped, and
2626 * writes to that region are not written out to the file."
2627 */
01f2705d 2628 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
1da177e4
LT
2629out:
2630 ret = mpage_writepage(page, get_block, wbc);
2631 if (ret == -EAGAIN)
2632 ret = __block_write_full_page(inode, page, get_block, wbc);
2633 return ret;
2634}
2635EXPORT_SYMBOL(nobh_writepage);
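/*
 * Illustrative sketch, not part of buffer.c: an ext2-like "nobh" mount uses
 * the helpers above through thin wrappers, since nobh_prepare_write() and
 * nobh_writepage() take the get_block_t directly.  All myfs_* names are
 * hypothetical.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
				struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
	.bmap		= myfs_bmap,
};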
2636
2637/*
2638 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2639 */
2640int nobh_truncate_page(struct address_space *mapping, loff_t from)
2641{
2642 struct inode *inode = mapping->host;
2643 unsigned blocksize = 1 << inode->i_blkbits;
2644 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2645 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2646 unsigned to;
2647 struct page *page;
f5e54d6e 2648 const struct address_space_operations *a_ops = mapping->a_ops;
1da177e4
LT
2649 int ret = 0;
2650
2651 if ((offset & (blocksize - 1)) == 0)
2652 goto out;
2653
2654 ret = -ENOMEM;
2655 page = grab_cache_page(mapping, index);
2656 if (!page)
2657 goto out;
2658
2659 to = (offset + blocksize) & ~(blocksize - 1);
2660 ret = a_ops->prepare_write(NULL, page, offset, to);
2661 if (ret == 0) {
01f2705d
ND
2662 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
2663 KM_USER0);
57bf63d6
DK
2664 /*
2665 * It would be more correct to call aops->commit_write()
2666 * here, but this is more efficient.
2667 */
2668 SetPageUptodate(page);
1da177e4
LT
2669 set_page_dirty(page);
2670 }
2671 unlock_page(page);
2672 page_cache_release(page);
2673out:
2674 return ret;
2675}
2676EXPORT_SYMBOL(nobh_truncate_page);
2677
2678int block_truncate_page(struct address_space *mapping,
2679 loff_t from, get_block_t *get_block)
2680{
2681 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2682 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2683 unsigned blocksize;
54b21a79 2684 sector_t iblock;
1da177e4
LT
2685 unsigned length, pos;
2686 struct inode *inode = mapping->host;
2687 struct page *page;
2688 struct buffer_head *bh;
1da177e4
LT
2689 int err;
2690
2691 blocksize = 1 << inode->i_blkbits;
2692 length = offset & (blocksize - 1);
2693
2694 /* Block boundary? Nothing to do */
2695 if (!length)
2696 return 0;
2697
2698 length = blocksize - length;
54b21a79 2699 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2700
2701 page = grab_cache_page(mapping, index);
2702 err = -ENOMEM;
2703 if (!page)
2704 goto out;
2705
2706 if (!page_has_buffers(page))
2707 create_empty_buffers(page, blocksize, 0);
2708
2709 /* Find the buffer that contains "offset" */
2710 bh = page_buffers(page);
2711 pos = blocksize;
2712 while (offset >= pos) {
2713 bh = bh->b_this_page;
2714 iblock++;
2715 pos += blocksize;
2716 }
2717
2718 err = 0;
2719 if (!buffer_mapped(bh)) {
b0cf2321 2720 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2721 err = get_block(inode, iblock, bh, 0);
2722 if (err)
2723 goto unlock;
2724 /* unmapped? It's a hole - nothing to do */
2725 if (!buffer_mapped(bh))
2726 goto unlock;
2727 }
2728
2729 /* Ok, it's mapped. Make sure it's up-to-date */
2730 if (PageUptodate(page))
2731 set_buffer_uptodate(bh);
2732
33a266dd 2733 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2734 err = -EIO;
2735 ll_rw_block(READ, 1, &bh);
2736 wait_on_buffer(bh);
2737 /* Uhhuh. Read error. Complain and punt. */
2738 if (!buffer_uptodate(bh))
2739 goto unlock;
2740 }
2741
01f2705d 2742 zero_user_page(page, offset, length, KM_USER0);
1da177e4
LT
2743 mark_buffer_dirty(bh);
2744 err = 0;
2745
2746unlock:
2747 unlock_page(page);
2748 page_cache_release(page);
2749out:
2750 return err;
2751}
2752
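/*
 * Illustrative sketch, not part of buffer.c: truncate paths call
 * block_truncate_page() to zero the tail of the partial block at the new EOF
 * before freeing the blocks beyond it, roughly as ext2 does.
 * myfs_free_blocks_beyond() and myfs_get_block are hypothetical.
 */
static int myfs_truncate(struct inode *inode)
{
	int err;

	err = block_truncate_page(inode->i_mapping, inode->i_size,
					myfs_get_block);
	if (err)
		return err;
	myfs_free_blocks_beyond(inode, inode->i_size);
	return 0;
}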
2753/*
2754 * The generic ->writepage function for buffer-backed address_spaces
2755 */
2756int block_write_full_page(struct page *page, get_block_t *get_block,
2757 struct writeback_control *wbc)
2758{
2759 struct inode * const inode = page->mapping->host;
2760 loff_t i_size = i_size_read(inode);
2761 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2762 unsigned offset;
1da177e4
LT
2763
2764 /* Is the page fully inside i_size? */
2765 if (page->index < end_index)
2766 return __block_write_full_page(inode, page, get_block, wbc);
2767
2768 /* Is the page fully outside i_size? (truncate in progress) */
2769 offset = i_size & (PAGE_CACHE_SIZE-1);
2770 if (page->index >= end_index+1 || !offset) {
2771 /*
2772 * The page may have dirty, unmapped buffers. For example,
2773 * they may have been added in ext3_writepage(). Make them
2774 * freeable here, so the page does not leak.
2775 */
aaa4059b 2776 do_invalidatepage(page, 0);
1da177e4
LT
2777 unlock_page(page);
2778 return 0; /* don't care */
2779 }
2780
2781 /*
2782 * The page straddles i_size. It must be zeroed out on each and every
2783 * writepage invocation because it may be mmapped. "A file is mapped
2784 * in multiples of the page size. For a file that is not a multiple of
2785 * the page size, the remaining memory is zeroed when mapped, and
2786 * writes to that region are not written out to the file."
2787 */
01f2705d 2788 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
1da177e4
LT
2789 return __block_write_full_page(inode, page, get_block, wbc);
2790}
2791
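/*
 * Illustrative sketch, not part of buffer.c: the usual thin ->writepage
 * wrapper around block_write_full_page().  myfs_get_block is a hypothetical
 * get_block_t supplied by the filesystem.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}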
2792sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2793 get_block_t *get_block)
2794{
2795 struct buffer_head tmp;
2796 struct inode *inode = mapping->host;
2797 tmp.b_state = 0;
2798 tmp.b_blocknr = 0;
b0cf2321 2799 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2800 get_block(inode, block, &tmp, 0);
2801 return tmp.b_blocknr;
2802}
2803
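/*
 * Illustrative sketch, not part of buffer.c: exposing the block mapping via
 * ->bmap is normally just a call to generic_block_bmap() with the
 * filesystem's (hypothetical) get_block_t.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}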
6712ecf8 2804static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2805{
2806 struct buffer_head *bh = bio->bi_private;
2807
1da177e4
LT
2808 if (err == -EOPNOTSUPP) {
2809 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2810 set_bit(BH_Eopnotsupp, &bh->b_state);
2811 }
2812
2813 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2814 bio_put(bio);
1da177e4
LT
2815}
2816
2817int submit_bh(int rw, struct buffer_head * bh)
2818{
2819 struct bio *bio;
2820 int ret = 0;
2821
2822 BUG_ON(!buffer_locked(bh));
2823 BUG_ON(!buffer_mapped(bh));
2824 BUG_ON(!bh->b_end_io);
2825
2826 if (buffer_ordered(bh) && (rw == WRITE))
2827 rw = WRITE_BARRIER;
2828
2829 /*
2830	 * Only clear out a write error when rewriting; should this
2831	 * include WRITE_SYNC as well?
2832 */
2833 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2834 clear_buffer_write_io_error(bh);
2835
2836 /*
2837 * from here on down, it's all bio -- do the initial mapping,
2838 * submit_bio -> generic_make_request may further map this bio around
2839 */
2840 bio = bio_alloc(GFP_NOIO, 1);
2841
2842 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2843 bio->bi_bdev = bh->b_bdev;
2844 bio->bi_io_vec[0].bv_page = bh->b_page;
2845 bio->bi_io_vec[0].bv_len = bh->b_size;
2846 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2847
2848 bio->bi_vcnt = 1;
2849 bio->bi_idx = 0;
2850 bio->bi_size = bh->b_size;
2851
2852 bio->bi_end_io = end_bio_bh_io_sync;
2853 bio->bi_private = bh;
2854
2855 bio_get(bio);
2856 submit_bio(rw, bio);
2857
2858 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2859 ret = -EOPNOTSUPP;
2860
2861 bio_put(bio);
2862 return ret;
2863}
2864
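/*
 * Illustrative sketch, not part of buffer.c: reading one buffer synchronously
 * with submit_bh(), roughly what ll_rw_block() below does per buffer it can
 * lock.  end_buffer_read_sync() drops the reference taken here and unlocks
 * the buffer on completion.  myfs_read_bh_sync() is a hypothetical helper.
 */
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}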
2865/**
2866 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2867 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2868 * @nr: number of &struct buffer_heads in the array
2869 * @bhs: array of pointers to &struct buffer_head
2870 *
a7662236
JK
2871 * ll_rw_block() takes an array of pointers to &struct buffer_head, and
2872 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2873 * option, %SWRITE, is like %WRITE except that it makes sure the *current*
2874 * data in the buffers is sent to disk. The fourth %READA option is described in the documentation
2875 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
2876 *
2877 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2878 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2879 * clean when doing a write request, and any buffer that appears to be
2880 * up-to-date when doing a read request. Further, it marks as clean the buffers that
2881 * are processed for writing (the buffer cache won't assume that they are
2882 * actually clean until the buffer gets unlocked).
1da177e4
LT
2883 *
2884 * ll_rw_block sets b_end_io to simple completion handler that marks
2885 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2886 * any waiters.
2887 *
2888 * All of the buffers must be for the same device, and must also be a
2889 * multiple of the current approved size for the device.
2890 */
2891void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2892{
2893 int i;
2894
2895 for (i = 0; i < nr; i++) {
2896 struct buffer_head *bh = bhs[i];
2897
a7662236
JK
2898 if (rw == SWRITE)
2899 lock_buffer(bh);
2900 else if (test_set_buffer_locked(bh))
1da177e4
LT
2901 continue;
2902
a7662236 2903 if (rw == WRITE || rw == SWRITE) {
1da177e4 2904 if (test_clear_buffer_dirty(bh)) {
76c3073a 2905 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2906 get_bh(bh);
1da177e4
LT
2907 submit_bh(WRITE, bh);
2908 continue;
2909 }
2910 } else {
1da177e4 2911 if (!buffer_uptodate(bh)) {
76c3073a 2912 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2913 get_bh(bh);
1da177e4
LT
2914 submit_bh(rw, bh);
2915 continue;
2916 }
2917 }
2918 unlock_buffer(bh);
1da177e4
LT
2919 }
2920}
2921
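/*
 * Illustrative sketch, not part of buffer.c: the common ll_rw_block() pattern
 * of kicking off reads on a batch of metadata buffers and then waiting on and
 * checking each one.  myfs_read_two() is a hypothetical helper; the buffers
 * come from sb_getblk() and brelse() tolerates NULL.
 */
static int myfs_read_two(struct super_block *sb, sector_t b1, sector_t b2,
				struct buffer_head *bhs[2])
{
	int i, err = 0;

	bhs[0] = sb_getblk(sb, b1);
	bhs[1] = sb_getblk(sb, b2);
	if (!bhs[0] || !bhs[1]) {
		err = -ENOMEM;
		goto fail;
	}

	ll_rw_block(READ, 2, bhs);
	for (i = 0; i < 2; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	if (err)
		goto fail;
	return 0;
fail:
	brelse(bhs[0]);
	brelse(bhs[1]);
	bhs[0] = bhs[1] = NULL;
	return err;
}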
2922/*
2923 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2924 * and then start new I/O and then wait upon it. The caller must have a ref on
2925 * the buffer_head.
2926 */
2927int sync_dirty_buffer(struct buffer_head *bh)
2928{
2929 int ret = 0;
2930
2931 WARN_ON(atomic_read(&bh->b_count) < 1);
2932 lock_buffer(bh);
2933 if (test_clear_buffer_dirty(bh)) {
2934 get_bh(bh);
2935 bh->b_end_io = end_buffer_write_sync;
2936 ret = submit_bh(WRITE, bh);
2937 wait_on_buffer(bh);
2938 if (buffer_eopnotsupp(bh)) {
2939 clear_buffer_eopnotsupp(bh);
2940 ret = -EOPNOTSUPP;
2941 }
2942 if (!ret && !buffer_uptodate(bh))
2943 ret = -EIO;
2944 } else {
2945 unlock_buffer(bh);
2946 }
2947 return ret;
2948}
2949
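/*
 * Illustrative sketch, not part of buffer.c: the usual pairing of
 * mark_buffer_dirty() with sync_dirty_buffer() to push an in-memory
 * superblock change to disk and wait for it.  The caller is assumed to hold
 * a reference on "sbh" (e.g. from sb_bread()); myfs_sync_super() is a
 * hypothetical helper.
 */
static int myfs_sync_super(struct buffer_head *sbh)
{
	mark_buffer_dirty(sbh);
	return sync_dirty_buffer(sbh);	/* waits; returns -EIO on write error */
}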
2950/*
2951 * try_to_free_buffers() checks if all the buffers on this particular page
2952 * are unused, and releases them if so.
2953 *
2954 * Exclusion against try_to_free_buffers may be obtained by either
2955 * locking the page or by holding its mapping's private_lock.
2956 *
2957 * If the page is dirty but all the buffers are clean then we need to
2958 * be sure to mark the page clean as well. This is because the page
2959 * may be against a block device, and a later reattachment of buffers
2960 * to a dirty page will set *all* buffers dirty. Which would corrupt
2961 * filesystem data on the same device.
2962 *
2963 * The same applies to regular filesystem pages: if all the buffers are
2964 * clean then we set the page clean and proceed. To do that, we require
2965 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2966 * private_lock.
2967 *
2968 * try_to_free_buffers() is non-blocking.
2969 */
2970static inline int buffer_busy(struct buffer_head *bh)
2971{
2972 return atomic_read(&bh->b_count) |
2973 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2974}
2975
2976static int
2977drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2978{
2979 struct buffer_head *head = page_buffers(page);
2980 struct buffer_head *bh;
2981
2982 bh = head;
2983 do {
de7d5a3b 2984 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
2985 set_bit(AS_EIO, &page->mapping->flags);
2986 if (buffer_busy(bh))
2987 goto failed;
2988 bh = bh->b_this_page;
2989 } while (bh != head);
2990
2991 do {
2992 struct buffer_head *next = bh->b_this_page;
2993
2994 if (!list_empty(&bh->b_assoc_buffers))
2995 __remove_assoc_queue(bh);
2996 bh = next;
2997 } while (bh != head);
2998 *buffers_to_free = head;
2999 __clear_page_buffers(page);
3000 return 1;
3001failed:
3002 return 0;
3003}
3004
3005int try_to_free_buffers(struct page *page)
3006{
3007 struct address_space * const mapping = page->mapping;
3008 struct buffer_head *buffers_to_free = NULL;
3009 int ret = 0;
3010
3011 BUG_ON(!PageLocked(page));
ecdfc978 3012 if (PageWriteback(page))
1da177e4
LT
3013 return 0;
3014
3015 if (mapping == NULL) { /* can this still happen? */
3016 ret = drop_buffers(page, &buffers_to_free);
3017 goto out;
3018 }
3019
3020 spin_lock(&mapping->private_lock);
3021 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3022
3023 /*
3024 * If the filesystem writes its buffers by hand (eg ext3)
3025 * then we can have clean buffers against a dirty page. We
3026 * clean the page here; otherwise the VM will never notice
3027 * that the filesystem did any IO at all.
3028 *
3029 * Also, during truncate, discard_buffer will have marked all
3030 * the page's buffers clean. We discover that here and clean
3031 * the page also.
87df7241
NP
3032 *
3033 * private_lock must be held over this entire operation in order
3034 * to synchronise against __set_page_dirty_buffers and prevent the
3035 * dirty bit from being lost.
ecdfc978
LT
3036 */
3037 if (ret)
3038 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3039 spin_unlock(&mapping->private_lock);
1da177e4
LT
3040out:
3041 if (buffers_to_free) {
3042 struct buffer_head *bh = buffers_to_free;
3043
3044 do {
3045 struct buffer_head *next = bh->b_this_page;
3046 free_buffer_head(bh);
3047 bh = next;
3048 } while (bh != buffers_to_free);
3049 }
3050 return ret;
3051}
3052EXPORT_SYMBOL(try_to_free_buffers);
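/*
 * Illustrative sketch, not part of buffer.c: a filesystem with no private
 * per-page state can let the VM drop clean, unused buffers by pointing
 * ->releasepage at try_to_free_buffers(), which is also what the generic
 * try_to_release_page() path falls back to.  myfs_releasepage() is a
 * hypothetical wrapper.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}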
3053
3978d717 3054void block_sync_page(struct page *page)
1da177e4
LT
3055{
3056 struct address_space *mapping;
3057
3058 smp_mb();
3059 mapping = page_mapping(page);
3060 if (mapping)
3061 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
3062}
3063
3064/*
3065 * There are no bdflush tunables left. But distributions are
3066 * still running obsolete flush daemons, so we terminate them here.
3067 *
3068 * Use of bdflush() is deprecated and will be removed in a future kernel.
3069 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3070 */
3071asmlinkage long sys_bdflush(int func, long data)
3072{
3073 static int msg_count;
3074
3075 if (!capable(CAP_SYS_ADMIN))
3076 return -EPERM;
3077
3078 if (msg_count < 5) {
3079 msg_count++;
3080 printk(KERN_INFO
3081 "warning: process `%s' used the obsolete bdflush"
3082 " system call\n", current->comm);
3083 printk(KERN_INFO "Fix your initscripts?\n");
3084 }
3085
3086 if (func == 1)
3087 do_exit(0);
3088 return 0;
3089}
3090
3091/*
3092 * Buffer-head allocation
3093 */
e18b890b 3094static struct kmem_cache *bh_cachep;
1da177e4
LT
3095
3096/*
3097 * Once the number of bh's in the machine exceeds this level, we start
3098 * stripping them in writeback.
3099 */
3100static int max_buffer_heads;
3101
3102int buffer_heads_over_limit;
3103
3104struct bh_accounting {
3105 int nr; /* Number of live bh's */
3106 int ratelimit; /* Limit cacheline bouncing */
3107};
3108
3109static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3110
3111static void recalc_bh_state(void)
3112{
3113 int i;
3114 int tot = 0;
3115
3116 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3117 return;
3118 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3119 for_each_online_cpu(i)
1da177e4
LT
3120 tot += per_cpu(bh_accounting, i).nr;
3121 buffer_heads_over_limit = (tot > max_buffer_heads);
3122}
3123
dd0fc66f 3124struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3125{
a35afb83 3126 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
1da177e4 3127 if (ret) {
a35afb83 3128 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3129 get_cpu_var(bh_accounting).nr++;
1da177e4 3130 recalc_bh_state();
736c7b80 3131 put_cpu_var(bh_accounting);
1da177e4
LT
3132 }
3133 return ret;
3134}
3135EXPORT_SYMBOL(alloc_buffer_head);
3136
3137void free_buffer_head(struct buffer_head *bh)
3138{
3139 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3140 kmem_cache_free(bh_cachep, bh);
736c7b80 3141 get_cpu_var(bh_accounting).nr--;
1da177e4 3142 recalc_bh_state();
736c7b80 3143 put_cpu_var(bh_accounting);
1da177e4
LT
3144}
3145EXPORT_SYMBOL(free_buffer_head);
3146
1da177e4
LT
3147static void buffer_exit_cpu(int cpu)
3148{
3149 int i;
3150 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3151
3152 for (i = 0; i < BH_LRU_SIZE; i++) {
3153 brelse(b->bhs[i]);
3154 b->bhs[i] = NULL;
3155 }
8a143426
ED
3156 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3157 per_cpu(bh_accounting, cpu).nr = 0;
3158 put_cpu_var(bh_accounting);
1da177e4
LT
3159}
3160
3161static int buffer_cpu_notify(struct notifier_block *self,
3162 unsigned long action, void *hcpu)
3163{
8bb78442 3164 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3165 buffer_exit_cpu((unsigned long)hcpu);
3166 return NOTIFY_OK;
3167}
1da177e4
LT
3168
3169void __init buffer_init(void)
3170{
3171 int nrpages;
3172
a35afb83
CL
3173 bh_cachep = KMEM_CACHE(buffer_head,
3174 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
1da177e4
LT
3175
3176 /*
3177 * Limit the bh occupancy to 10% of ZONE_NORMAL
3178 */
3179 nrpages = (nr_free_buffer_pages() * 10) / 100;
3180 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3181 hotcpu_notifier(buffer_cpu_notify, 0);
3182}
3183
3184EXPORT_SYMBOL(__bforget);
3185EXPORT_SYMBOL(__brelse);
3186EXPORT_SYMBOL(__wait_on_buffer);
3187EXPORT_SYMBOL(block_commit_write);
3188EXPORT_SYMBOL(block_prepare_write);
54171690 3189EXPORT_SYMBOL(block_page_mkwrite);
1da177e4
LT
3190EXPORT_SYMBOL(block_read_full_page);
3191EXPORT_SYMBOL(block_sync_page);
3192EXPORT_SYMBOL(block_truncate_page);
3193EXPORT_SYMBOL(block_write_full_page);
3194EXPORT_SYMBOL(cont_prepare_write);
1da177e4
LT
3195EXPORT_SYMBOL(end_buffer_read_sync);
3196EXPORT_SYMBOL(end_buffer_write_sync);
3197EXPORT_SYMBOL(file_fsync);
3198EXPORT_SYMBOL(fsync_bdev);
3199EXPORT_SYMBOL(generic_block_bmap);
3200EXPORT_SYMBOL(generic_commit_write);
3201EXPORT_SYMBOL(generic_cont_expand);
05eb0b51 3202EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3203EXPORT_SYMBOL(init_buffer);
3204EXPORT_SYMBOL(invalidate_bdev);
3205EXPORT_SYMBOL(ll_rw_block);
3206EXPORT_SYMBOL(mark_buffer_dirty);
3207EXPORT_SYMBOL(submit_bh);
3208EXPORT_SYMBOL(sync_dirty_buffer);
3209EXPORT_SYMBOL(unlock_buffer);