1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
70void fastcall __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
77void fastcall unlock_buffer(struct buffer_head *bh)
78{
79 smp_mb__before_clear_bit();
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
83}
84
85/*
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
89 */
90void __wait_on_buffer(struct buffer_head * bh)
91{
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93}
94
95static void
96__clear_page_buffers(struct page *page)
97{
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
101}
102
103static void buffer_io_error(struct buffer_head *bh)
104{
105 char b[BDEVNAME_SIZE];
106
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
110}
111
112/*
113 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
114 * unlock the buffer. This is what ll_rw_block uses too.
115 */
116void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
117{
118 if (uptodate) {
119 set_buffer_uptodate(bh);
120 } else {
121 /* This happens, due to failed READA attempts. */
122 clear_buffer_uptodate(bh);
123 }
124 unlock_buffer(bh);
125 put_bh(bh);
126}
127
128void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
129{
130 char b[BDEVNAME_SIZE];
131
132 if (uptodate) {
133 set_buffer_uptodate(bh);
134 } else {
135 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
136 buffer_io_error(bh);
137 printk(KERN_WARNING "lost page write due to "
138 "I/O error on %s\n",
139 bdevname(bh->b_bdev, b));
140 }
141 set_buffer_write_io_error(bh);
142 clear_buffer_uptodate(bh);
143 }
144 unlock_buffer(bh);
145 put_bh(bh);
146}
147
148/*
149 * Write out and wait upon all the dirty data associated with a block
150 * device via its mapping. Does not take the superblock lock.
151 */
152int sync_blockdev(struct block_device *bdev)
153{
154 int ret = 0;
155
156 if (bdev)
157 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
158 return ret;
159}
160EXPORT_SYMBOL(sync_blockdev);
161
162/*
163 * Write out and wait upon all dirty data associated with this
164 * device. Filesystem data as well as the underlying block
165 * device. Takes the superblock lock.
166 */
167int fsync_bdev(struct block_device *bdev)
168{
169 struct super_block *sb = get_super(bdev);
170 if (sb) {
171 int res = fsync_super(sb);
172 drop_super(sb);
173 return res;
174 }
175 return sync_blockdev(bdev);
176}
177
178/**
179 * freeze_bdev -- lock a filesystem and force it into a consistent state
180 * @bdev: blockdevice to lock
181 *
182 * This takes the block device bd_mount_sem to make sure no new mounts
183 * happen on bdev until thaw_bdev() is called.
184 * If a superblock is found on this device, we take the s_umount semaphore
185 * on it to make sure nobody unmounts until the snapshot creation is done.
186 */
187struct super_block *freeze_bdev(struct block_device *bdev)
188{
189 struct super_block *sb;
190
191 down(&bdev->bd_mount_sem);
192 sb = get_super(bdev);
193 if (sb && !(sb->s_flags & MS_RDONLY)) {
194 sb->s_frozen = SB_FREEZE_WRITE;
195 smp_wmb();
196
197 __fsync_super(sb);
198
199 sb->s_frozen = SB_FREEZE_TRANS;
200 smp_wmb();
201
202 sync_blockdev(sb->s_bdev);
203
204 if (sb->s_op->write_super_lockfs)
205 sb->s_op->write_super_lockfs(sb);
206 }
207
208 sync_blockdev(bdev);
209 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
210}
211EXPORT_SYMBOL(freeze_bdev);
212
213/**
214 * thaw_bdev -- unlock filesystem
215 * @bdev: blockdevice to unlock
216 * @sb: associated superblock
217 *
218 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
219 */
220void thaw_bdev(struct block_device *bdev, struct super_block *sb)
221{
222 if (sb) {
223 BUG_ON(sb->s_bdev != bdev);
224
225 if (sb->s_op->unlockfs)
226 sb->s_op->unlockfs(sb);
227 sb->s_frozen = SB_UNFROZEN;
228 smp_wmb();
229 wake_up(&sb->s_wait_unfrozen);
230 drop_super(sb);
231 }
232
233 up(&bdev->bd_mount_sem);
234}
235EXPORT_SYMBOL(thaw_bdev);
236
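/*
 * Illustrative sketch, not part of the original file: how a snapshot or
 * backup path would typically bracket its work with freeze_bdev() and
 * thaw_bdev() above.  example_snapshot() and the do_snapshot callback are
 * hypothetical names; only freeze_bdev()/thaw_bdev() are the real API.
 */
static int example_snapshot(struct block_device *bdev,
			    int (*do_snapshot)(struct block_device *))
{
	struct super_block *sb;
	int err;

	sb = freeze_bdev(bdev);		/* blocks new writes and mounts */
	err = do_snapshot(bdev);	/* device is now in a consistent state */
	thaw_bdev(bdev, sb);		/* sb may be NULL; thaw_bdev() copes */
	return err;
}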
237/*
238 * Various filesystems appear to want __find_get_block to be non-blocking.
239 * But it's the page lock which protects the buffers. To get around this,
240 * we get exclusion from try_to_free_buffers with the blockdev mapping's
241 * private_lock.
242 *
243 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
244 * may be quite high. This code could TryLock the page, and if that
245 * succeeds, there is no need to take private_lock. (But if
246 * private_lock is contended then so is mapping->tree_lock).
247 */
248static struct buffer_head *
249__find_get_block_slow(struct block_device *bdev, sector_t block)
250{
251 struct inode *bd_inode = bdev->bd_inode;
252 struct address_space *bd_mapping = bd_inode->i_mapping;
253 struct buffer_head *ret = NULL;
254 pgoff_t index;
255 struct buffer_head *bh;
256 struct buffer_head *head;
257 struct page *page;
258 int all_mapped = 1;
259
260 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
261 page = find_get_page(bd_mapping, index);
262 if (!page)
263 goto out;
264
265 spin_lock(&bd_mapping->private_lock);
266 if (!page_has_buffers(page))
267 goto out_unlock;
268 head = page_buffers(page);
269 bh = head;
270 do {
271 if (bh->b_blocknr == block) {
272 ret = bh;
273 get_bh(bh);
274 goto out_unlock;
275 }
276 if (!buffer_mapped(bh))
277 all_mapped = 0;
278 bh = bh->b_this_page;
279 } while (bh != head);
280
281 /* we might be here because some of the buffers on this page are
282 * not mapped. This is due to various races between
283 * file io on the block device and getblk. It gets dealt with
284 * elsewhere, don't buffer_error if we had some unmapped buffers
285 */
286 if (all_mapped) {
287 printk("__find_get_block_slow() failed. "
288 "block=%llu, b_blocknr=%llu\n",
289 (unsigned long long)block,
290 (unsigned long long)bh->b_blocknr);
291 printk("b_state=0x%08lx, b_size=%zu\n",
292 bh->b_state, bh->b_size);
293 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
294 }
295out_unlock:
296 spin_unlock(&bd_mapping->private_lock);
297 page_cache_release(page);
298out:
299 return ret;
300}
301
302/* If invalidate_buffers() will trash dirty buffers, it means some kind
303 of fs corruption is going on. Trashing dirty data always implies losing
304 information that was supposed to be just stored on the physical layer
305 by the user.
306
307 Thus invalidate_buffers in general usage is not allowed to trash
308 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
309 be preserved. These buffers are simply skipped.
310
311 We also skip buffers which are still in use. For example this can
312 happen if a userspace program is reading the block device.
313
314 NOTE: in the case where the user removed a removable-media disk even though
315 there is still dirty data not synced on disk (due to a bug in the device driver
316 or to an error of the user), by not destroying the dirty buffers we could
317 generate corruption also on the next media inserted, thus a parameter is
318 necessary to handle this case in the most safe way possible (trying
319 to not corrupt also the new disk inserted with the data belonging to
320 the old now corrupted disk). Also for the ramdisk the natural thing
321 to do in order to release the ramdisk memory is to destroy dirty buffers.
322
323 These are two special cases. Normal usage implies that the device driver
324 issues a sync on the device (without waiting for I/O completion) and
325 then an invalidate_buffers call that doesn't trash dirty buffers.
326
327 For handling cache coherency with the blkdev pagecache the 'update' case
328 has been introduced. It is needed to re-read from disk any pinned
329 buffer. NOTE: re-reading from disk is destructive so we can do it only
330 when we assume nobody is changing the buffercache under our I/O and when
331 we think the disk contains more recent information than the buffercache.
332 The update == 1 pass marks the buffers we need to update, the update == 2
333 pass does the actual I/O. */
334void invalidate_bdev(struct block_device *bdev)
335{
336 struct address_space *mapping = bdev->bd_inode->i_mapping;
337
338 if (mapping->nrpages == 0)
339 return;
340
341 invalidate_bh_lrus();
342 invalidate_mapping_pages(mapping, 0, -1);
343}
344
345/*
346 * Kick pdflush then try to free up some ZONE_NORMAL memory.
347 */
348static void free_more_memory(void)
349{
350 struct zone **zones;
351 pg_data_t *pgdat;
352
353 wakeup_pdflush(1024);
354 yield();
355
356 for_each_online_pgdat(pgdat) {
357 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
358 if (*zones)
359 try_to_free_pages(zones, GFP_NOFS);
360 }
361}
362
363/*
364 * I/O completion handler for block_read_full_page() - pages
365 * which come unlocked at the end of I/O.
366 */
367static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
368{
369 unsigned long flags;
370 struct buffer_head *first;
371 struct buffer_head *tmp;
372 struct page *page;
373 int page_uptodate = 1;
374
375 BUG_ON(!buffer_async_read(bh));
376
377 page = bh->b_page;
378 if (uptodate) {
379 set_buffer_uptodate(bh);
380 } else {
381 clear_buffer_uptodate(bh);
382 if (printk_ratelimit())
383 buffer_io_error(bh);
384 SetPageError(page);
385 }
386
387 /*
388 * Be _very_ careful from here on. Bad things can happen if
389 * two buffer heads end IO at almost the same time and both
390 * decide that the page is now completely done.
391 */
392 first = page_buffers(page);
393 local_irq_save(flags);
394 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
395 clear_buffer_async_read(bh);
396 unlock_buffer(bh);
397 tmp = bh;
398 do {
399 if (!buffer_uptodate(tmp))
400 page_uptodate = 0;
401 if (buffer_async_read(tmp)) {
402 BUG_ON(!buffer_locked(tmp));
403 goto still_busy;
404 }
405 tmp = tmp->b_this_page;
406 } while (tmp != bh);
407 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
408 local_irq_restore(flags);
409
410 /*
411 * If none of the buffers had errors and they are all
412 * uptodate then we can set the page uptodate.
413 */
414 if (page_uptodate && !PageError(page))
415 SetPageUptodate(page);
416 unlock_page(page);
417 return;
418
419still_busy:
420 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 local_irq_restore(flags);
422 return;
423}
424
425/*
426 * Completion handler for block_write_full_page() - pages which are unlocked
427 * during I/O, and which have PageWriteback cleared upon I/O completion.
428 */
429static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
430{
431 char b[BDEVNAME_SIZE];
432 unsigned long flags;
433 struct buffer_head *first;
434 struct buffer_head *tmp;
435 struct page *page;
436
437 BUG_ON(!buffer_async_write(bh));
438
439 page = bh->b_page;
440 if (uptodate) {
441 set_buffer_uptodate(bh);
442 } else {
443 if (printk_ratelimit()) {
444 buffer_io_error(bh);
445 printk(KERN_WARNING "lost page write due to "
446 "I/O error on %s\n",
447 bdevname(bh->b_bdev, b));
448 }
449 set_bit(AS_EIO, &page->mapping->flags);
450 set_buffer_write_io_error(bh);
451 clear_buffer_uptodate(bh);
452 SetPageError(page);
453 }
454
455 first = page_buffers(page);
456 local_irq_save(flags);
457 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
458
459 clear_buffer_async_write(bh);
460 unlock_buffer(bh);
461 tmp = bh->b_this_page;
462 while (tmp != bh) {
463 if (buffer_async_write(tmp)) {
464 BUG_ON(!buffer_locked(tmp));
465 goto still_busy;
466 }
467 tmp = tmp->b_this_page;
468 }
469 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
470 local_irq_restore(flags);
471 end_page_writeback(page);
472 return;
473
474still_busy:
475 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
476 local_irq_restore(flags);
477 return;
478}
479
480/*
481 * If a page's buffers are under async read-in (end_buffer_async_read
482 * completion) then there is a possibility that another thread of
483 * control could lock one of the buffers after it has completed
484 * but while some of the other buffers have not completed. This
485 * locked buffer would confuse end_buffer_async_read() into not unlocking
486 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
487 * that this buffer is not under async I/O.
488 *
489 * The page comes unlocked when it has no locked buffer_async buffers
490 * left.
491 *
492 * PageLocked prevents anyone starting new async I/O reads any of
493 * the buffers.
494 *
495 * PageWriteback is used to prevent simultaneous writeout of the same
496 * page.
497 *
498 * PageLocked prevents anyone from starting writeback of a page which is
499 * under read I/O (PageWriteback is only ever set against a locked page).
500 */
501static void mark_buffer_async_read(struct buffer_head *bh)
502{
503 bh->b_end_io = end_buffer_async_read;
504 set_buffer_async_read(bh);
505}
506
507void mark_buffer_async_write(struct buffer_head *bh)
508{
509 bh->b_end_io = end_buffer_async_write;
510 set_buffer_async_write(bh);
511}
512EXPORT_SYMBOL(mark_buffer_async_write);
513
514
515/*
516 * fs/buffer.c contains helper functions for buffer-backed address space's
517 * fsync functions. A common requirement for buffer-based filesystems is
518 * that certain data from the backing blockdev needs to be written out for
519 * a successful fsync(). For example, ext2 indirect blocks need to be
520 * written back and waited upon before fsync() returns.
521 *
522 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
523 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
524 * management of a list of dependent buffers at ->i_mapping->private_list.
525 *
526 * Locking is a little subtle: try_to_free_buffers() will remove buffers
527 * from their controlling inode's queue when they are being freed. But
528 * try_to_free_buffers() will be operating against the *blockdev* mapping
529 * at the time, not against the S_ISREG file which depends on those buffers.
530 * So the locking for private_list is via the private_lock in the address_space
531 * which backs the buffers. Which is different from the address_space
532 * against which the buffers are listed. So for a particular address_space,
533 * mapping->private_lock does *not* protect mapping->private_list! In fact,
534 * mapping->private_list will always be protected by the backing blockdev's
535 * ->private_lock.
536 *
537 * Which introduces a requirement: all buffers on an address_space's
538 * ->private_list must be from the same address_space: the blockdev's.
539 *
540 * address_spaces which do not place buffers at ->private_list via these
541 * utility functions are free to use private_lock and private_list for
542 * whatever they want. The only requirement is that list_empty(private_list)
543 * be true at clear_inode() time.
544 *
545 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
546 * filesystems should do that. invalidate_inode_buffers() should just go
547 * BUG_ON(!list_empty).
548 *
549 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
550 * take an address_space, not an inode. And it should be called
551 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
552 * queued up.
553 *
554 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
555 * list if it is already on a list. Because if the buffer is on a list,
556 * it *must* already be on the right one. If not, the filesystem is being
557 * silly. This will save a ton of locking. But first we have to ensure
558 * that buffers are taken *off* the old inode's list when they are freed
559 * (presumably in truncate). That requires careful auditing of all
560 * filesystems (do it inside bforget()). It could also be done by bringing
561 * b_inode back.
562 */
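/*
 * Illustrative sketch, not part of the original file: the usage pattern the
 * comment above describes.  A filesystem queues its indirect/metadata
 * buffers on the inode's ->private_list as it dirties them, and its fsync
 * method then writes and waits on exactly that list.  The two example_*
 * functions are hypothetical names; mark_buffer_dirty_inode() and
 * sync_mapping_buffers() are the real helpers defined later in this file.
 */
static void example_dirty_metadata(struct buffer_head *bh, struct inode *inode)
{
	/* dirty bh and queue it on inode->i_mapping->private_list */
	mark_buffer_dirty_inode(bh, inode);
}

static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* write out and wait upon the buffers queued above */
	return sync_mapping_buffers(inode->i_mapping);
}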
563
564/*
565 * The buffer's backing address_space's private_lock must be held
566 */
567static inline void __remove_assoc_queue(struct buffer_head *bh)
568{
569 list_del_init(&bh->b_assoc_buffers);
570 WARN_ON(!bh->b_assoc_map);
571 if (buffer_write_io_error(bh))
572 set_bit(AS_EIO, &bh->b_assoc_map->flags);
573 bh->b_assoc_map = NULL;
574}
575
576int inode_has_buffers(struct inode *inode)
577{
578 return !list_empty(&inode->i_data.private_list);
579}
580
581/*
582 * osync is designed to support O_SYNC io. It waits synchronously for
583 * all already-submitted IO to complete, but does not queue any new
584 * writes to the disk.
585 *
586 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
587 * you dirty the buffers, and then use osync_inode_buffers to wait for
588 * completion. Any other dirty buffers which are not yet queued for
589 * write will not be flushed to disk by the osync.
590 */
591static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
592{
593 struct buffer_head *bh;
594 struct list_head *p;
595 int err = 0;
596
597 spin_lock(lock);
598repeat:
599 list_for_each_prev(p, list) {
600 bh = BH_ENTRY(p);
601 if (buffer_locked(bh)) {
602 get_bh(bh);
603 spin_unlock(lock);
604 wait_on_buffer(bh);
605 if (!buffer_uptodate(bh))
606 err = -EIO;
607 brelse(bh);
608 spin_lock(lock);
609 goto repeat;
610 }
611 }
612 spin_unlock(lock);
613 return err;
614}
615
616/**
617 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
618 * buffers
619 * @mapping: the mapping which wants those buffers written
620 *
621 * Starts I/O against the buffers at mapping->private_list, and waits upon
622 * that I/O.
623 *
624 * Basically, this is a convenience function for fsync().
625 * @mapping is a file or directory which needs those buffers to be written for
626 * a successful fsync().
627 */
628int sync_mapping_buffers(struct address_space *mapping)
629{
630 struct address_space *buffer_mapping = mapping->assoc_mapping;
631
632 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
633 return 0;
634
635 return fsync_buffers_list(&buffer_mapping->private_lock,
636 &mapping->private_list);
637}
638EXPORT_SYMBOL(sync_mapping_buffers);
639
640/*
641 * Called when we've recently written block `bblock', and it is known that
642 * `bblock' was for a buffer_boundary() buffer. This means that the block at
643 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
644 * dirty, schedule it for IO. So that indirects merge nicely with their data.
645 */
646void write_boundary_block(struct block_device *bdev,
647 sector_t bblock, unsigned blocksize)
648{
649 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
650 if (bh) {
651 if (buffer_dirty(bh))
652 ll_rw_block(WRITE, 1, &bh);
653 put_bh(bh);
654 }
655}
656
657void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
658{
659 struct address_space *mapping = inode->i_mapping;
660 struct address_space *buffer_mapping = bh->b_page->mapping;
661
662 mark_buffer_dirty(bh);
663 if (!mapping->assoc_mapping) {
664 mapping->assoc_mapping = buffer_mapping;
665 } else {
666 BUG_ON(mapping->assoc_mapping != buffer_mapping);
667 }
668 if (list_empty(&bh->b_assoc_buffers)) {
669 spin_lock(&buffer_mapping->private_lock);
670 list_move_tail(&bh->b_assoc_buffers,
671 &mapping->private_list);
672 bh->b_assoc_map = mapping;
673 spin_unlock(&buffer_mapping->private_lock);
674 }
675}
676EXPORT_SYMBOL(mark_buffer_dirty_inode);
677
678/*
679 * Add a page to the dirty page list.
680 *
681 * It is a sad fact of life that this function is called from several places
682 * deeply under spinlocking. It may not sleep.
683 *
684 * If the page has buffers, the uptodate buffers are set dirty, to preserve
685 * dirty-state coherency between the page and the buffers. If the page does
686 * not have buffers then when they are later attached they will all be set
687 * dirty.
688 *
689 * The buffers are dirtied before the page is dirtied. There's a small race
690 * window in which a writepage caller may see the page cleanness but not the
691 * buffer dirtiness. That's fine. If this code were to set the page dirty
692 * before the buffers, a concurrent writepage caller could clear the page dirty
693 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
694 * page on the dirty page list.
695 *
696 * We use private_lock to lock against try_to_free_buffers while using the
697 * page's buffer list. Also use this to protect against clean buffers being
698 * added to the page after it was set dirty.
699 *
700 * FIXME: may need to call ->reservepage here as well. That's rather up to the
701 * address_space though.
702 */
703int __set_page_dirty_buffers(struct page *page)
704{
705 struct address_space * const mapping = page_mapping(page);
706
707 if (unlikely(!mapping))
708 return !TestSetPageDirty(page);
709
710 spin_lock(&mapping->private_lock);
711 if (page_has_buffers(page)) {
712 struct buffer_head *head = page_buffers(page);
713 struct buffer_head *bh = head;
714
715 do {
716 set_buffer_dirty(bh);
717 bh = bh->b_this_page;
718 } while (bh != head);
719 }
720 spin_unlock(&mapping->private_lock);
721
722 if (TestSetPageDirty(page))
723 return 0;
724
725 write_lock_irq(&mapping->tree_lock);
726 if (page->mapping) { /* Race with truncate? */
727 if (mapping_cap_account_dirty(mapping)) {
728 __inc_zone_page_state(page, NR_FILE_DIRTY);
729 task_io_account_write(PAGE_CACHE_SIZE);
730 }
731 radix_tree_tag_set(&mapping->page_tree,
732 page_index(page), PAGECACHE_TAG_DIRTY);
733 }
734 write_unlock_irq(&mapping->tree_lock);
735 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
736 return 1;
1da177e4
LT
737}
738EXPORT_SYMBOL(__set_page_dirty_buffers);
739
740/*
741 * Write out and wait upon a list of buffers.
742 *
743 * We have conflicting pressures: we want to make sure that all
744 * initially dirty buffers get waited on, but that any subsequently
745 * dirtied buffers don't. After all, we don't want fsync to last
746 * forever if somebody is actively writing to the file.
747 *
748 * Do this in two main stages: first we copy dirty buffers to a
749 * temporary inode list, queueing the writes as we go. Then we clean
750 * up, waiting for those writes to complete.
751 *
752 * During this second stage, any subsequent updates to the file may end
753 * up refiling the buffer on the original inode's dirty list again, so
754 * there is a chance we will end up with a buffer queued for write but
755 * not yet completed on that list. So, as a final cleanup we go through
756 * the osync code to catch these locked, dirty buffers without requeuing
757 * any newly dirty buffers for write.
758 */
759static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
760{
761 struct buffer_head *bh;
762 struct list_head tmp;
763 int err = 0, err2;
764
765 INIT_LIST_HEAD(&tmp);
766
767 spin_lock(lock);
768 while (!list_empty(list)) {
769 bh = BH_ENTRY(list->next);
770 __remove_assoc_queue(bh);
771 if (buffer_dirty(bh) || buffer_locked(bh)) {
772 list_add(&bh->b_assoc_buffers, &tmp);
773 if (buffer_dirty(bh)) {
774 get_bh(bh);
775 spin_unlock(lock);
776 /*
777 * Ensure any pending I/O completes so that
778 * ll_rw_block() actually writes the current
779 * contents - it is a noop if I/O is still in
780 * flight on potentially older contents.
781 */
782 ll_rw_block(SWRITE, 1, &bh);
783 brelse(bh);
784 spin_lock(lock);
785 }
786 }
787 }
788
789 while (!list_empty(&tmp)) {
790 bh = BH_ENTRY(tmp.prev);
791 list_del_init(&bh->b_assoc_buffers);
792 get_bh(bh);
793 spin_unlock(lock);
794 wait_on_buffer(bh);
795 if (!buffer_uptodate(bh))
796 err = -EIO;
797 brelse(bh);
798 spin_lock(lock);
799 }
800
801 spin_unlock(lock);
802 err2 = osync_buffers_list(lock, list);
803 if (err)
804 return err;
805 else
806 return err2;
807}
808
809/*
810 * Invalidate any and all dirty buffers on a given inode. We are
811 * probably unmounting the fs, but that doesn't mean we have already
812 * done a sync(). Just drop the buffers from the inode list.
813 *
814 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
815 * assumes that all the buffers are against the blockdev. Not true
816 * for reiserfs.
817 */
818void invalidate_inode_buffers(struct inode *inode)
819{
820 if (inode_has_buffers(inode)) {
821 struct address_space *mapping = &inode->i_data;
822 struct list_head *list = &mapping->private_list;
823 struct address_space *buffer_mapping = mapping->assoc_mapping;
824
825 spin_lock(&buffer_mapping->private_lock);
826 while (!list_empty(list))
827 __remove_assoc_queue(BH_ENTRY(list->next));
828 spin_unlock(&buffer_mapping->private_lock);
829 }
830}
831
832/*
833 * Remove any clean buffers from the inode's buffer list. This is called
834 * when we're trying to free the inode itself. Those buffers can pin it.
835 *
836 * Returns true if all buffers were removed.
837 */
838int remove_inode_buffers(struct inode *inode)
839{
840 int ret = 1;
841
842 if (inode_has_buffers(inode)) {
843 struct address_space *mapping = &inode->i_data;
844 struct list_head *list = &mapping->private_list;
845 struct address_space *buffer_mapping = mapping->assoc_mapping;
846
847 spin_lock(&buffer_mapping->private_lock);
848 while (!list_empty(list)) {
849 struct buffer_head *bh = BH_ENTRY(list->next);
850 if (buffer_dirty(bh)) {
851 ret = 0;
852 break;
853 }
854 __remove_assoc_queue(bh);
855 }
856 spin_unlock(&buffer_mapping->private_lock);
857 }
858 return ret;
859}
860
861/*
862 * Create the appropriate buffers when given a page for data area and
863 * the size of each buffer.  Use the bh->b_this_page linked list to
864 * follow the buffers created. Return NULL if unable to create more
865 * buffers.
866 *
867 * The retry flag is used to differentiate async IO (paging, swapping)
868 * which may not fail from ordinary buffer allocations.
869 */
870struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
871 int retry)
872{
873 struct buffer_head *bh, *head;
874 long offset;
875
876try_again:
877 head = NULL;
878 offset = PAGE_SIZE;
879 while ((offset -= size) >= 0) {
880 bh = alloc_buffer_head(GFP_NOFS);
881 if (!bh)
882 goto no_grow;
883
884 bh->b_bdev = NULL;
885 bh->b_this_page = head;
886 bh->b_blocknr = -1;
887 head = bh;
888
889 bh->b_state = 0;
890 atomic_set(&bh->b_count, 0);
891 bh->b_private = NULL;
892 bh->b_size = size;
893
894 /* Link the buffer to its page */
895 set_bh_page(bh, page, offset);
896
897 init_buffer(bh, NULL, NULL);
898 }
899 return head;
900/*
901 * In case anything failed, we just free everything we got.
902 */
903no_grow:
904 if (head) {
905 do {
906 bh = head;
907 head = head->b_this_page;
908 free_buffer_head(bh);
909 } while (head);
910 }
911
912 /*
913 * Return failure for non-async IO requests. Async IO requests
914 * are not allowed to fail, so we have to wait until buffer heads
915 * become available. But we don't want tasks sleeping with
916 * partially complete buffers, so all were released above.
917 */
918 if (!retry)
919 return NULL;
920
921 /* We're _really_ low on memory. Now we just
922 * wait for old buffer heads to become free due to
923 * finishing IO. Since this is an async request and
924 * the reserve list is empty, we're sure there are
925 * async buffer heads in use.
926 */
927 free_more_memory();
928 goto try_again;
929}
930EXPORT_SYMBOL_GPL(alloc_page_buffers);
931
932static inline void
933link_dev_buffers(struct page *page, struct buffer_head *head)
934{
935 struct buffer_head *bh, *tail;
936
937 bh = head;
938 do {
939 tail = bh;
940 bh = bh->b_this_page;
941 } while (bh);
942 tail->b_this_page = head;
943 attach_page_buffers(page, head);
944}
945
946/*
947 * Initialise the state of a blockdev page's buffers.
948 */
949static void
950init_page_buffers(struct page *page, struct block_device *bdev,
951 sector_t block, int size)
952{
953 struct buffer_head *head = page_buffers(page);
954 struct buffer_head *bh = head;
955 int uptodate = PageUptodate(page);
956
957 do {
958 if (!buffer_mapped(bh)) {
959 init_buffer(bh, NULL, NULL);
960 bh->b_bdev = bdev;
961 bh->b_blocknr = block;
962 if (uptodate)
963 set_buffer_uptodate(bh);
964 set_buffer_mapped(bh);
965 }
966 block++;
967 bh = bh->b_this_page;
968 } while (bh != head);
969}
970
971/*
972 * Create the page-cache page that contains the requested block.
973 *
974 * This is used purely for blockdev mappings.
975 */
976static struct page *
977grow_dev_page(struct block_device *bdev, sector_t block,
978 pgoff_t index, int size)
979{
980 struct inode *inode = bdev->bd_inode;
981 struct page *page;
982 struct buffer_head *bh;
983
984 page = find_or_create_page(inode->i_mapping, index,
985 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
986 if (!page)
987 return NULL;
988
989 BUG_ON(!PageLocked(page));
990
991 if (page_has_buffers(page)) {
992 bh = page_buffers(page);
993 if (bh->b_size == size) {
994 init_page_buffers(page, bdev, block, size);
995 return page;
996 }
997 if (!try_to_free_buffers(page))
998 goto failed;
999 }
1000
1001 /*
1002 * Allocate some buffers for this page
1003 */
1004 bh = alloc_page_buffers(page, size, 0);
1005 if (!bh)
1006 goto failed;
1007
1008 /*
1009 * Link the page to the buffers and initialise them. Take the
1010 * lock to be atomic wrt __find_get_block(), which does not
1011 * run under the page lock.
1012 */
1013 spin_lock(&inode->i_mapping->private_lock);
1014 link_dev_buffers(page, bh);
1015 init_page_buffers(page, bdev, block, size);
1016 spin_unlock(&inode->i_mapping->private_lock);
1017 return page;
1018
1019failed:
1020 BUG();
1021 unlock_page(page);
1022 page_cache_release(page);
1023 return NULL;
1024}
1025
1026/*
1027 * Create buffers for the specified block device block's page. If
1028 * that page was dirty, the buffers are set dirty also.
1029 */
1030static int
1031grow_buffers(struct block_device *bdev, sector_t block, int size)
1032{
1033 struct page *page;
1034 pgoff_t index;
1035 int sizebits;
1036
1037 sizebits = -1;
1038 do {
1039 sizebits++;
1040 } while ((size << sizebits) < PAGE_SIZE);
1041
1042 index = block >> sizebits;
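	/*
	 * Worked example (illustrative): with PAGE_SIZE == 4096 and
	 * size == 512 the loop above gives sizebits == 3, i.e. eight
	 * blocks per page.  index = block >> 3 is then the page that
	 * holds the block, and block is rounded down to index << 3
	 * further below before the page's buffers are initialised.
	 */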
1043
1044 /*
1045 * Check for a block which wants to lie outside our maximum possible
1046 * pagecache index. (this comparison is done using sector_t types).
1047 */
1048 if (unlikely(index != block >> sizebits)) {
1049 char b[BDEVNAME_SIZE];
1050
1051 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1052 "device %s\n",
1053 __FUNCTION__, (unsigned long long)block,
1054 bdevname(bdev, b));
1055 return -EIO;
1056 }
1057 block = index << sizebits;
1058 /* Create a page with the proper size buffers.. */
1059 page = grow_dev_page(bdev, block, index, size);
1060 if (!page)
1061 return 0;
1062 unlock_page(page);
1063 page_cache_release(page);
1064 return 1;
1065}
1066
1067static struct buffer_head *
1068__getblk_slow(struct block_device *bdev, sector_t block, int size)
1069{
1070 /* Size must be multiple of hard sectorsize */
1071 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1072 (size < 512 || size > PAGE_SIZE))) {
1073 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1074 size);
1075 printk(KERN_ERR "hardsect size: %d\n",
1076 bdev_hardsect_size(bdev));
1077
1078 dump_stack();
1079 return NULL;
1080 }
1081
1082 for (;;) {
1083 struct buffer_head * bh;
1084 int ret;
1085
1086 bh = __find_get_block(bdev, block, size);
1087 if (bh)
1088 return bh;
1089
1090 ret = grow_buffers(bdev, block, size);
1091 if (ret < 0)
1092 return NULL;
1093 if (ret == 0)
1094 free_more_memory();
1095 }
1096}
1097
1098/*
1099 * The relationship between dirty buffers and dirty pages:
1100 *
1101 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1102 * the page is tagged dirty in its radix tree.
1103 *
1104 * At all times, the dirtiness of the buffers represents the dirtiness of
1105 * subsections of the page. If the page has buffers, the page dirty bit is
1106 * merely a hint about the true dirty state.
1107 *
1108 * When a page is set dirty in its entirety, all its buffers are marked dirty
1109 * (if the page has buffers).
1110 *
1111 * When a buffer is marked dirty, its page is dirtied, but the page's other
1112 * buffers are not.
1113 *
1114 * Also. When blockdev buffers are explicitly read with bread(), they
1115 * individually become uptodate. But their backing page remains not
1116 * uptodate - even if all of its buffers are uptodate. A subsequent
1117 * block_read_full_page() against that page will discover all the uptodate
1118 * buffers, will set the page uptodate and will perform no I/O.
1119 */
1120
1121/**
1122 * mark_buffer_dirty - mark a buffer_head as needing writeout
1123 * @bh: the buffer_head to mark dirty
1124 *
1125 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1126 * backing page dirty, then tag the page as dirty in its address_space's radix
1127 * tree and then attach the address_space's inode to its superblock's dirty
1128 * inode list.
1129 *
1130 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1131 * mapping->tree_lock and the global inode_lock.
1132 */
1133void fastcall mark_buffer_dirty(struct buffer_head *bh)
1134{
1135 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1136 __set_page_dirty_nobuffers(bh->b_page);
1137}
1138
1139/*
1140 * Decrement a buffer_head's reference count. If all buffers against a page
1141 * have zero reference count, are clean and unlocked, and if the page is clean
1142 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1143 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1144 * a page but it ends up not being freed, and buffers may later be reattached).
1145 */
1146void __brelse(struct buffer_head * buf)
1147{
1148 if (atomic_read(&buf->b_count)) {
1149 put_bh(buf);
1150 return;
1151 }
1152 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1153 WARN_ON(1);
1154}
1155
1156/*
1157 * bforget() is like brelse(), except it discards any
1158 * potentially dirty data.
1159 */
1160void __bforget(struct buffer_head *bh)
1161{
1162 clear_buffer_dirty(bh);
1163 if (!list_empty(&bh->b_assoc_buffers)) {
1164 struct address_space *buffer_mapping = bh->b_page->mapping;
1165
1166 spin_lock(&buffer_mapping->private_lock);
1167 list_del_init(&bh->b_assoc_buffers);
1168 bh->b_assoc_map = NULL;
1169 spin_unlock(&buffer_mapping->private_lock);
1170 }
1171 __brelse(bh);
1172}
1173
1174static struct buffer_head *__bread_slow(struct buffer_head *bh)
1175{
1176 lock_buffer(bh);
1177 if (buffer_uptodate(bh)) {
1178 unlock_buffer(bh);
1179 return bh;
1180 } else {
1181 get_bh(bh);
1182 bh->b_end_io = end_buffer_read_sync;
1183 submit_bh(READ, bh);
1184 wait_on_buffer(bh);
1185 if (buffer_uptodate(bh))
1186 return bh;
1187 }
1188 brelse(bh);
1189 return NULL;
1190}
1191
1192/*
1193 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1194 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1195 * refcount elevated by one when they're in an LRU. A buffer can only appear
1196 * once in a particular CPU's LRU. A single buffer can be present in multiple
1197 * CPU's LRUs at the same time.
1198 *
1199 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1200 * sb_find_get_block().
1201 *
1202 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1203 * a local interrupt disable for that.
1204 */
1205
1206#define BH_LRU_SIZE 8
1207
1208struct bh_lru {
1209 struct buffer_head *bhs[BH_LRU_SIZE];
1210};
1211
1212static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1213
1214#ifdef CONFIG_SMP
1215#define bh_lru_lock() local_irq_disable()
1216#define bh_lru_unlock() local_irq_enable()
1217#else
1218#define bh_lru_lock() preempt_disable()
1219#define bh_lru_unlock() preempt_enable()
1220#endif
1221
1222static inline void check_irqs_on(void)
1223{
1224#ifdef irqs_disabled
1225 BUG_ON(irqs_disabled());
1226#endif
1227}
1228
1229/*
1230 * The LRU management algorithm is dopey-but-simple. Sorry.
1231 */
1232static void bh_lru_install(struct buffer_head *bh)
1233{
1234 struct buffer_head *evictee = NULL;
1235 struct bh_lru *lru;
1236
1237 check_irqs_on();
1238 bh_lru_lock();
1239 lru = &__get_cpu_var(bh_lrus);
1240 if (lru->bhs[0] != bh) {
1241 struct buffer_head *bhs[BH_LRU_SIZE];
1242 int in;
1243 int out = 0;
1244
1245 get_bh(bh);
1246 bhs[out++] = bh;
1247 for (in = 0; in < BH_LRU_SIZE; in++) {
1248 struct buffer_head *bh2 = lru->bhs[in];
1249
1250 if (bh2 == bh) {
1251 __brelse(bh2);
1252 } else {
1253 if (out >= BH_LRU_SIZE) {
1254 BUG_ON(evictee != NULL);
1255 evictee = bh2;
1256 } else {
1257 bhs[out++] = bh2;
1258 }
1259 }
1260 }
1261 while (out < BH_LRU_SIZE)
1262 bhs[out++] = NULL;
1263 memcpy(lru->bhs, bhs, sizeof(bhs));
1264 }
1265 bh_lru_unlock();
1266
1267 if (evictee)
1268 __brelse(evictee);
1269}
1270
1271/*
1272 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1273 */
1274static struct buffer_head *
1275lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1276{
1277 struct buffer_head *ret = NULL;
1278 struct bh_lru *lru;
1279 unsigned int i;
1280
1281 check_irqs_on();
1282 bh_lru_lock();
1283 lru = &__get_cpu_var(bh_lrus);
1284 for (i = 0; i < BH_LRU_SIZE; i++) {
1285 struct buffer_head *bh = lru->bhs[i];
1286
1287 if (bh && bh->b_bdev == bdev &&
1288 bh->b_blocknr == block && bh->b_size == size) {
1289 if (i) {
1290 while (i) {
1291 lru->bhs[i] = lru->bhs[i - 1];
1292 i--;
1293 }
1294 lru->bhs[0] = bh;
1295 }
1296 get_bh(bh);
1297 ret = bh;
1298 break;
1299 }
1300 }
1301 bh_lru_unlock();
1302 return ret;
1303}
1304
1305/*
1306 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1307 * it in the LRU and mark it as accessed. If it is not present then return
1308 * NULL
1309 */
1310struct buffer_head *
1311__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1312{
1313 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1314
1315 if (bh == NULL) {
1316 bh = __find_get_block_slow(bdev, block);
1317 if (bh)
1318 bh_lru_install(bh);
1319 }
1320 if (bh)
1321 touch_buffer(bh);
1322 return bh;
1323}
1324EXPORT_SYMBOL(__find_get_block);
1325
1326/*
1327 * __getblk will locate (and, if necessary, create) the buffer_head
1328 * which corresponds to the passed block_device, block and size. The
1329 * returned buffer has its reference count incremented.
1330 *
1331 * __getblk() cannot fail - it just keeps trying. If you pass it an
1332 * illegal block number, __getblk() will happily return a buffer_head
1333 * which represents the non-existent block. Very weird.
1334 *
1335 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1336 * attempt is failing. FIXME, perhaps?
1337 */
1338struct buffer_head *
1339__getblk(struct block_device *bdev, sector_t block, unsigned size)
1340{
1341 struct buffer_head *bh = __find_get_block(bdev, block, size);
1342
1343 might_sleep();
1344 if (bh == NULL)
1345 bh = __getblk_slow(bdev, block, size);
1346 return bh;
1347}
1348EXPORT_SYMBOL(__getblk);
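/*
 * Illustrative sketch, not part of the original file: how a filesystem
 * typically uses __getblk() (via the sb_getblk() wrapper) for a block it is
 * about to overwrite completely, so no read from disk is needed.
 * example_new_block() is a hypothetical name; the helpers it calls are the
 * real buffer-cache API.
 */
static struct buffer_head *example_new_block(struct super_block *sb,
					     sector_t block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (bh) {
		lock_buffer(bh);
		memset(bh->b_data, 0, bh->b_size);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
	}
	return bh;		/* caller drops the reference with brelse() */
}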
1349
1350/*
1351 * Do async read-ahead on a buffer..
1352 */
1353void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1354{
1355 struct buffer_head *bh = __getblk(bdev, block, size);
1356 if (likely(bh)) {
1357 ll_rw_block(READA, 1, &bh);
1358 brelse(bh);
1359 }
1360}
1361EXPORT_SYMBOL(__breadahead);
1362
1363/**
1364 * __bread() - reads a specified block and returns the bh
1365 * @bdev: the block_device to read from
1366 * @block: number of block
1367 * @size: size (in bytes) to read
1368 *
1369 * Reads a specified block, and returns buffer head that contains it.
1370 * It returns NULL if the block was unreadable.
1371 */
1372struct buffer_head *
1373__bread(struct block_device *bdev, sector_t block, unsigned size)
1374{
1375 struct buffer_head *bh = __getblk(bdev, block, size);
1376
1377 if (likely(bh) && !buffer_uptodate(bh))
1378 bh = __bread_slow(bh);
1379 return bh;
1380}
1381EXPORT_SYMBOL(__bread);
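/*
 * Illustrative sketch, not part of the original file: reading one metadata
 * block through the buffer cache, usually via the sb_bread() wrapper around
 * __bread().  example_read_block() is a hypothetical name.
 */
static int example_read_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... bh->b_data now holds bh->b_size bytes of valid data ... */
	brelse(bh);		/* drop the reference taken by sb_bread() */
	return 0;
}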
1382
1383/*
1384 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1385 * This doesn't race because it runs in each cpu either in irq
1386 * or with preempt disabled.
1387 */
1388static void invalidate_bh_lru(void *arg)
1389{
1390 struct bh_lru *b = &get_cpu_var(bh_lrus);
1391 int i;
1392
1393 for (i = 0; i < BH_LRU_SIZE; i++) {
1394 brelse(b->bhs[i]);
1395 b->bhs[i] = NULL;
1396 }
1397 put_cpu_var(bh_lrus);
1398}
1399
1400void invalidate_bh_lrus(void)
1401{
1402 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1403}
1404
1405void set_bh_page(struct buffer_head *bh,
1406 struct page *page, unsigned long offset)
1407{
1408 bh->b_page = page;
1409 BUG_ON(offset >= PAGE_SIZE);
1410 if (PageHighMem(page))
1411 /*
1412 * This catches illegal uses and preserves the offset:
1413 */
1414 bh->b_data = (char *)(0 + offset);
1415 else
1416 bh->b_data = page_address(page) + offset;
1417}
1418EXPORT_SYMBOL(set_bh_page);
1419
1420/*
1421 * Called when truncating a buffer on a page completely.
1422 */
1423static void discard_buffer(struct buffer_head * bh)
1424{
1425 lock_buffer(bh);
1426 clear_buffer_dirty(bh);
1427 bh->b_bdev = NULL;
1428 clear_buffer_mapped(bh);
1429 clear_buffer_req(bh);
1430 clear_buffer_new(bh);
1431 clear_buffer_delay(bh);
1432 clear_buffer_unwritten(bh);
1433 unlock_buffer(bh);
1434}
1435
1436/**
1437 * block_invalidatepage - invalidate part or all of a buffer-backed page
1438 *
1439 * @page: the page which is affected
1440 * @offset: the index of the truncation point
1441 *
1442 * block_invalidatepage() is called when all or part of the page has become
1443 * invalidated by a truncate operation.
1444 *
1445 * block_invalidatepage() does not have to release all buffers, but it must
1446 * ensure that no dirty buffer is left outside @offset and that no I/O
1447 * is underway against any of the blocks which are outside the truncation
1448 * point. Because the caller is about to free (and possibly reuse) those
1449 * blocks on-disk.
1450 */
1451void block_invalidatepage(struct page *page, unsigned long offset)
1452{
1453 struct buffer_head *head, *bh, *next;
1454 unsigned int curr_off = 0;
1455
1456 BUG_ON(!PageLocked(page));
1457 if (!page_has_buffers(page))
1458 goto out;
1459
1460 head = page_buffers(page);
1461 bh = head;
1462 do {
1463 unsigned int next_off = curr_off + bh->b_size;
1464 next = bh->b_this_page;
1465
1466 /*
1467 * is this block fully invalidated?
1468 */
1469 if (offset <= curr_off)
1470 discard_buffer(bh);
1471 curr_off = next_off;
1472 bh = next;
1473 } while (bh != head);
1474
1475 /*
1476 * We release buffers only if the entire page is being invalidated.
1477 * The get_block cached value has been unconditionally invalidated,
1478 * so real IO is not possible anymore.
1479 */
1480 if (offset == 0)
1481 try_to_release_page(page, 0);
1482out:
1483 return;
1484}
1485EXPORT_SYMBOL(block_invalidatepage);
1486
1487/*
1488 * We attach and possibly dirty the buffers atomically wrt
1489 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1490 * is already excluded via the page lock.
1491 */
1492void create_empty_buffers(struct page *page,
1493 unsigned long blocksize, unsigned long b_state)
1494{
1495 struct buffer_head *bh, *head, *tail;
1496
1497 head = alloc_page_buffers(page, blocksize, 1);
1498 bh = head;
1499 do {
1500 bh->b_state |= b_state;
1501 tail = bh;
1502 bh = bh->b_this_page;
1503 } while (bh);
1504 tail->b_this_page = head;
1505
1506 spin_lock(&page->mapping->private_lock);
1507 if (PageUptodate(page) || PageDirty(page)) {
1508 bh = head;
1509 do {
1510 if (PageDirty(page))
1511 set_buffer_dirty(bh);
1512 if (PageUptodate(page))
1513 set_buffer_uptodate(bh);
1514 bh = bh->b_this_page;
1515 } while (bh != head);
1516 }
1517 attach_page_buffers(page, head);
1518 spin_unlock(&page->mapping->private_lock);
1519}
1520EXPORT_SYMBOL(create_empty_buffers);
1521
1522/*
1523 * We are taking a block for data and we don't want any output from any
1524 * buffer-cache aliases starting from return from that function and
1525 * until the moment when something will explicitly mark the buffer
1526 * dirty (hopefully that will not happen until we will free that block ;-)
1527 * We don't even need to mark it not-uptodate - nobody can expect
1528 * anything from a newly allocated buffer anyway. We used to use
1529 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1530 * don't want to mark the alias unmapped, for example - it would confuse
1531 * anyone who might pick it with bread() afterwards...
1532 *
1533 * Also.. Note that bforget() doesn't lock the buffer. So there can
1534 * be writeout I/O going on against recently-freed buffers. We don't
1535 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1536 * only if we really need to. That happens here.
1537 */
1538void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1539{
1540 struct buffer_head *old_bh;
1541
1542 might_sleep();
1543
1544 old_bh = __find_get_block_slow(bdev, block);
1545 if (old_bh) {
1546 clear_buffer_dirty(old_bh);
1547 wait_on_buffer(old_bh);
1548 clear_buffer_req(old_bh);
1549 __brelse(old_bh);
1550 }
1551}
1552EXPORT_SYMBOL(unmap_underlying_metadata);
1553
1554/*
1555 * NOTE! All mapped/uptodate combinations are valid:
1556 *
1557 * Mapped Uptodate Meaning
1558 *
1559 * No No "unknown" - must do get_block()
1560 * No Yes "hole" - zero-filled
1561 * Yes No "allocated" - allocated on disk, not read in
1562 * Yes Yes "valid" - allocated and up-to-date in memory.
1563 *
1564 * "Dirty" is valid only with the last case (mapped+uptodate).
1565 */
1566
1567/*
1568 * While block_write_full_page is writing back the dirty buffers under
1569 * the page lock, whoever dirtied the buffers may decide to clean them
1570 * again at any time. We handle that by only looking at the buffer
1571 * state inside lock_buffer().
1572 *
1573 * If block_write_full_page() is called for regular writeback
1574 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1575 * locked buffer. This only can happen if someone has written the buffer
1576 * directly, with submit_bh(). At the address_space level PageWriteback
1577 * prevents this contention from occurring.
1578 */
1579static int __block_write_full_page(struct inode *inode, struct page *page,
1580 get_block_t *get_block, struct writeback_control *wbc)
1581{
1582 int err;
1583 sector_t block;
1584 sector_t last_block;
1585 struct buffer_head *bh, *head;
1586 const unsigned blocksize = 1 << inode->i_blkbits;
1587 int nr_underway = 0;
1588
1589 BUG_ON(!PageLocked(page));
1590
1591 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1592
1593 if (!page_has_buffers(page)) {
1594 create_empty_buffers(page, blocksize,
1595 (1 << BH_Dirty)|(1 << BH_Uptodate));
1596 }
1597
1598 /*
1599 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1600 * here, and the (potentially unmapped) buffers may become dirty at
1601 * any time. If a buffer becomes dirty here after we've inspected it
1602 * then we just miss that fact, and the page stays dirty.
1603 *
1604 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1605 * handle that here by just cleaning them.
1606 */
1607
1608 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1609 head = page_buffers(page);
1610 bh = head;
1611
1612 /*
1613 * Get all the dirty buffers mapped to disk addresses and
1614 * handle any aliases from the underlying blockdev's mapping.
1615 */
1616 do {
1617 if (block > last_block) {
1618 /*
1619 * mapped buffers outside i_size will occur, because
1620 * this page can be outside i_size when there is a
1621 * truncate in progress.
1622 */
1623 /*
1624 * The buffer was zeroed by block_write_full_page()
1625 */
1626 clear_buffer_dirty(bh);
1627 set_buffer_uptodate(bh);
1628 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1629 WARN_ON(bh->b_size != blocksize);
1630 err = get_block(inode, block, bh, 1);
1631 if (err)
1632 goto recover;
1633 if (buffer_new(bh)) {
1634 /* blockdev mappings never come here */
1635 clear_buffer_new(bh);
1636 unmap_underlying_metadata(bh->b_bdev,
1637 bh->b_blocknr);
1638 }
1639 }
1640 bh = bh->b_this_page;
1641 block++;
1642 } while (bh != head);
1643
1644 do {
1645 if (!buffer_mapped(bh))
1646 continue;
1647 /*
1648 * If it's a fully non-blocking write attempt and we cannot
1649 * lock the buffer then redirty the page. Note that this can
1650 * potentially cause a busy-wait loop from pdflush and kswapd
1651 * activity, but those code paths have their own higher-level
1652 * throttling.
1653 */
1654 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1655 lock_buffer(bh);
1656 } else if (test_set_buffer_locked(bh)) {
1657 redirty_page_for_writepage(wbc, page);
1658 continue;
1659 }
1660 if (test_clear_buffer_dirty(bh)) {
1661 mark_buffer_async_write(bh);
1662 } else {
1663 unlock_buffer(bh);
1664 }
1665 } while ((bh = bh->b_this_page) != head);
1666
1667 /*
1668 * The page and its buffers are protected by PageWriteback(), so we can
1669 * drop the bh refcounts early.
1670 */
1671 BUG_ON(PageWriteback(page));
1672 set_page_writeback(page);
1673
1674 do {
1675 struct buffer_head *next = bh->b_this_page;
1676 if (buffer_async_write(bh)) {
1677 submit_bh(WRITE, bh);
1678 nr_underway++;
1679 }
1da177e4
LT
1680 bh = next;
1681 } while (bh != head);
1682 unlock_page(page);
1683
1684 err = 0;
1685done:
1686 if (nr_underway == 0) {
1687 /*
1688 * The page was marked dirty, but the buffers were
1689 * clean. Someone wrote them back by hand with
1690 * ll_rw_block/submit_bh. A rare case.
1691 */
1692 end_page_writeback(page);
1693
1694 /*
1695 * The page and buffer_heads can be released at any time from
1696 * here on.
1697 */
1698 wbc->pages_skipped++; /* We didn't write this page */
1699 }
1700 return err;
1701
1702recover:
1703 /*
1704 * ENOSPC, or some other error. We may already have added some
1705 * blocks to the file, so we need to write these out to avoid
1706 * exposing stale data.
1707 * The page is currently locked and not marked for writeback
1708 */
1709 bh = head;
1710 /* Recovery: lock and submit the mapped buffers */
1711 do {
1712 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1713 lock_buffer(bh);
1714 mark_buffer_async_write(bh);
1715 } else {
1716 /*
1717 * The buffer may have been set dirty during
1718 * attachment to a dirty page.
1719 */
1720 clear_buffer_dirty(bh);
1721 }
1722 } while ((bh = bh->b_this_page) != head);
1723 SetPageError(page);
1724 BUG_ON(PageWriteback(page));
1725 mapping_set_error(page->mapping, err);
1726 set_page_writeback(page);
1727 do {
1728 struct buffer_head *next = bh->b_this_page;
1729 if (buffer_async_write(bh)) {
1730 clear_buffer_dirty(bh);
1731 submit_bh(WRITE, bh);
1732 nr_underway++;
1733 }
1734 bh = next;
1735 } while (bh != head);
1736 unlock_page(page);
1737 goto done;
1738}
1739
1740static int __block_prepare_write(struct inode *inode, struct page *page,
1741 unsigned from, unsigned to, get_block_t *get_block)
1742{
1743 unsigned block_start, block_end;
1744 sector_t block;
1745 int err = 0;
1746 unsigned blocksize, bbits;
1747 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1748
1749 BUG_ON(!PageLocked(page));
1750 BUG_ON(from > PAGE_CACHE_SIZE);
1751 BUG_ON(to > PAGE_CACHE_SIZE);
1752 BUG_ON(from > to);
1753
1754 blocksize = 1 << inode->i_blkbits;
1755 if (!page_has_buffers(page))
1756 create_empty_buffers(page, blocksize, 0);
1757 head = page_buffers(page);
1758
1759 bbits = inode->i_blkbits;
1760 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1761
1762 for(bh = head, block_start = 0; bh != head || !block_start;
1763 block++, block_start=block_end, bh = bh->b_this_page) {
1764 block_end = block_start + blocksize;
1765 if (block_end <= from || block_start >= to) {
1766 if (PageUptodate(page)) {
1767 if (!buffer_uptodate(bh))
1768 set_buffer_uptodate(bh);
1769 }
1770 continue;
1771 }
1772 if (buffer_new(bh))
1773 clear_buffer_new(bh);
1774 if (!buffer_mapped(bh)) {
1775 WARN_ON(bh->b_size != blocksize);
1776 err = get_block(inode, block, bh, 1);
1777 if (err)
1778 break;
1779 if (buffer_new(bh)) {
1780 unmap_underlying_metadata(bh->b_bdev,
1781 bh->b_blocknr);
1782 if (PageUptodate(page)) {
1783 set_buffer_uptodate(bh);
1784 continue;
1785 }
1786 if (block_end > to || block_start < from) {
1787 void *kaddr;
1788
1789 kaddr = kmap_atomic(page, KM_USER0);
1790 if (block_end > to)
1791 memset(kaddr+to, 0,
1792 block_end-to);
1793 if (block_start < from)
1794 memset(kaddr+block_start,
1795 0, from-block_start);
1796 flush_dcache_page(page);
1797 kunmap_atomic(kaddr, KM_USER0);
1798 }
1799 continue;
1800 }
1801 }
1802 if (PageUptodate(page)) {
1803 if (!buffer_uptodate(bh))
1804 set_buffer_uptodate(bh);
1805 continue;
1806 }
1807 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1808 !buffer_unwritten(bh) &&
1da177e4
LT
1809 (block_start < from || block_end > to)) {
1810 ll_rw_block(READ, 1, &bh);
1811 *wait_bh++=bh;
1812 }
1813 }
1814 /*
1815 * If we issued read requests - let them complete.
1816 */
1817 while(wait_bh > wait) {
1818 wait_on_buffer(*--wait_bh);
1819 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1820 err = -EIO;
1da177e4 1821 }
152becd2
AA
1822 if (!err) {
1823 bh = head;
1824 do {
1825 if (buffer_new(bh))
1826 clear_buffer_new(bh);
1827 } while ((bh = bh->b_this_page) != head);
1828 return 0;
1829 }
f3ddbdc6 1830 /* Error case: */
1da177e4
LT
1831 /*
1832 * Zero out any newly allocated blocks to avoid exposing stale
1833 * data. If BH_New is set, we know that the block was newly
1834 * allocated in the above loop.
1835 */
1836 bh = head;
1837 block_start = 0;
1838 do {
1839 block_end = block_start+blocksize;
1840 if (block_end <= from)
1841 goto next_bh;
1842 if (block_start >= to)
1843 break;
1844 if (buffer_new(bh)) {
1da177e4 1845 clear_buffer_new(bh);
01f2705d 1846 zero_user_page(page, block_start, bh->b_size, KM_USER0);
1da177e4
LT
1847 set_buffer_uptodate(bh);
1848 mark_buffer_dirty(bh);
1849 }
1850next_bh:
1851 block_start = block_end;
1852 bh = bh->b_this_page;
1853 } while (bh != head);
1854 return err;
1855}
1856
1857static int __block_commit_write(struct inode *inode, struct page *page,
1858 unsigned from, unsigned to)
1859{
1860 unsigned block_start, block_end;
1861 int partial = 0;
1862 unsigned blocksize;
1863 struct buffer_head *bh, *head;
1864
1865 blocksize = 1 << inode->i_blkbits;
1866
1867 for(bh = head = page_buffers(page), block_start = 0;
1868 bh != head || !block_start;
1869 block_start=block_end, bh = bh->b_this_page) {
1870 block_end = block_start + blocksize;
1871 if (block_end <= from || block_start >= to) {
1872 if (!buffer_uptodate(bh))
1873 partial = 1;
1874 } else {
1875 set_buffer_uptodate(bh);
1876 mark_buffer_dirty(bh);
1877 }
1878 }
1879
1880 /*
1881 * If this is a partial write which happened to make all buffers
1882 * uptodate then we can optimize away a bogus readpage() for
1883 * the next read(). Here we 'discover' whether the page went
1884 * uptodate as a result of this (potentially partial) write.
1885 */
1886 if (!partial)
1887 SetPageUptodate(page);
1888 return 0;
1889}
1890
1891/*
1892 * Generic "read page" function for block devices that have the normal
1893 * get_block functionality. This is most of the block device filesystems.
1894 * Reads the page asynchronously --- the unlock_buffer() and
1895 * set/clear_buffer_uptodate() functions propagate buffer state into the
1896 * page struct once IO has completed.
1897 */
1898int block_read_full_page(struct page *page, get_block_t *get_block)
1899{
1900 struct inode *inode = page->mapping->host;
1901 sector_t iblock, lblock;
1902 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1903 unsigned int blocksize;
1904 int nr, i;
1905 int fully_mapped = 1;
1906
cd7619d6 1907 BUG_ON(!PageLocked(page));
1da177e4
LT
1908 blocksize = 1 << inode->i_blkbits;
1909 if (!page_has_buffers(page))
1910 create_empty_buffers(page, blocksize, 0);
1911 head = page_buffers(page);
1912
1913 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1914 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1915 bh = head;
1916 nr = 0;
1917 i = 0;
1918
1919 do {
1920 if (buffer_uptodate(bh))
1921 continue;
1922
1923 if (!buffer_mapped(bh)) {
c64610ba
AM
1924 int err = 0;
1925
1da177e4
LT
1926 fully_mapped = 0;
1927 if (iblock < lblock) {
b0cf2321 1928 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
1929 err = get_block(inode, iblock, bh, 0);
1930 if (err)
1da177e4
LT
1931 SetPageError(page);
1932 }
1933 if (!buffer_mapped(bh)) {
01f2705d
ND
1934 zero_user_page(page, i * blocksize, blocksize,
1935 KM_USER0);
c64610ba
AM
1936 if (!err)
1937 set_buffer_uptodate(bh);
1da177e4
LT
1938 continue;
1939 }
1940 /*
1941 * get_block() might have updated the buffer
1942 * synchronously
1943 */
1944 if (buffer_uptodate(bh))
1945 continue;
1946 }
1947 arr[nr++] = bh;
1948 } while (i++, iblock++, (bh = bh->b_this_page) != head);
1949
1950 if (fully_mapped)
1951 SetPageMappedToDisk(page);
1952
1953 if (!nr) {
1954 /*
1955 * All buffers are uptodate - we can set the page uptodate
1956 * as well. But not if get_block() returned an error.
1957 */
1958 if (!PageError(page))
1959 SetPageUptodate(page);
1960 unlock_page(page);
1961 return 0;
1962 }
1963
1964 /* Stage two: lock the buffers */
1965 for (i = 0; i < nr; i++) {
1966 bh = arr[i];
1967 lock_buffer(bh);
1968 mark_buffer_async_read(bh);
1969 }
1970
1971 /*
1972 * Stage 3: start the IO. Check for uptodateness
1973 * inside the buffer lock in case another process reading
1974 * the underlying blockdev brought it uptodate (the sct fix).
1975 */
1976 for (i = 0; i < nr; i++) {
1977 bh = arr[i];
1978 if (buffer_uptodate(bh))
1979 end_buffer_async_read(bh, 1);
1980 else
1981 submit_bh(READ, bh);
1982 }
1983 return 0;
1984}
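/*
 * Illustrative sketch (editor addition, not part of this file): a
 * filesystem normally exposes block_read_full_page() through its
 * ->readpage method by currying in its own block-mapping callback.
 * The myfs_* names below are hypothetical placeholders.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, myfs_get_block);
}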
1985
1986/* utility function for filesystems that need to do work on expanding
1987 * truncates. Uses prepare/commit_write to allow the filesystem to
1988 * deal with the hole.
1989 */
05eb0b51
OH
1990static int __generic_cont_expand(struct inode *inode, loff_t size,
1991 pgoff_t index, unsigned int offset)
1da177e4
LT
1992{
1993 struct address_space *mapping = inode->i_mapping;
1994 struct page *page;
05eb0b51 1995 unsigned long limit;
1da177e4
LT
1996 int err;
1997
1998 err = -EFBIG;
1999 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2000 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2001 send_sig(SIGXFSZ, current, 0);
2002 goto out;
2003 }
2004 if (size > inode->i_sb->s_maxbytes)
2005 goto out;
2006
1da177e4
LT
2007 err = -ENOMEM;
2008 page = grab_cache_page(mapping, index);
2009 if (!page)
2010 goto out;
2011 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
05eb0b51
OH
2012 if (err) {
2013 /*
2014 * ->prepare_write() may have instantiated a few blocks
2015 * outside i_size. Trim these off again.
2016 */
2017 unlock_page(page);
2018 page_cache_release(page);
2019 vmtruncate(inode, inode->i_size);
2020 goto out;
1da177e4 2021 }
05eb0b51
OH
2022
2023 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2024
1da177e4
LT
2025 unlock_page(page);
2026 page_cache_release(page);
2027 if (err > 0)
2028 err = 0;
2029out:
2030 return err;
2031}
2032
05eb0b51
OH
2033int generic_cont_expand(struct inode *inode, loff_t size)
2034{
2035 pgoff_t index;
2036 unsigned int offset;
2037
2038 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2039
2040 /* ugh. in prepare/commit_write, if from==to==start of block, we
2041 ** skip the prepare. make sure we never send an offset for the start
2042 ** of a block
2043 */
2044 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2045 /* caller must handle this extra byte. */
2046 offset++;
2047 }
2048 index = size >> PAGE_CACHE_SHIFT;
2049
2050 return __generic_cont_expand(inode, size, index, offset);
2051}
2052
2053int generic_cont_expand_simple(struct inode *inode, loff_t size)
2054{
2055 loff_t pos = size - 1;
2056 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2057 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2058
2059 /* prepare/commit_write can handle even if from==to==start of block. */
2060 return __generic_cont_expand(inode, size, index, offset);
2061}
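/*
 * Illustrative sketch (editor addition, assumption): a filesystem that
 * cannot represent holes may grow a file from its ->setattr path with
 * generic_cont_expand_simple() before committing the new size.  The
 * myfs_setattr name and error handling shown are hypothetical.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
                /* Allocate and zero-fill up to the new size. */
                err = generic_cont_expand_simple(inode, attr->ia_size);
                if (err)
                        return err;
        }
        return inode_setattr(inode, attr);
}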
2062
1da177e4
LT
2063/*
2064 * For moronic filesystems that do not allow holes in files.
2065 * We may have to extend the file.
2066 */
2067
2068int cont_prepare_write(struct page *page, unsigned offset,
2069 unsigned to, get_block_t *get_block, loff_t *bytes)
2070{
2071 struct address_space *mapping = page->mapping;
2072 struct inode *inode = mapping->host;
2073 struct page *new_page;
2074 pgoff_t pgpos;
2075 long status;
2076 unsigned zerofrom;
2077 unsigned blocksize = 1 << inode->i_blkbits;
1da177e4
LT
2078
2079 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2080 status = -ENOMEM;
2081 new_page = grab_cache_page(mapping, pgpos);
2082 if (!new_page)
2083 goto out;
2084 /* we might sleep */
2085 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2086 unlock_page(new_page);
2087 page_cache_release(new_page);
2088 continue;
2089 }
2090 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2091 if (zerofrom & (blocksize-1)) {
2092 *bytes |= (blocksize-1);
2093 (*bytes)++;
2094 }
2095 status = __block_prepare_write(inode, new_page, zerofrom,
2096 PAGE_CACHE_SIZE, get_block);
2097 if (status)
2098 goto out_unmap;
ff1be9ad 2099 zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
01f2705d 2100 KM_USER0);
1da177e4
LT
2101 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2102 unlock_page(new_page);
2103 page_cache_release(new_page);
2104 }
2105
2106 if (page->index < pgpos) {
2107 /* completely inside the area */
2108 zerofrom = offset;
2109 } else {
2110 /* page covers the boundary, find the boundary offset */
2111 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2112
2113 /* if we are expanding the file, the last block will be filled */
2114 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2115 *bytes |= (blocksize-1);
2116 (*bytes)++;
2117 }
2118
2119 /* starting below the boundary? Nothing to zero out */
2120 if (offset <= zerofrom)
2121 zerofrom = offset;
2122 }
2123 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2124 if (status)
2125 goto out1;
2126 if (zerofrom < offset) {
01f2705d 2127 zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
1da177e4
LT
2128 __block_commit_write(inode, page, zerofrom, offset);
2129 }
2130 return 0;
2131out1:
2132 ClearPageUptodate(page);
2133 return status;
2134
2135out_unmap:
2136 ClearPageUptodate(new_page);
2137 unlock_page(new_page);
2138 page_cache_release(new_page);
2139out:
2140 return status;
2141}
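/*
 * Illustrative sketch (editor addition, not part of this file): a
 * hole-less filesystem keeps an in-core "allocated up to" cursor per
 * inode and passes its address as the @bytes argument, so
 * cont_prepare_write() can zero-fill and allocate every page between
 * the old end of data and the write position.  CONTFS_I(), its
 * mmu_private field and contfs_get_block() are hypothetical names
 * (FAT does something very similar).
 */
static int contfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int contfs_prepare_write(struct file *file, struct page *page,
                                unsigned from, unsigned to)
{
        return cont_prepare_write(page, from, to, contfs_get_block,
                                  &CONTFS_I(page->mapping->host)->mmu_private);
}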
2142
2143int block_prepare_write(struct page *page, unsigned from, unsigned to,
2144 get_block_t *get_block)
2145{
2146 struct inode *inode = page->mapping->host;
2147 int err = __block_prepare_write(inode, page, from, to, get_block);
2148 if (err)
2149 ClearPageUptodate(page);
2150 return err;
2151}
2152
2153int block_commit_write(struct page *page, unsigned from, unsigned to)
2154{
2155 struct inode *inode = page->mapping->host;
2156 __block_commit_write(inode,page,from,to);
2157 return 0;
2158}
2159
2160int generic_commit_write(struct file *file, struct page *page,
2161 unsigned from, unsigned to)
2162{
2163 struct inode *inode = page->mapping->host;
2164 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2165 __block_commit_write(inode,page,from,to);
2166 /*
2167 * No need to use i_size_read() here, the i_size
1b1dcc1b 2168 * cannot change under us because we hold i_mutex.
1da177e4
LT
2169 */
2170 if (pos > inode->i_size) {
2171 i_size_write(inode, pos);
2172 mark_inode_dirty(inode);
2173 }
2174 return 0;
2175}
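/*
 * Illustrative sketch (editor addition, not part of this file):
 * filesystems whose get_block can allocate blocks on demand typically
 * pair block_prepare_write() with generic_commit_write(), so the
 * i_size update above is done for them.  The myfs_* names are
 * hypothetical.
 */
static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int myfs_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
        .prepare_write  = myfs_prepare_write,
        .commit_write   = generic_commit_write,
        /* .readpage, .writepage etc. omitted for brevity */
};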
2176
2177
2178/*
2179 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2180 * immediately, while under the page lock. So it needs a special end_io
2181 * handler which does not touch the bh after unlocking it.
2182 *
2183 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2184 * a race there is benign: unlock_buffer() only uses the bh's address for
2185 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2186 * itself.
2187 */
2188static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2189{
2190 if (uptodate) {
2191 set_buffer_uptodate(bh);
2192 } else {
2193 /* This happens, due to failed READA attempts. */
2194 clear_buffer_uptodate(bh);
2195 }
2196 unlock_buffer(bh);
2197}
2198
2199/*
2200 * On entry, the page is not uptodate at all.
2201 * On exit the page is fully uptodate in the areas outside (from,to)
2202 */
2203int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2204 get_block_t *get_block)
2205{
2206 struct inode *inode = page->mapping->host;
2207 const unsigned blkbits = inode->i_blkbits;
2208 const unsigned blocksize = 1 << blkbits;
2209 struct buffer_head map_bh;
2210 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2211 unsigned block_in_page;
2212 unsigned block_start;
2213 sector_t block_in_file;
2214 char *kaddr;
2215 int nr_reads = 0;
2216 int i;
2217 int ret = 0;
2218 int is_mapped_to_disk = 1;
1da177e4
LT
2219
2220 if (PageMappedToDisk(page))
2221 return 0;
2222
2223 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2224 map_bh.b_page = page;
2225
2226 /*
2227 * We loop across all blocks in the page, whether or not they are
2228 * part of the affected region. This is so we can discover if the
2229 * page is fully mapped-to-disk.
2230 */
2231 for (block_start = 0, block_in_page = 0;
2232 block_start < PAGE_CACHE_SIZE;
2233 block_in_page++, block_start += blocksize) {
2234 unsigned block_end = block_start + blocksize;
2235 int create;
2236
2237 map_bh.b_state = 0;
2238 create = 1;
2239 if (block_start >= to)
2240 create = 0;
b0cf2321 2241 map_bh.b_size = blocksize;
1da177e4
LT
2242 ret = get_block(inode, block_in_file + block_in_page,
2243 &map_bh, create);
2244 if (ret)
2245 goto failed;
2246 if (!buffer_mapped(&map_bh))
2247 is_mapped_to_disk = 0;
2248 if (buffer_new(&map_bh))
2249 unmap_underlying_metadata(map_bh.b_bdev,
2250 map_bh.b_blocknr);
2251 if (PageUptodate(page))
2252 continue;
2253 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2254 kaddr = kmap_atomic(page, KM_USER0);
22c8ca78 2255 if (block_start < from)
1da177e4 2256 memset(kaddr+block_start, 0, from-block_start);
22c8ca78 2257 if (block_end > to)
1da177e4 2258 memset(kaddr + to, 0, block_end - to);
1da177e4
LT
2259 flush_dcache_page(page);
2260 kunmap_atomic(kaddr, KM_USER0);
2261 continue;
2262 }
2263 if (buffer_uptodate(&map_bh))
2264 continue; /* reiserfs does this */
2265 if (block_start < from || block_end > to) {
2266 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2267
2268 if (!bh) {
2269 ret = -ENOMEM;
2270 goto failed;
2271 }
2272 bh->b_state = map_bh.b_state;
2273 atomic_set(&bh->b_count, 0);
2274 bh->b_this_page = NULL;
2275 bh->b_page = page;
2276 bh->b_blocknr = map_bh.b_blocknr;
2277 bh->b_size = blocksize;
2278 bh->b_data = (char *)(long)block_start;
2279 bh->b_bdev = map_bh.b_bdev;
2280 bh->b_private = NULL;
2281 read_bh[nr_reads++] = bh;
2282 }
2283 }
2284
2285 if (nr_reads) {
2286 struct buffer_head *bh;
2287
2288 /*
2289 * The page is locked, so these buffers are protected from
2290 * any VM or truncate activity. Hence we don't need to care
2291 * for the buffer_head refcounts.
2292 */
2293 for (i = 0; i < nr_reads; i++) {
2294 bh = read_bh[i];
2295 lock_buffer(bh);
2296 bh->b_end_io = end_buffer_read_nobh;
2297 submit_bh(READ, bh);
2298 }
2299 for (i = 0; i < nr_reads; i++) {
2300 bh = read_bh[i];
2301 wait_on_buffer(bh);
2302 if (!buffer_uptodate(bh))
2303 ret = -EIO;
2304 free_buffer_head(bh);
2305 read_bh[i] = NULL;
2306 }
2307 if (ret)
2308 goto failed;
2309 }
2310
2311 if (is_mapped_to_disk)
2312 SetPageMappedToDisk(page);
1da177e4
LT
2313
2314 return 0;
2315
2316failed:
2317 for (i = 0; i < nr_reads; i++) {
2318 if (read_bh[i])
2319 free_buffer_head(read_bh[i]);
2320 }
2321
2322 /*
2323 * Error recovery is pretty slack. Clear the page and mark it dirty
2324 * so we'll later zero out any blocks which _were_ allocated.
2325 */
01f2705d 2326 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
1da177e4
LT
2327 SetPageUptodate(page);
2328 set_page_dirty(page);
2329 return ret;
2330}
2331EXPORT_SYMBOL(nobh_prepare_write);
2332
57bf63d6
DK
2333/*
2334 * Make sure any changes to nobh_commit_write() are reflected in
2335 * nobh_truncate_page(), since it doesn't call commit_write().
2336 */
1da177e4
LT
2337int nobh_commit_write(struct file *file, struct page *page,
2338 unsigned from, unsigned to)
2339{
2340 struct inode *inode = page->mapping->host;
2341 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2342
22c8ca78 2343 SetPageUptodate(page);
1da177e4
LT
2344 set_page_dirty(page);
2345 if (pos > inode->i_size) {
2346 i_size_write(inode, pos);
2347 mark_inode_dirty(inode);
2348 }
2349 return 0;
2350}
2351EXPORT_SYMBOL(nobh_commit_write);
2352
2353/*
2354 * nobh_writepage() - based on block_write_full_page() except
2355 * that it tries to operate without attaching bufferheads to
2356 * the page.
2357 */
2358int nobh_writepage(struct page *page, get_block_t *get_block,
2359 struct writeback_control *wbc)
2360{
2361 struct inode * const inode = page->mapping->host;
2362 loff_t i_size = i_size_read(inode);
2363 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2364 unsigned offset;
1da177e4
LT
2365 int ret;
2366
2367 /* Is the page fully inside i_size? */
2368 if (page->index < end_index)
2369 goto out;
2370
2371 /* Is the page fully outside i_size? (truncate in progress) */
2372 offset = i_size & (PAGE_CACHE_SIZE-1);
2373 if (page->index >= end_index+1 || !offset) {
2374 /*
2375 * The page may have dirty, unmapped buffers. For example,
2376 * they may have been added in ext3_writepage(). Make them
2377 * freeable here, so the page does not leak.
2378 */
2379#if 0
2380 /* Not really sure about this - do we need this ? */
2381 if (page->mapping->a_ops->invalidatepage)
2382 page->mapping->a_ops->invalidatepage(page, offset);
2383#endif
2384 unlock_page(page);
2385 return 0; /* don't care */
2386 }
2387
2388 /*
2389 * The page straddles i_size. It must be zeroed out on each and every
2390 * writepage invocation because it may be mmapped. "A file is mapped
2391 * in multiples of the page size. For a file that is not a multiple of
2392 * the page size, the remaining memory is zeroed when mapped, and
2393 * writes to that region are not written out to the file."
2394 */
01f2705d 2395 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
1da177e4
LT
2396out:
2397 ret = mpage_writepage(page, get_block, wbc);
2398 if (ret == -EAGAIN)
2399 ret = __block_write_full_page(inode, page, get_block, wbc);
2400 return ret;
2401}
2402EXPORT_SYMBOL(nobh_writepage);
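/*
 * Illustrative sketch (editor addition, assumption): a filesystem that
 * opts out of attaching buffer_heads to data pages (as ext2 does with
 * its "nobh" mount option) wires the nobh_* helpers together roughly
 * like this.  The myfs_nobh_* names and myfs_get_block() are
 * hypothetical.
 */
static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int myfs_nobh_prepare_write(struct file *file, struct page *page,
                                   unsigned from, unsigned to)
{
        return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
                               struct writeback_control *wbc)
{
        return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
        .writepage      = myfs_nobh_writepage,
        .prepare_write  = myfs_nobh_prepare_write,
        .commit_write   = nobh_commit_write,
        /* .readpage etc. omitted for brevity */
};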
2403
2404/*
2405 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2406 */
2407int nobh_truncate_page(struct address_space *mapping, loff_t from)
2408{
2409 struct inode *inode = mapping->host;
2410 unsigned blocksize = 1 << inode->i_blkbits;
2411 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2412 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2413 unsigned to;
2414 struct page *page;
f5e54d6e 2415 const struct address_space_operations *a_ops = mapping->a_ops;
1da177e4
LT
2416 int ret = 0;
2417
2418 if ((offset & (blocksize - 1)) == 0)
2419 goto out;
2420
2421 ret = -ENOMEM;
2422 page = grab_cache_page(mapping, index);
2423 if (!page)
2424 goto out;
2425
2426 to = (offset + blocksize) & ~(blocksize - 1);
2427 ret = a_ops->prepare_write(NULL, page, offset, to);
2428 if (ret == 0) {
01f2705d
ND
2429 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
2430 KM_USER0);
57bf63d6
DK
2431 /*
2432 * It would be more correct to call aops->commit_write()
2433 * here, but this is more efficient.
2434 */
2435 SetPageUptodate(page);
1da177e4
LT
2436 set_page_dirty(page);
2437 }
2438 unlock_page(page);
2439 page_cache_release(page);
2440out:
2441 return ret;
2442}
2443EXPORT_SYMBOL(nobh_truncate_page);
2444
2445int block_truncate_page(struct address_space *mapping,
2446 loff_t from, get_block_t *get_block)
2447{
2448 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2449 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2450 unsigned blocksize;
54b21a79 2451 sector_t iblock;
1da177e4
LT
2452 unsigned length, pos;
2453 struct inode *inode = mapping->host;
2454 struct page *page;
2455 struct buffer_head *bh;
1da177e4
LT
2456 int err;
2457
2458 blocksize = 1 << inode->i_blkbits;
2459 length = offset & (blocksize - 1);
2460
2461 /* Block boundary? Nothing to do */
2462 if (!length)
2463 return 0;
2464
2465 length = blocksize - length;
54b21a79 2466 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2467
2468 page = grab_cache_page(mapping, index);
2469 err = -ENOMEM;
2470 if (!page)
2471 goto out;
2472
2473 if (!page_has_buffers(page))
2474 create_empty_buffers(page, blocksize, 0);
2475
2476 /* Find the buffer that contains "offset" */
2477 bh = page_buffers(page);
2478 pos = blocksize;
2479 while (offset >= pos) {
2480 bh = bh->b_this_page;
2481 iblock++;
2482 pos += blocksize;
2483 }
2484
2485 err = 0;
2486 if (!buffer_mapped(bh)) {
b0cf2321 2487 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2488 err = get_block(inode, iblock, bh, 0);
2489 if (err)
2490 goto unlock;
2491 /* unmapped? It's a hole - nothing to do */
2492 if (!buffer_mapped(bh))
2493 goto unlock;
2494 }
2495
2496 /* Ok, it's mapped. Make sure it's up-to-date */
2497 if (PageUptodate(page))
2498 set_buffer_uptodate(bh);
2499
33a266dd 2500 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2501 err = -EIO;
2502 ll_rw_block(READ, 1, &bh);
2503 wait_on_buffer(bh);
2504 /* Uhhuh. Read error. Complain and punt. */
2505 if (!buffer_uptodate(bh))
2506 goto unlock;
2507 }
2508
01f2705d 2509 zero_user_page(page, offset, length, KM_USER0);
1da177e4
LT
2510 mark_buffer_dirty(bh);
2511 err = 0;
2512
2513unlock:
2514 unlock_page(page);
2515 page_cache_release(page);
2516out:
2517 return err;
2518}
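/*
 * Illustrative sketch (editor addition, not part of this file): a
 * filesystem's ->truncate usually calls block_truncate_page() first so
 * that the block straddling the new EOF is zeroed before the tail
 * blocks are released.  myfs_truncate and myfs_get_block() are
 * hypothetical names.
 */
static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static void myfs_truncate(struct inode *inode)
{
        /*
         * Zero the tail of the block that now straddles EOF so a later
         * extension cannot expose stale data.  Errors are best-effort.
         */
        block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);

        /* ... then free the filesystem's blocks beyond the new i_size ... */
}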
2519
2520/*
2521 * The generic ->writepage function for buffer-backed address_spaces
2522 */
2523int block_write_full_page(struct page *page, get_block_t *get_block,
2524 struct writeback_control *wbc)
2525{
2526 struct inode * const inode = page->mapping->host;
2527 loff_t i_size = i_size_read(inode);
2528 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2529 unsigned offset;
1da177e4
LT
2530
2531 /* Is the page fully inside i_size? */
2532 if (page->index < end_index)
2533 return __block_write_full_page(inode, page, get_block, wbc);
2534
2535 /* Is the page fully outside i_size? (truncate in progress) */
2536 offset = i_size & (PAGE_CACHE_SIZE-1);
2537 if (page->index >= end_index+1 || !offset) {
2538 /*
2539 * The page may have dirty, unmapped buffers. For example,
2540 * they may have been added in ext3_writepage(). Make them
2541 * freeable here, so the page does not leak.
2542 */
aaa4059b 2543 do_invalidatepage(page, 0);
1da177e4
LT
2544 unlock_page(page);
2545 return 0; /* don't care */
2546 }
2547
2548 /*
2549 * The page straddles i_size. It must be zeroed out on each and every
2550 * writepage invocation because it may be mmapped. "A file is mapped
2551 * in multiples of the page size. For a file that is not a multiple of
2552 * the page size, the remaining memory is zeroed when mapped, and
2553 * writes to that region are not written out to the file."
2554 */
01f2705d 2555 zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
1da177e4
LT
2556 return __block_write_full_page(inode, page, get_block, wbc);
2557}
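/*
 * Illustrative sketch (editor addition): a filesystem's ->writepage is
 * usually just a thin wrapper that supplies its get_block callback to
 * block_write_full_page().  myfs_writepage and myfs_get_block() are
 * hypothetical names.
 */
static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, myfs_get_block, wbc);
}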
2558
2559sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2560 get_block_t *get_block)
2561{
2562 struct buffer_head tmp;
2563 struct inode *inode = mapping->host;
2564 tmp.b_state = 0;
2565 tmp.b_blocknr = 0;
b0cf2321 2566 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2567 get_block(inode, block, &tmp, 0);
2568 return tmp.b_blocknr;
2569}
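/*
 * Illustrative sketch (editor addition): the FIBMAP ioctl reaches a
 * filesystem through ->bmap, which is normally just generic_block_bmap()
 * curried with the filesystem's get_block.  myfs_bmap and
 * myfs_get_block() are hypothetical names.
 */
static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, myfs_get_block);
}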
2570
2571static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2572{
2573 struct buffer_head *bh = bio->bi_private;
2574
2575 if (bio->bi_size)
2576 return 1;
2577
2578 if (err == -EOPNOTSUPP) {
2579 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2580 set_bit(BH_Eopnotsupp, &bh->b_state);
2581 }
2582
2583 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2584 bio_put(bio);
2585 return 0;
2586}
2587
2588int submit_bh(int rw, struct buffer_head * bh)
2589{
2590 struct bio *bio;
2591 int ret = 0;
2592
2593 BUG_ON(!buffer_locked(bh));
2594 BUG_ON(!buffer_mapped(bh));
2595 BUG_ON(!bh->b_end_io);
2596
2597 if (buffer_ordered(bh) && (rw == WRITE))
2598 rw = WRITE_BARRIER;
2599
2600 /*
2601 * Only clear out a write error when rewriting; should this
2602 * include WRITE_SYNC as well?
2603 */
2604 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2605 clear_buffer_write_io_error(bh);
2606
2607 /*
2608 * from here on down, it's all bio -- do the initial mapping,
2609 * submit_bio -> generic_make_request may further map this bio around
2610 */
2611 bio = bio_alloc(GFP_NOIO, 1);
2612
2613 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2614 bio->bi_bdev = bh->b_bdev;
2615 bio->bi_io_vec[0].bv_page = bh->b_page;
2616 bio->bi_io_vec[0].bv_len = bh->b_size;
2617 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2618
2619 bio->bi_vcnt = 1;
2620 bio->bi_idx = 0;
2621 bio->bi_size = bh->b_size;
2622
2623 bio->bi_end_io = end_bio_bh_io_sync;
2624 bio->bi_private = bh;
2625
2626 bio_get(bio);
2627 submit_bio(rw, bio);
2628
2629 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2630 ret = -EOPNOTSUPP;
2631
2632 bio_put(bio);
2633 return ret;
2634}
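/*
 * Illustrative sketch (editor addition, assumption): the common pattern
 * for driving a single mapped buffer through submit_bh() synchronously,
 * essentially what __bread() does.  The caller must already hold a
 * reference to the buffer_head.
 */
static int read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);                     /* dropped by end_buffer_read_sync() */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}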
2635
2636/**
2637 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2638 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2639 * @nr: number of &struct buffer_heads in the array
2640 * @bhs: array of pointers to &struct buffer_head
2641 *
a7662236
JK
2642 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2643 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2644 * %SWRITE is like %WRITE except that we make sure the *current* data in the
2645 * buffers is sent to disk. The fourth %READA option is described in the documentation
2646 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
2647 *
2648 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2649 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2650 * clean when doing a write request, and any buffer that appears to be
2651 * up-to-date when doing a read request. Further it marks as clean buffers that
2652 * are processed for writing (the buffer cache won't assume that they are
2653 * actually clean until the buffer gets unlocked).
1da177e4
LT
2654 *
2655 * ll_rw_block sets b_end_io to a simple completion handler that marks
2656 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2657 * any waiters.
2658 *
2659 * All of the buffers must be for the same device, and must also be a
2660 * multiple of the current approved size for the device.
2661 */
2662void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2663{
2664 int i;
2665
2666 for (i = 0; i < nr; i++) {
2667 struct buffer_head *bh = bhs[i];
2668
a7662236
JK
2669 if (rw == SWRITE)
2670 lock_buffer(bh);
2671 else if (test_set_buffer_locked(bh))
1da177e4
LT
2672 continue;
2673
a7662236 2674 if (rw == WRITE || rw == SWRITE) {
1da177e4 2675 if (test_clear_buffer_dirty(bh)) {
76c3073a 2676 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2677 get_bh(bh);
1da177e4
LT
2678 submit_bh(WRITE, bh);
2679 continue;
2680 }
2681 } else {
1da177e4 2682 if (!buffer_uptodate(bh)) {
76c3073a 2683 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2684 get_bh(bh);
1da177e4
LT
2685 submit_bh(rw, bh);
2686 continue;
2687 }
2688 }
2689 unlock_buffer(bh);
1da177e4
LT
2690 }
2691}
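/*
 * Illustrative sketch (editor addition): the classic caller pattern for
 * ll_rw_block(): start a read on a buffer that may not be uptodate, then
 * wait and check.  The bh is assumed to come from __getblk()/sb_getblk()
 * and to be mapped.
 */
static int read_buffer(struct buffer_head *bh)
{
        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        return -EIO;
        }
        return 0;
}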
2692
2693/*
2694 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2695 * and then start new I/O and then wait upon it. The caller must have a ref on
2696 * the buffer_head.
2697 */
2698int sync_dirty_buffer(struct buffer_head *bh)
2699{
2700 int ret = 0;
2701
2702 WARN_ON(atomic_read(&bh->b_count) < 1);
2703 lock_buffer(bh);
2704 if (test_clear_buffer_dirty(bh)) {
2705 get_bh(bh);
2706 bh->b_end_io = end_buffer_write_sync;
2707 ret = submit_bh(WRITE, bh);
2708 wait_on_buffer(bh);
2709 if (buffer_eopnotsupp(bh)) {
2710 clear_buffer_eopnotsupp(bh);
2711 ret = -EOPNOTSUPP;
2712 }
2713 if (!ret && !buffer_uptodate(bh))
2714 ret = -EIO;
2715 } else {
2716 unlock_buffer(bh);
2717 }
2718 return ret;
2719}
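/*
 * Illustrative sketch (editor addition, assumption): the usual
 * metadata-update pattern around sync_dirty_buffer() when a filesystem
 * wants the write on stable storage before returning (e.g. for "sync"
 * mounts).  update_block_sync is a hypothetical helper; len must not
 * exceed bh->b_size.
 */
static int update_block_sync(struct super_block *sb, sector_t block,
                             const void *data, size_t len)
{
        struct buffer_head *bh = sb_bread(sb, block);
        int err;

        if (!bh)
                return -EIO;
        memcpy(bh->b_data, data, len);
        mark_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);    /* waits for the write to finish */
        brelse(bh);
        return err;
}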
2720
2721/*
2722 * try_to_free_buffers() checks if all the buffers on this particular page
2723 * are unused, and releases them if so.
2724 *
2725 * Exclusion against try_to_free_buffers may be obtained by either
2726 * locking the page or by holding its mapping's private_lock.
2727 *
2728 * If the page is dirty but all the buffers are clean then we need to
2729 * be sure to mark the page clean as well. This is because the page
2730 * may be against a block device, and a later reattachment of buffers
2731 * to a dirty page will set *all* buffers dirty, which would corrupt
2732 * filesystem data on the same device.
2733 *
2734 * The same applies to regular filesystem pages: if all the buffers are
2735 * clean then we set the page clean and proceed. To do that, we require
2736 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2737 * private_lock.
2738 *
2739 * try_to_free_buffers() is non-blocking.
2740 */
2741static inline int buffer_busy(struct buffer_head *bh)
2742{
2743 return atomic_read(&bh->b_count) |
2744 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2745}
2746
2747static int
2748drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2749{
2750 struct buffer_head *head = page_buffers(page);
2751 struct buffer_head *bh;
2752
2753 bh = head;
2754 do {
de7d5a3b 2755 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
2756 set_bit(AS_EIO, &page->mapping->flags);
2757 if (buffer_busy(bh))
2758 goto failed;
2759 bh = bh->b_this_page;
2760 } while (bh != head);
2761
2762 do {
2763 struct buffer_head *next = bh->b_this_page;
2764
2765 if (!list_empty(&bh->b_assoc_buffers))
2766 __remove_assoc_queue(bh);
2767 bh = next;
2768 } while (bh != head);
2769 *buffers_to_free = head;
2770 __clear_page_buffers(page);
2771 return 1;
2772failed:
2773 return 0;
2774}
2775
2776int try_to_free_buffers(struct page *page)
2777{
2778 struct address_space * const mapping = page->mapping;
2779 struct buffer_head *buffers_to_free = NULL;
2780 int ret = 0;
2781
2782 BUG_ON(!PageLocked(page));
ecdfc978 2783 if (PageWriteback(page))
1da177e4
LT
2784 return 0;
2785
2786 if (mapping == NULL) { /* can this still happen? */
2787 ret = drop_buffers(page, &buffers_to_free);
2788 goto out;
2789 }
2790
2791 spin_lock(&mapping->private_lock);
2792 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
2793
2794 /*
2795 * If the filesystem writes its buffers by hand (eg ext3)
2796 * then we can have clean buffers against a dirty page. We
2797 * clean the page here; otherwise the VM will never notice
2798 * that the filesystem did any IO at all.
2799 *
2800 * Also, during truncate, discard_buffer will have marked all
2801 * the page's buffers clean. We discover that here and clean
2802 * the page also.
87df7241
NP
2803 *
2804 * private_lock must be held over this entire operation in order
2805 * to synchronise against __set_page_dirty_buffers and prevent the
2806 * dirty bit from being lost.
ecdfc978
LT
2807 */
2808 if (ret)
2809 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 2810 spin_unlock(&mapping->private_lock);
1da177e4
LT
2811out:
2812 if (buffers_to_free) {
2813 struct buffer_head *bh = buffers_to_free;
2814
2815 do {
2816 struct buffer_head *next = bh->b_this_page;
2817 free_buffer_head(bh);
2818 bh = next;
2819 } while (bh != buffers_to_free);
2820 }
2821 return ret;
2822}
2823EXPORT_SYMBOL(try_to_free_buffers);
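/*
 * Illustrative sketch (editor addition, not part of this file): a
 * filesystem that keeps no private per-page state beyond its
 * buffer_heads can point ->releasepage at a thin wrapper like this;
 * try_to_free_buffers() then decides whether the page can be freed.
 * myfs_releasepage is a hypothetical name.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
        /* Nothing of our own pinned to the page; let the VM strip the bhs. */
        return try_to_free_buffers(page);
}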
2824
3978d717 2825void block_sync_page(struct page *page)
1da177e4
LT
2826{
2827 struct address_space *mapping;
2828
2829 smp_mb();
2830 mapping = page_mapping(page);
2831 if (mapping)
2832 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
2833}
2834
2835/*
2836 * There are no bdflush tunables left. But distributions are
2837 * still running obsolete flush daemons, so we terminate them here.
2838 *
2839 * Use of bdflush() is deprecated and will be removed in a future kernel.
2840 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2841 */
2842asmlinkage long sys_bdflush(int func, long data)
2843{
2844 static int msg_count;
2845
2846 if (!capable(CAP_SYS_ADMIN))
2847 return -EPERM;
2848
2849 if (msg_count < 5) {
2850 msg_count++;
2851 printk(KERN_INFO
2852 "warning: process `%s' used the obsolete bdflush"
2853 " system call\n", current->comm);
2854 printk(KERN_INFO "Fix your initscripts?\n");
2855 }
2856
2857 if (func == 1)
2858 do_exit(0);
2859 return 0;
2860}
2861
2862/*
2863 * Buffer-head allocation
2864 */
e18b890b 2865static struct kmem_cache *bh_cachep;
1da177e4
LT
2866
2867/*
2868 * Once the number of bh's in the machine exceeds this level, we start
2869 * stripping them in writeback.
2870 */
2871static int max_buffer_heads;
2872
2873int buffer_heads_over_limit;
2874
2875struct bh_accounting {
2876 int nr; /* Number of live bh's */
2877 int ratelimit; /* Limit cacheline bouncing */
2878};
2879
2880static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2881
2882static void recalc_bh_state(void)
2883{
2884 int i;
2885 int tot = 0;
2886
2887 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2888 return;
2889 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 2890 for_each_online_cpu(i)
1da177e4
LT
2891 tot += per_cpu(bh_accounting, i).nr;
2892 buffer_heads_over_limit = (tot > max_buffer_heads);
2893}
2894
dd0fc66f 2895struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 2896{
a35afb83 2897 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
1da177e4 2898 if (ret) {
a35afb83 2899 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 2900 get_cpu_var(bh_accounting).nr++;
1da177e4 2901 recalc_bh_state();
736c7b80 2902 put_cpu_var(bh_accounting);
1da177e4
LT
2903 }
2904 return ret;
2905}
2906EXPORT_SYMBOL(alloc_buffer_head);
2907
2908void free_buffer_head(struct buffer_head *bh)
2909{
2910 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2911 kmem_cache_free(bh_cachep, bh);
736c7b80 2912 get_cpu_var(bh_accounting).nr--;
1da177e4 2913 recalc_bh_state();
736c7b80 2914 put_cpu_var(bh_accounting);
1da177e4
LT
2915}
2916EXPORT_SYMBOL(free_buffer_head);
2917
1da177e4
LT
2918static void buffer_exit_cpu(int cpu)
2919{
2920 int i;
2921 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2922
2923 for (i = 0; i < BH_LRU_SIZE; i++) {
2924 brelse(b->bhs[i]);
2925 b->bhs[i] = NULL;
2926 }
8a143426
ED
2927 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2928 per_cpu(bh_accounting, cpu).nr = 0;
2929 put_cpu_var(bh_accounting);
1da177e4
LT
2930}
2931
2932static int buffer_cpu_notify(struct notifier_block *self,
2933 unsigned long action, void *hcpu)
2934{
8bb78442 2935 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
2936 buffer_exit_cpu((unsigned long)hcpu);
2937 return NOTIFY_OK;
2938}
1da177e4
LT
2939
2940void __init buffer_init(void)
2941{
2942 int nrpages;
2943
a35afb83
CL
2944 bh_cachep = KMEM_CACHE(buffer_head,
2945 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
1da177e4
LT
2946
2947 /*
2948 * Limit the bh occupancy to 10% of ZONE_NORMAL
2949 */
2950 nrpages = (nr_free_buffer_pages() * 10) / 100;
2951 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
2952 hotcpu_notifier(buffer_cpu_notify, 0);
2953}
2954
2955EXPORT_SYMBOL(__bforget);
2956EXPORT_SYMBOL(__brelse);
2957EXPORT_SYMBOL(__wait_on_buffer);
2958EXPORT_SYMBOL(block_commit_write);
2959EXPORT_SYMBOL(block_prepare_write);
2960EXPORT_SYMBOL(block_read_full_page);
2961EXPORT_SYMBOL(block_sync_page);
2962EXPORT_SYMBOL(block_truncate_page);
2963EXPORT_SYMBOL(block_write_full_page);
2964EXPORT_SYMBOL(cont_prepare_write);
1da177e4
LT
2965EXPORT_SYMBOL(end_buffer_read_sync);
2966EXPORT_SYMBOL(end_buffer_write_sync);
2967EXPORT_SYMBOL(file_fsync);
2968EXPORT_SYMBOL(fsync_bdev);
2969EXPORT_SYMBOL(generic_block_bmap);
2970EXPORT_SYMBOL(generic_commit_write);
2971EXPORT_SYMBOL(generic_cont_expand);
05eb0b51 2972EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
2973EXPORT_SYMBOL(init_buffer);
2974EXPORT_SYMBOL(invalidate_bdev);
2975EXPORT_SYMBOL(ll_rw_block);
2976EXPORT_SYMBOL(mark_buffer_dirty);
2977EXPORT_SYMBOL(submit_bh);
2978EXPORT_SYMBOL(sync_dirty_buffer);
2979EXPORT_SYMBOL(unlock_buffer);