fs/buffer.c (net-next-2.6.git blame, at commit "mm: remove destroy_dirty_buffers from invalidate_bdev()")
1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/smp_lock.h>
28#include <linux/capability.h>
29#include <linux/blkdev.h>
30#include <linux/file.h>
31#include <linux/quotaops.h>
32#include <linux/highmem.h>
33#include <linux/module.h>
34#include <linux/writeback.h>
35#include <linux/hash.h>
36#include <linux/suspend.h>
37#include <linux/buffer_head.h>
38#include <linux/task_io_accounting_ops.h>
39#include <linux/bio.h>
40#include <linux/notifier.h>
41#include <linux/cpu.h>
42#include <linux/bitops.h>
43#include <linux/mpage.h>
44#include <linux/bit_spinlock.h>
45
46static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47static void invalidate_bh_lrus(void);
48
49#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50
51inline void
52init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53{
54 bh->b_end_io = handler;
55 bh->b_private = private;
56}
57
58static int sync_buffer(void *word)
59{
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
63
64 smp_mb();
65 bd = bh->b_bdev;
66 if (bd)
67 blk_run_address_space(bd->bd_inode->i_mapping);
68 io_schedule();
69 return 0;
70}
71
72void fastcall __lock_buffer(struct buffer_head *bh)
73{
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
76}
77EXPORT_SYMBOL(__lock_buffer);
78
79void fastcall unlock_buffer(struct buffer_head *bh)
80{
81 smp_mb__before_clear_bit();
82 clear_buffer_locked(bh);
83 smp_mb__after_clear_bit();
84 wake_up_bit(&bh->b_state, BH_Lock);
85}
86
87/*
88 * Block until a buffer comes unlocked. This doesn't stop it
89 * from becoming locked again - you have to lock it yourself
90 * if you want to preserve its state.
91 */
92void __wait_on_buffer(struct buffer_head * bh)
93{
94 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
95}
96
97static void
98__clear_page_buffers(struct page *page)
99{
100 ClearPagePrivate(page);
101 set_page_private(page, 0);
102 page_cache_release(page);
103}
104
105static void buffer_io_error(struct buffer_head *bh)
106{
107 char b[BDEVNAME_SIZE];
108
109 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
110 bdevname(bh->b_bdev, b),
111 (unsigned long long)bh->b_blocknr);
112}
113
114/*
115 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
116 * unlock the buffer. This is what ll_rw_block uses too.
117 */
118void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
119{
120 if (uptodate) {
121 set_buffer_uptodate(bh);
122 } else {
123 /* This happens, due to failed READA attempts. */
124 clear_buffer_uptodate(bh);
125 }
126 unlock_buffer(bh);
127 put_bh(bh);
128}
129
130void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
131{
132 char b[BDEVNAME_SIZE];
133
134 if (uptodate) {
135 set_buffer_uptodate(bh);
136 } else {
137 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
138 buffer_io_error(bh);
139 printk(KERN_WARNING "lost page write due to "
140 "I/O error on %s\n",
141 bdevname(bh->b_bdev, b));
142 }
143 set_buffer_write_io_error(bh);
144 clear_buffer_uptodate(bh);
145 }
146 unlock_buffer(bh);
147 put_bh(bh);
148}
149
150/*
151 * Write out and wait upon all the dirty data associated with a block
152 * device via its mapping. Does not take the superblock lock.
153 */
154int sync_blockdev(struct block_device *bdev)
155{
156 int ret = 0;
157
158 if (bdev)
159 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
160 return ret;
161}
162EXPORT_SYMBOL(sync_blockdev);
163
164/*
165 * Write out and wait upon all dirty data associated with this
166 * device. Filesystem data as well as the underlying block
167 * device. Takes the superblock lock.
168 */
169int fsync_bdev(struct block_device *bdev)
170{
171 struct super_block *sb = get_super(bdev);
172 if (sb) {
173 int res = fsync_super(sb);
174 drop_super(sb);
175 return res;
176 }
177 return sync_blockdev(bdev);
178}
179
180/**
181 * freeze_bdev -- lock a filesystem and force it into a consistent state
182 * @bdev: blockdevice to lock
183 *
184 * This takes the block device bd_mount_sem to make sure no new mounts
185 * happen on bdev until thaw_bdev() is called.
186 * If a superblock is found on this device, we take the s_umount semaphore
187 * on it to make sure nobody unmounts until the snapshot creation is done.
188 */
189struct super_block *freeze_bdev(struct block_device *bdev)
190{
191 struct super_block *sb;
192
193 down(&bdev->bd_mount_sem);
194 sb = get_super(bdev);
195 if (sb && !(sb->s_flags & MS_RDONLY)) {
196 sb->s_frozen = SB_FREEZE_WRITE;
197 smp_wmb();
198
199 __fsync_super(sb);
200
201 sb->s_frozen = SB_FREEZE_TRANS;
202 smp_wmb();
203
204 sync_blockdev(sb->s_bdev);
205
206 if (sb->s_op->write_super_lockfs)
207 sb->s_op->write_super_lockfs(sb);
208 }
209
210 sync_blockdev(bdev);
211 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
212}
213EXPORT_SYMBOL(freeze_bdev);
214
215/**
216 * thaw_bdev -- unlock filesystem
217 * @bdev: blockdevice to unlock
218 * @sb: associated superblock
219 *
220 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
221 */
222void thaw_bdev(struct block_device *bdev, struct super_block *sb)
223{
224 if (sb) {
225 BUG_ON(sb->s_bdev != bdev);
226
227 if (sb->s_op->unlockfs)
228 sb->s_op->unlockfs(sb);
229 sb->s_frozen = SB_UNFROZEN;
230 smp_wmb();
231 wake_up(&sb->s_wait_unfrozen);
232 drop_super(sb);
233 }
234
235 up(&bdev->bd_mount_sem);
236}
237EXPORT_SYMBOL(thaw_bdev);
238
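/*
 * Editor's sketch (not part of the original file): how a snapshot driver
 * might bracket its work with freeze_bdev()/thaw_bdev().  The
 * take_snapshot callback is hypothetical; freeze_bdev() may return NULL
 * when no filesystem is mounted, and thaw_bdev() copes with that.
 */
static int example_snapshot(struct block_device *bdev,
			    int (*take_snapshot)(struct block_device *))
{
	struct super_block *sb;
	int err;

	sb = freeze_bdev(bdev);		/* blocks new mounts/writes, syncs the fs */
	err = take_snapshot(bdev);	/* device is now in a consistent state */
	thaw_bdev(bdev, sb);		/* releases bd_mount_sem, unfreezes sb */
	return err;
}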
239/*
240 * Various filesystems appear to want __find_get_block to be non-blocking.
241 * But it's the page lock which protects the buffers. To get around this,
242 * we get exclusion from try_to_free_buffers with the blockdev mapping's
243 * private_lock.
244 *
245 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
246 * may be quite high. This code could TryLock the page, and if that
247 * succeeds, there is no need to take private_lock. (But if
248 * private_lock is contended then so is mapping->tree_lock).
249 */
250static struct buffer_head *
251__find_get_block_slow(struct block_device *bdev, sector_t block)
252{
253 struct inode *bd_inode = bdev->bd_inode;
254 struct address_space *bd_mapping = bd_inode->i_mapping;
255 struct buffer_head *ret = NULL;
256 pgoff_t index;
257 struct buffer_head *bh;
258 struct buffer_head *head;
259 struct page *page;
260 int all_mapped = 1;
261
262 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
263 page = find_get_page(bd_mapping, index);
264 if (!page)
265 goto out;
266
267 spin_lock(&bd_mapping->private_lock);
268 if (!page_has_buffers(page))
269 goto out_unlock;
270 head = page_buffers(page);
271 bh = head;
272 do {
273 if (bh->b_blocknr == block) {
274 ret = bh;
275 get_bh(bh);
276 goto out_unlock;
277 }
278 if (!buffer_mapped(bh))
279 all_mapped = 0;
280 bh = bh->b_this_page;
281 } while (bh != head);
282
283 /* we might be here because some of the buffers on this page are
284 * not mapped. This is due to various races between
285 * file io on the block device and getblk. It gets dealt with
286 * elsewhere, don't buffer_error if we had some unmapped buffers
287 */
288 if (all_mapped) {
289 printk("__find_get_block_slow() failed. "
290 "block=%llu, b_blocknr=%llu\n",
291 (unsigned long long)block,
292 (unsigned long long)bh->b_blocknr);
293 printk("b_state=0x%08lx, b_size=%zu\n",
294 bh->b_state, bh->b_size);
295 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
296 }
297out_unlock:
298 spin_unlock(&bd_mapping->private_lock);
299 page_cache_release(page);
300out:
301 return ret;
302}
303
304/* If invalidate_buffers() will trash dirty buffers, it means some kind
305 of fs corruption is going on. Trashing dirty data always implies losing
306 information that was supposed to be just stored on the physical layer
307 by the user.
308
309 Thus invalidate_buffers in general usage is not allowed to trash
310 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
311 be preserved. These buffers are simply skipped.
312
313 We also skip buffers which are still in use. For example this can
314 happen if a userspace program is reading the block device.
315
316 NOTE: In the case where the user removed a removable-media-disk even if
317 there's still dirty data not synced on disk (due to a bug in the device driver
318 or to an error by the user), by not destroying the dirty buffers we could
319 generate corruption also on the next media inserted, thus a parameter is
320 necessary to handle this case in the most safe way possible (trying
321 to not corrupt also the new disk inserted with the data belonging to
322 the old now corrupted disk). Also for the ramdisk the natural thing
323 to do in order to release the ramdisk memory is to destroy dirty buffers.
324
325 These are two special cases. Normal usage implies that the device driver
326 issues a sync on the device (without waiting for I/O completion) and
327 then an invalidate_buffers call that doesn't trash dirty buffers.
328
329 For handling cache coherency with the blkdev pagecache the 'update' case
330 has been introduced. It is needed to re-read from disk any pinned
331 buffer. NOTE: re-reading from disk is destructive so we can do it only
332 when we assume nobody is changing the buffercache under our I/O and when
333 we think the disk contains more recent information than the buffercache.
334 The update == 1 pass marks the buffers we need to update, the update == 2
335 pass does the actual I/O. */
336void invalidate_bdev(struct block_device *bdev)
337{
338 struct address_space *mapping = bdev->bd_inode->i_mapping;
339
340 if (mapping->nrpages == 0)
341 return;
342
343 invalidate_bh_lrus();
344 invalidate_mapping_pages(mapping, 0, -1);
345}
346
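/*
 * Editor's sketch (not part of the original file): the "normal usage"
 * described above, as a removable-media driver might do it on a media
 * change - push out what can still be written, then drop the now-stale
 * clean pagecache and per-CPU buffer_head LRU references.
 */
static void example_media_change(struct block_device *bdev)
{
	sync_blockdev(bdev);	/* best effort for the old medium */
	invalidate_bdev(bdev);	/* invalidate_bh_lrus() + drop clean pages */
}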
347/*
348 * Kick pdflush then try to free up some ZONE_NORMAL memory.
349 */
350static void free_more_memory(void)
351{
352 struct zone **zones;
353 pg_data_t *pgdat;
354
355 wakeup_pdflush(1024);
356 yield();
357
358 for_each_online_pgdat(pgdat) {
359 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
360 if (*zones)
361 try_to_free_pages(zones, GFP_NOFS);
362 }
363}
364
365/*
366 * I/O completion handler for block_read_full_page() - pages
367 * which come unlocked at the end of I/O.
368 */
369static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
370{
371 unsigned long flags;
372 struct buffer_head *first;
373 struct buffer_head *tmp;
374 struct page *page;
375 int page_uptodate = 1;
376
377 BUG_ON(!buffer_async_read(bh));
378
379 page = bh->b_page;
380 if (uptodate) {
381 set_buffer_uptodate(bh);
382 } else {
383 clear_buffer_uptodate(bh);
384 if (printk_ratelimit())
385 buffer_io_error(bh);
386 SetPageError(page);
387 }
388
389 /*
390 * Be _very_ careful from here on. Bad things can happen if
391 * two buffer heads end IO at almost the same time and both
392 * decide that the page is now completely done.
393 */
394 first = page_buffers(page);
395 local_irq_save(flags);
396 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
397 clear_buffer_async_read(bh);
398 unlock_buffer(bh);
399 tmp = bh;
400 do {
401 if (!buffer_uptodate(tmp))
402 page_uptodate = 0;
403 if (buffer_async_read(tmp)) {
404 BUG_ON(!buffer_locked(tmp));
405 goto still_busy;
406 }
407 tmp = tmp->b_this_page;
408 } while (tmp != bh);
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
411
412 /*
413 * If none of the buffers had errors and they are all
414 * uptodate then we can set the page uptodate.
415 */
416 if (page_uptodate && !PageError(page))
417 SetPageUptodate(page);
418 unlock_page(page);
419 return;
420
421still_busy:
422 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
423 local_irq_restore(flags);
424 return;
425}
426
427/*
428 * Completion handler for block_write_full_page() - pages which are unlocked
429 * during I/O, and which have PageWriteback cleared upon I/O completion.
430 */
431static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
432{
433 char b[BDEVNAME_SIZE];
434 unsigned long flags;
435 struct buffer_head *first;
436 struct buffer_head *tmp;
437 struct page *page;
438
439 BUG_ON(!buffer_async_write(bh));
440
441 page = bh->b_page;
442 if (uptodate) {
443 set_buffer_uptodate(bh);
444 } else {
445 if (printk_ratelimit()) {
446 buffer_io_error(bh);
447 printk(KERN_WARNING "lost page write due to "
448 "I/O error on %s\n",
449 bdevname(bh->b_bdev, b));
450 }
451 set_bit(AS_EIO, &page->mapping->flags);
452 set_buffer_write_io_error(bh);
453 clear_buffer_uptodate(bh);
454 SetPageError(page);
455 }
456
457 first = page_buffers(page);
458 local_irq_save(flags);
459 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
460
461 clear_buffer_async_write(bh);
462 unlock_buffer(bh);
463 tmp = bh->b_this_page;
464 while (tmp != bh) {
465 if (buffer_async_write(tmp)) {
466 BUG_ON(!buffer_locked(tmp));
467 goto still_busy;
468 }
469 tmp = tmp->b_this_page;
470 }
471 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
472 local_irq_restore(flags);
473 end_page_writeback(page);
474 return;
475
476still_busy:
477 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
478 local_irq_restore(flags);
479 return;
480}
481
482/*
483 * If a page's buffers are under async read-in (end_buffer_async_read
484 * completion) then there is a possibility that another thread of
485 * control could lock one of the buffers after it has completed
486 * but while some of the other buffers have not completed. This
487 * locked buffer would confuse end_buffer_async_read() into not unlocking
488 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
489 * that this buffer is not under async I/O.
490 *
491 * The page comes unlocked when it has no locked buffer_async buffers
492 * left.
493 *
494 * PageLocked prevents anyone starting new async I/O reads any of
495 * the buffers.
496 *
497 * PageWriteback is used to prevent simultaneous writeout of the same
498 * page.
499 *
500 * PageLocked prevents anyone from starting writeback of a page which is
501 * under read I/O (PageWriteback is only ever set against a locked page).
502 */
503static void mark_buffer_async_read(struct buffer_head *bh)
504{
505 bh->b_end_io = end_buffer_async_read;
506 set_buffer_async_read(bh);
507}
508
509void mark_buffer_async_write(struct buffer_head *bh)
510{
511 bh->b_end_io = end_buffer_async_write;
512 set_buffer_async_write(bh);
513}
514EXPORT_SYMBOL(mark_buffer_async_write);
515
516
517/*
518 * fs/buffer.c contains helper functions for buffer-backed address space's
519 * fsync functions. A common requirement for buffer-based filesystems is
520 * that certain data from the backing blockdev needs to be written out for
521 * a successful fsync(). For example, ext2 indirect blocks need to be
522 * written back and waited upon before fsync() returns.
523 *
524 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
525 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
526 * management of a list of dependent buffers at ->i_mapping->private_list.
527 *
528 * Locking is a little subtle: try_to_free_buffers() will remove buffers
529 * from their controlling inode's queue when they are being freed. But
530 * try_to_free_buffers() will be operating against the *blockdev* mapping
531 * at the time, not against the S_ISREG file which depends on those buffers.
532 * So the locking for private_list is via the private_lock in the address_space
533 * which backs the buffers. Which is different from the address_space
534 * against which the buffers are listed. So for a particular address_space,
535 * mapping->private_lock does *not* protect mapping->private_list! In fact,
536 * mapping->private_list will always be protected by the backing blockdev's
537 * ->private_lock.
538 *
539 * Which introduces a requirement: all buffers on an address_space's
540 * ->private_list must be from the same address_space: the blockdev's.
541 *
542 * address_spaces which do not place buffers at ->private_list via these
543 * utility functions are free to use private_lock and private_list for
544 * whatever they want. The only requirement is that list_empty(private_list)
545 * be true at clear_inode() time.
546 *
547 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
548 * filesystems should do that. invalidate_inode_buffers() should just go
549 * BUG_ON(!list_empty).
550 *
551 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
552 * take an address_space, not an inode. And it should be called
553 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
554 * queued up.
555 *
556 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
557 * list if it is already on a list. Because if the buffer is on a list,
558 * it *must* already be on the right one. If not, the filesystem is being
559 * silly. This will save a ton of locking. But first we have to ensure
560 * that buffers are taken *off* the old inode's list when they are freed
561 * (presumably in truncate). That requires careful auditing of all
562 * filesystems (do it inside bforget()). It could also be done by bringing
563 * b_inode back.
564 */
565
566/*
567 * The buffer's backing address_space's private_lock must be held
568 */
569static inline void __remove_assoc_queue(struct buffer_head *bh)
570{
571 list_del_init(&bh->b_assoc_buffers);
572 WARN_ON(!bh->b_assoc_map);
573 if (buffer_write_io_error(bh))
574 set_bit(AS_EIO, &bh->b_assoc_map->flags);
575 bh->b_assoc_map = NULL;
576}
577
578int inode_has_buffers(struct inode *inode)
579{
580 return !list_empty(&inode->i_data.private_list);
581}
582
583/*
584 * osync is designed to support O_SYNC io. It waits synchronously for
585 * all already-submitted IO to complete, but does not queue any new
586 * writes to the disk.
587 *
588 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
589 * you dirty the buffers, and then use osync_inode_buffers to wait for
590 * completion. Any other dirty buffers which are not yet queued for
591 * write will not be flushed to disk by the osync.
592 */
593static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
594{
595 struct buffer_head *bh;
596 struct list_head *p;
597 int err = 0;
598
599 spin_lock(lock);
600repeat:
601 list_for_each_prev(p, list) {
602 bh = BH_ENTRY(p);
603 if (buffer_locked(bh)) {
604 get_bh(bh);
605 spin_unlock(lock);
606 wait_on_buffer(bh);
607 if (!buffer_uptodate(bh))
608 err = -EIO;
609 brelse(bh);
610 spin_lock(lock);
611 goto repeat;
612 }
613 }
614 spin_unlock(lock);
615 return err;
616}
617
618/**
619 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
620 * buffers
621 * @mapping: the mapping which wants those buffers written
622 *
623 * Starts I/O against the buffers at mapping->private_list, and waits upon
624 * that I/O.
625 *
626 * Basically, this is a convenience function for fsync().
627 * @mapping is a file or directory which needs those buffers to be written for
628 * a successful fsync().
629 */
630int sync_mapping_buffers(struct address_space *mapping)
631{
632 struct address_space *buffer_mapping = mapping->assoc_mapping;
633
634 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
635 return 0;
636
637 return fsync_buffers_list(&buffer_mapping->private_lock,
638 &mapping->private_list);
639}
640EXPORT_SYMBOL(sync_mapping_buffers);
641
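/*
 * Editor's sketch (not part of the original file): a minimal ->fsync()
 * for a buffer-backed filesystem.  The VFS has already queued the data
 * pages; sync_mapping_buffers() writes and waits on the "associated"
 * buffers (e.g. indirect blocks) that were put on ->private_list via
 * mark_buffer_dirty_inode(), and the inode itself is written last.
 */
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err, ret;

	err = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return err;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return err;

	ret = write_inode_now(inode, 1);
	if (err == 0)
		err = ret;
	return err;
}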
642/*
643 * Called when we've recently written block `bblock', and it is known that
644 * `bblock' was for a buffer_boundary() buffer. This means that the block at
645 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
646 * dirty, schedule it for IO. So that indirects merge nicely with their data.
647 */
648void write_boundary_block(struct block_device *bdev,
649 sector_t bblock, unsigned blocksize)
650{
651 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
652 if (bh) {
653 if (buffer_dirty(bh))
654 ll_rw_block(WRITE, 1, &bh);
655 put_bh(bh);
656 }
657}
658
659void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
660{
661 struct address_space *mapping = inode->i_mapping;
662 struct address_space *buffer_mapping = bh->b_page->mapping;
663
664 mark_buffer_dirty(bh);
665 if (!mapping->assoc_mapping) {
666 mapping->assoc_mapping = buffer_mapping;
667 } else {
668 BUG_ON(mapping->assoc_mapping != buffer_mapping);
669 }
670 if (list_empty(&bh->b_assoc_buffers)) {
671 spin_lock(&buffer_mapping->private_lock);
672 list_move_tail(&bh->b_assoc_buffers,
673 &mapping->private_list);
674 bh->b_assoc_map = mapping;
675 spin_unlock(&buffer_mapping->private_lock);
676 }
677}
678EXPORT_SYMBOL(mark_buffer_dirty_inode);
679
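/*
 * Editor's sketch (not part of the original file): updating one slot of
 * an ext2-style indirect block.  mark_buffer_dirty_inode() dirties the
 * buffer and queues it on the inode's ->private_list, so a
 * sync_mapping_buffers()-based ->fsync() will find and flush it.
 * blocknr and slot are assumed to come from the filesystem's own
 * block-mapping code.
 */
static int example_update_indirect(struct inode *inode, sector_t blocknr,
				   unsigned slot, __le32 new_block)
{
	struct buffer_head *bh = sb_bread(inode->i_sb, blocknr);

	if (!bh)
		return -EIO;
	((__le32 *)bh->b_data)[slot] = new_block;
	mark_buffer_dirty_inode(bh, inode);
	brelse(bh);
	return 0;
}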
680/*
681 * Add a page to the dirty page list.
682 *
683 * It is a sad fact of life that this function is called from several places
684 * deeply under spinlocking. It may not sleep.
685 *
686 * If the page has buffers, the uptodate buffers are set dirty, to preserve
687 * dirty-state coherency between the page and the buffers. If the page does
688 * not have buffers then when they are later attached they will all be set
689 * dirty.
690 *
691 * The buffers are dirtied before the page is dirtied. There's a small race
692 * window in which a writepage caller may see the page cleanness but not the
693 * buffer dirtiness. That's fine. If this code were to set the page dirty
694 * before the buffers, a concurrent writepage caller could clear the page dirty
695 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
696 * page on the dirty page list.
697 *
698 * We use private_lock to lock against try_to_free_buffers while using the
699 * page's buffer list. Also use this to protect against clean buffers being
700 * added to the page after it was set dirty.
701 *
702 * FIXME: may need to call ->reservepage here as well. That's rather up to the
703 * address_space though.
704 */
705int __set_page_dirty_buffers(struct page *page)
706{
707 struct address_space * const mapping = page_mapping(page);
708
709 if (unlikely(!mapping))
710 return !TestSetPageDirty(page);
711
712 spin_lock(&mapping->private_lock);
713 if (page_has_buffers(page)) {
714 struct buffer_head *head = page_buffers(page);
715 struct buffer_head *bh = head;
716
717 do {
718 set_buffer_dirty(bh);
719 bh = bh->b_this_page;
720 } while (bh != head);
721 }
722 spin_unlock(&mapping->private_lock);
723
724 if (TestSetPageDirty(page))
725 return 0;
726
727 write_lock_irq(&mapping->tree_lock);
728 if (page->mapping) { /* Race with truncate? */
729 if (mapping_cap_account_dirty(mapping)) {
730 __inc_zone_page_state(page, NR_FILE_DIRTY);
731 task_io_account_write(PAGE_CACHE_SIZE);
732 }
733 radix_tree_tag_set(&mapping->page_tree,
734 page_index(page), PAGECACHE_TAG_DIRTY);
735 }
736 write_unlock_irq(&mapping->tree_lock);
737 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
738 return 1;
739}
740EXPORT_SYMBOL(__set_page_dirty_buffers);
741
742/*
743 * Write out and wait upon a list of buffers.
744 *
745 * We have conflicting pressures: we want to make sure that all
746 * initially dirty buffers get waited on, but that any subsequently
747 * dirtied buffers don't. After all, we don't want fsync to last
748 * forever if somebody is actively writing to the file.
749 *
750 * Do this in two main stages: first we copy dirty buffers to a
751 * temporary inode list, queueing the writes as we go. Then we clean
752 * up, waiting for those writes to complete.
753 *
754 * During this second stage, any subsequent updates to the file may end
755 * up refiling the buffer on the original inode's dirty list again, so
756 * there is a chance we will end up with a buffer queued for write but
757 * not yet completed on that list. So, as a final cleanup we go through
758 * the osync code to catch these locked, dirty buffers without requeuing
759 * any newly dirty buffers for write.
760 */
761static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
762{
763 struct buffer_head *bh;
764 struct list_head tmp;
765 int err = 0, err2;
766
767 INIT_LIST_HEAD(&tmp);
768
769 spin_lock(lock);
770 while (!list_empty(list)) {
771 bh = BH_ENTRY(list->next);
772 __remove_assoc_queue(bh);
773 if (buffer_dirty(bh) || buffer_locked(bh)) {
774 list_add(&bh->b_assoc_buffers, &tmp);
775 if (buffer_dirty(bh)) {
776 get_bh(bh);
777 spin_unlock(lock);
778 /*
779 * Ensure any pending I/O completes so that
780 * ll_rw_block() actually writes the current
781 * contents - it is a noop if I/O is still in
782 * flight on potentially older contents.
783 */
784 ll_rw_block(SWRITE, 1, &bh);
785 brelse(bh);
786 spin_lock(lock);
787 }
788 }
789 }
790
791 while (!list_empty(&tmp)) {
792 bh = BH_ENTRY(tmp.prev);
793 list_del_init(&bh->b_assoc_buffers);
794 get_bh(bh);
795 spin_unlock(lock);
796 wait_on_buffer(bh);
797 if (!buffer_uptodate(bh))
798 err = -EIO;
799 brelse(bh);
800 spin_lock(lock);
801 }
802
803 spin_unlock(lock);
804 err2 = osync_buffers_list(lock, list);
805 if (err)
806 return err;
807 else
808 return err2;
809}
810
811/*
812 * Invalidate any and all dirty buffers on a given inode. We are
813 * probably unmounting the fs, but that doesn't mean we have already
814 * done a sync(). Just drop the buffers from the inode list.
815 *
816 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
817 * assumes that all the buffers are against the blockdev. Not true
818 * for reiserfs.
819 */
820void invalidate_inode_buffers(struct inode *inode)
821{
822 if (inode_has_buffers(inode)) {
823 struct address_space *mapping = &inode->i_data;
824 struct list_head *list = &mapping->private_list;
825 struct address_space *buffer_mapping = mapping->assoc_mapping;
826
827 spin_lock(&buffer_mapping->private_lock);
828 while (!list_empty(list))
829 __remove_assoc_queue(BH_ENTRY(list->next));
830 spin_unlock(&buffer_mapping->private_lock);
831 }
832}
833
834/*
835 * Remove any clean buffers from the inode's buffer list. This is called
836 * when we're trying to free the inode itself. Those buffers can pin it.
837 *
838 * Returns true if all buffers were removed.
839 */
840int remove_inode_buffers(struct inode *inode)
841{
842 int ret = 1;
843
844 if (inode_has_buffers(inode)) {
845 struct address_space *mapping = &inode->i_data;
846 struct list_head *list = &mapping->private_list;
847 struct address_space *buffer_mapping = mapping->assoc_mapping;
848
849 spin_lock(&buffer_mapping->private_lock);
850 while (!list_empty(list)) {
851 struct buffer_head *bh = BH_ENTRY(list->next);
852 if (buffer_dirty(bh)) {
853 ret = 0;
854 break;
855 }
856 __remove_assoc_queue(bh);
857 }
858 spin_unlock(&buffer_mapping->private_lock);
859 }
860 return ret;
861}
862
863/*
864 * Create the appropriate buffers when given a page for data area and
865 * the size of each buffer.. Use the bh->b_this_page linked list to
866 * follow the buffers created. Return NULL if unable to create more
867 * buffers.
868 *
869 * The retry flag is used to differentiate async IO (paging, swapping)
870 * which may not fail from ordinary buffer allocations.
871 */
872struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
873 int retry)
874{
875 struct buffer_head *bh, *head;
876 long offset;
877
878try_again:
879 head = NULL;
880 offset = PAGE_SIZE;
881 while ((offset -= size) >= 0) {
882 bh = alloc_buffer_head(GFP_NOFS);
883 if (!bh)
884 goto no_grow;
885
886 bh->b_bdev = NULL;
887 bh->b_this_page = head;
888 bh->b_blocknr = -1;
889 head = bh;
890
891 bh->b_state = 0;
892 atomic_set(&bh->b_count, 0);
893 bh->b_private = NULL;
894 bh->b_size = size;
895
896 /* Link the buffer to its page */
897 set_bh_page(bh, page, offset);
898
899 init_buffer(bh, NULL, NULL);
900 }
901 return head;
902/*
903 * In case anything failed, we just free everything we got.
904 */
905no_grow:
906 if (head) {
907 do {
908 bh = head;
909 head = head->b_this_page;
910 free_buffer_head(bh);
911 } while (head);
912 }
913
914 /*
915 * Return failure for non-async IO requests. Async IO requests
916 * are not allowed to fail, so we have to wait until buffer heads
917 * become available. But we don't want tasks sleeping with
918 * partially complete buffers, so all were released above.
919 */
920 if (!retry)
921 return NULL;
922
923 /* We're _really_ low on memory. Now we just
924 * wait for old buffer heads to become free due to
925 * finishing IO. Since this is an async request and
926 * the reserve list is empty, we're sure there are
927 * async buffer heads in use.
928 */
929 free_more_memory();
930 goto try_again;
931}
932EXPORT_SYMBOL_GPL(alloc_page_buffers);
933
934static inline void
935link_dev_buffers(struct page *page, struct buffer_head *head)
936{
937 struct buffer_head *bh, *tail;
938
939 bh = head;
940 do {
941 tail = bh;
942 bh = bh->b_this_page;
943 } while (bh);
944 tail->b_this_page = head;
945 attach_page_buffers(page, head);
946}
947
948/*
949 * Initialise the state of a blockdev page's buffers.
950 */
951static void
952init_page_buffers(struct page *page, struct block_device *bdev,
953 sector_t block, int size)
954{
955 struct buffer_head *head = page_buffers(page);
956 struct buffer_head *bh = head;
957 int uptodate = PageUptodate(page);
958
959 do {
960 if (!buffer_mapped(bh)) {
961 init_buffer(bh, NULL, NULL);
962 bh->b_bdev = bdev;
963 bh->b_blocknr = block;
964 if (uptodate)
965 set_buffer_uptodate(bh);
966 set_buffer_mapped(bh);
967 }
968 block++;
969 bh = bh->b_this_page;
970 } while (bh != head);
971}
972
973/*
974 * Create the page-cache page that contains the requested block.
975 *
976 * This is used purely for blockdev mappings.
977 */
978static struct page *
979grow_dev_page(struct block_device *bdev, sector_t block,
980 pgoff_t index, int size)
981{
982 struct inode *inode = bdev->bd_inode;
983 struct page *page;
984 struct buffer_head *bh;
985
986 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
987 if (!page)
988 return NULL;
989
990 BUG_ON(!PageLocked(page));
991
992 if (page_has_buffers(page)) {
993 bh = page_buffers(page);
994 if (bh->b_size == size) {
995 init_page_buffers(page, bdev, block, size);
996 return page;
997 }
998 if (!try_to_free_buffers(page))
999 goto failed;
1000 }
1001
1002 /*
1003 * Allocate some buffers for this page
1004 */
1005 bh = alloc_page_buffers(page, size, 0);
1006 if (!bh)
1007 goto failed;
1008
1009 /*
1010 * Link the page to the buffers and initialise them. Take the
1011 * lock to be atomic wrt __find_get_block(), which does not
1012 * run under the page lock.
1013 */
1014 spin_lock(&inode->i_mapping->private_lock);
1015 link_dev_buffers(page, bh);
1016 init_page_buffers(page, bdev, block, size);
1017 spin_unlock(&inode->i_mapping->private_lock);
1018 return page;
1019
1020failed:
1021 BUG();
1022 unlock_page(page);
1023 page_cache_release(page);
1024 return NULL;
1025}
1026
1027/*
1028 * Create buffers for the specified block device block's page. If
1029 * that page was dirty, the buffers are set dirty also.
1030 *
1031 * Except that's a bug. Attaching dirty buffers to a dirty
1032 * blockdev's page can result in filesystem corruption, because
1033 * some of those buffers may be aliases of filesystem data.
1034 * grow_dev_page() will go BUG() if this happens.
1035 */
1036static int
1037grow_buffers(struct block_device *bdev, sector_t block, int size)
1038{
1039 struct page *page;
1040 pgoff_t index;
1041 int sizebits;
1042
1043 sizebits = -1;
1044 do {
1045 sizebits++;
1046 } while ((size << sizebits) < PAGE_SIZE);
1047
1048 index = block >> sizebits;
1049
1050 /*
1051 * Check for a block which wants to lie outside our maximum possible
1052 * pagecache index. (this comparison is done using sector_t types).
1053 */
1054 if (unlikely(index != block >> sizebits)) {
1055 char b[BDEVNAME_SIZE];
1056
1057 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1058 "device %s\n",
1059 __FUNCTION__, (unsigned long long)block,
1060 bdevname(bdev, b));
1061 return -EIO;
1062 }
1063 block = index << sizebits;
1064 /* Create a page with the proper size buffers.. */
1065 page = grow_dev_page(bdev, block, index, size);
1066 if (!page)
1067 return 0;
1068 unlock_page(page);
1069 page_cache_release(page);
1070 return 1;
1071}
1072
1073static struct buffer_head *
1074__getblk_slow(struct block_device *bdev, sector_t block, int size)
1075{
1076 /* Size must be multiple of hard sectorsize */
1077 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1078 (size < 512 || size > PAGE_SIZE))) {
1079 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1080 size);
1081 printk(KERN_ERR "hardsect size: %d\n",
1082 bdev_hardsect_size(bdev));
1083
1084 dump_stack();
1085 return NULL;
1086 }
1087
1088 for (;;) {
1089 struct buffer_head * bh;
1090 int ret;
1091
1092 bh = __find_get_block(bdev, block, size);
1093 if (bh)
1094 return bh;
1095
1096 ret = grow_buffers(bdev, block, size);
1097 if (ret < 0)
1098 return NULL;
1099 if (ret == 0)
1100 free_more_memory();
1101 }
1102}
1103
1104/*
1105 * The relationship between dirty buffers and dirty pages:
1106 *
1107 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1108 * the page is tagged dirty in its radix tree.
1109 *
1110 * At all times, the dirtiness of the buffers represents the dirtiness of
1111 * subsections of the page. If the page has buffers, the page dirty bit is
1112 * merely a hint about the true dirty state.
1113 *
1114 * When a page is set dirty in its entirety, all its buffers are marked dirty
1115 * (if the page has buffers).
1116 *
1117 * When a buffer is marked dirty, its page is dirtied, but the page's other
1118 * buffers are not.
1119 *
1120 * Also. When blockdev buffers are explicitly read with bread(), they
1121 * individually become uptodate. But their backing page remains not
1122 * uptodate - even if all of its buffers are uptodate. A subsequent
1123 * block_read_full_page() against that page will discover all the uptodate
1124 * buffers, will set the page uptodate and will perform no I/O.
1125 */
1126
1127/**
1128 * mark_buffer_dirty - mark a buffer_head as needing writeout
1129 * @bh: the buffer_head to mark dirty
1130 *
1131 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1132 * backing page dirty, then tag the page as dirty in its address_space's radix
1133 * tree and then attach the address_space's inode to its superblock's dirty
1134 * inode list.
1135 *
1136 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1137 * mapping->tree_lock and the global inode_lock.
1138 */
1139void fastcall mark_buffer_dirty(struct buffer_head *bh)
1140{
1141 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1142 __set_page_dirty_nobuffers(bh->b_page);
1143}
1144
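/*
 * Editor's sketch (not part of the original file): the usual
 * read-modify-write cycle against a metadata block.  mark_buffer_dirty()
 * dirties the buffer and its backing page; the actual writeout happens
 * later through the normal writeback paths.
 */
static int example_set_byte(struct block_device *bdev, sector_t block,
			    unsigned size, unsigned offset, u8 val)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;
	lock_buffer(bh);
	((u8 *)bh->b_data)[offset] = val;
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
	return 0;
}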
1145/*
1146 * Decrement a buffer_head's reference count. If all buffers against a page
1147 * have zero reference count, are clean and unlocked, and if the page is clean
1148 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1149 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1150 * a page but it ends up not being freed, and buffers may later be reattached).
1151 */
1152void __brelse(struct buffer_head * buf)
1153{
1154 if (atomic_read(&buf->b_count)) {
1155 put_bh(buf);
1156 return;
1157 }
1158 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1159 WARN_ON(1);
1160}
1161
1162/*
1163 * bforget() is like brelse(), except it discards any
1164 * potentially dirty data.
1165 */
1166void __bforget(struct buffer_head *bh)
1167{
1168 clear_buffer_dirty(bh);
1169 if (!list_empty(&bh->b_assoc_buffers)) {
1170 struct address_space *buffer_mapping = bh->b_page->mapping;
1171
1172 spin_lock(&buffer_mapping->private_lock);
1173 list_del_init(&bh->b_assoc_buffers);
1174 bh->b_assoc_map = NULL;
1175 spin_unlock(&buffer_mapping->private_lock);
1176 }
1177 __brelse(bh);
1178}
1179
1180static struct buffer_head *__bread_slow(struct buffer_head *bh)
1181{
1182 lock_buffer(bh);
1183 if (buffer_uptodate(bh)) {
1184 unlock_buffer(bh);
1185 return bh;
1186 } else {
1187 get_bh(bh);
1188 bh->b_end_io = end_buffer_read_sync;
1189 submit_bh(READ, bh);
1190 wait_on_buffer(bh);
1191 if (buffer_uptodate(bh))
1192 return bh;
1193 }
1194 brelse(bh);
1195 return NULL;
1196}
1197
1198/*
1199 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1200 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1201 * refcount elevated by one when they're in an LRU. A buffer can only appear
1202 * once in a particular CPU's LRU. A single buffer can be present in multiple
1203 * CPU's LRUs at the same time.
1204 *
1205 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1206 * sb_find_get_block().
1207 *
1208 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1209 * a local interrupt disable for that.
1210 */
1211
1212#define BH_LRU_SIZE 8
1213
1214struct bh_lru {
1215 struct buffer_head *bhs[BH_LRU_SIZE];
1216};
1217
1218static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1219
1220#ifdef CONFIG_SMP
1221#define bh_lru_lock() local_irq_disable()
1222#define bh_lru_unlock() local_irq_enable()
1223#else
1224#define bh_lru_lock() preempt_disable()
1225#define bh_lru_unlock() preempt_enable()
1226#endif
1227
1228static inline void check_irqs_on(void)
1229{
1230#ifdef irqs_disabled
1231 BUG_ON(irqs_disabled());
1232#endif
1233}
1234
1235/*
1236 * The LRU management algorithm is dopey-but-simple. Sorry.
1237 */
1238static void bh_lru_install(struct buffer_head *bh)
1239{
1240 struct buffer_head *evictee = NULL;
1241 struct bh_lru *lru;
1242
1243 check_irqs_on();
1244 bh_lru_lock();
1245 lru = &__get_cpu_var(bh_lrus);
1246 if (lru->bhs[0] != bh) {
1247 struct buffer_head *bhs[BH_LRU_SIZE];
1248 int in;
1249 int out = 0;
1250
1251 get_bh(bh);
1252 bhs[out++] = bh;
1253 for (in = 0; in < BH_LRU_SIZE; in++) {
1254 struct buffer_head *bh2 = lru->bhs[in];
1255
1256 if (bh2 == bh) {
1257 __brelse(bh2);
1258 } else {
1259 if (out >= BH_LRU_SIZE) {
1260 BUG_ON(evictee != NULL);
1261 evictee = bh2;
1262 } else {
1263 bhs[out++] = bh2;
1264 }
1265 }
1266 }
1267 while (out < BH_LRU_SIZE)
1268 bhs[out++] = NULL;
1269 memcpy(lru->bhs, bhs, sizeof(bhs));
1270 }
1271 bh_lru_unlock();
1272
1273 if (evictee)
1274 __brelse(evictee);
1275}
1276
1277/*
1278 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1279 */
1280static struct buffer_head *
1281lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1282{
1283 struct buffer_head *ret = NULL;
1284 struct bh_lru *lru;
1285 unsigned int i;
1286
1287 check_irqs_on();
1288 bh_lru_lock();
1289 lru = &__get_cpu_var(bh_lrus);
1290 for (i = 0; i < BH_LRU_SIZE; i++) {
1291 struct buffer_head *bh = lru->bhs[i];
1292
1293 if (bh && bh->b_bdev == bdev &&
1294 bh->b_blocknr == block && bh->b_size == size) {
1295 if (i) {
1296 while (i) {
1297 lru->bhs[i] = lru->bhs[i - 1];
1298 i--;
1299 }
1300 lru->bhs[0] = bh;
1301 }
1302 get_bh(bh);
1303 ret = bh;
1304 break;
1305 }
1306 }
1307 bh_lru_unlock();
1308 return ret;
1309}
1310
1311/*
1312 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1313 * it in the LRU and mark it as accessed. If it is not present then return
1314 * NULL
1315 */
1316struct buffer_head *
1317__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1318{
1319 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1320
1321 if (bh == NULL) {
1322 bh = __find_get_block_slow(bdev, block);
1323 if (bh)
1324 bh_lru_install(bh);
1325 }
1326 if (bh)
1327 touch_buffer(bh);
1328 return bh;
1329}
1330EXPORT_SYMBOL(__find_get_block);
1331
1332/*
1333 * __getblk will locate (and, if necessary, create) the buffer_head
1334 * which corresponds to the passed block_device, block and size. The
1335 * returned buffer has its reference count incremented.
1336 *
1337 * __getblk() cannot fail - it just keeps trying. If you pass it an
1338 * illegal block number, __getblk() will happily return a buffer_head
1339 * which represents the non-existent block. Very weird.
1340 *
1341 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1342 * attempt is failing. FIXME, perhaps?
1343 */
1344struct buffer_head *
1345__getblk(struct block_device *bdev, sector_t block, unsigned size)
1346{
1347 struct buffer_head *bh = __find_get_block(bdev, block, size);
1348
1349 might_sleep();
1350 if (bh == NULL)
1351 bh = __getblk_slow(bdev, block, size);
1352 return bh;
1353}
1354EXPORT_SYMBOL(__getblk);
1355
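/*
 * Editor's sketch (not part of the original file): when a block is about
 * to be completely overwritten there is no point reading it first.
 * __getblk() returns the buffer (per the comment above it does not fail),
 * the caller fills it, marks it uptodate and dirty, and writeback does
 * the rest.
 */
static struct buffer_head *example_new_block(struct block_device *bdev,
					     sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memset(bh->b_data, 0, size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	return bh;			/* caller brelse()s when done */
}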
1356/*
1357 * Do async read-ahead on a buffer..
1358 */
1359void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1360{
1361 struct buffer_head *bh = __getblk(bdev, block, size);
1362 if (likely(bh)) {
1363 ll_rw_block(READA, 1, &bh);
1364 brelse(bh);
1365 }
1366}
1367EXPORT_SYMBOL(__breadahead);
1368
1369/**
1370 * __bread() - reads a specified block and returns the bh
1371 * @bdev: the block_device to read from
1372 * @block: number of block
1373 * @size: size (in bytes) to read
1374 *
1375 * Reads a specified block, and returns buffer head that contains it.
1376 * It returns NULL if the block was unreadable.
1377 */
1378struct buffer_head *
1379__bread(struct block_device *bdev, sector_t block, unsigned size)
1380{
1381 struct buffer_head *bh = __getblk(bdev, block, size);
1382
1383 if (likely(bh) && !buffer_uptodate(bh))
1384 bh = __bread_slow(bh);
1385 return bh;
1386}
1387EXPORT_SYMBOL(__bread);
1388
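/*
 * Editor's sketch (not part of the original file): kick off asynchronous
 * read-ahead for a run of blocks, then read them for real.  Blocks whose
 * read-ahead already completed are found uptodate in the cache and need
 * no further I/O on the second pass.
 */
static void example_read_run(struct block_device *bdev, sector_t first,
			     unsigned nr, unsigned size)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		__breadahead(bdev, first + i, size);

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = __bread(bdev, first + i, size);

		if (bh) {
			/* ... consume bh->b_data ... */
			brelse(bh);
		}
	}
}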
1389/*
1390 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1391 * This doesn't race because it runs in each cpu either in irq
1392 * or with preempt disabled.
1393 */
1394static void invalidate_bh_lru(void *arg)
1395{
1396 struct bh_lru *b = &get_cpu_var(bh_lrus);
1397 int i;
1398
1399 for (i = 0; i < BH_LRU_SIZE; i++) {
1400 brelse(b->bhs[i]);
1401 b->bhs[i] = NULL;
1402 }
1403 put_cpu_var(bh_lrus);
1404}
1405
1406static void invalidate_bh_lrus(void)
1407{
1408 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1409}
1410
1411void set_bh_page(struct buffer_head *bh,
1412 struct page *page, unsigned long offset)
1413{
1414 bh->b_page = page;
1415 BUG_ON(offset >= PAGE_SIZE);
1416 if (PageHighMem(page))
1417 /*
1418 * This catches illegal uses and preserves the offset:
1419 */
1420 bh->b_data = (char *)(0 + offset);
1421 else
1422 bh->b_data = page_address(page) + offset;
1423}
1424EXPORT_SYMBOL(set_bh_page);
1425
1426/*
1427 * Called when truncating a buffer on a page completely.
1428 */
1429static void discard_buffer(struct buffer_head * bh)
1430{
1431 lock_buffer(bh);
1432 clear_buffer_dirty(bh);
1433 bh->b_bdev = NULL;
1434 clear_buffer_mapped(bh);
1435 clear_buffer_req(bh);
1436 clear_buffer_new(bh);
1437 clear_buffer_delay(bh);
1438 clear_buffer_unwritten(bh);
1439 unlock_buffer(bh);
1440}
1441
1442/**
1443 * block_invalidatepage - invalidate part or all of a buffer-backed page
1444 *
1445 * @page: the page which is affected
1446 * @offset: the index of the truncation point
1447 *
1448 * block_invalidatepage() is called when all or part of the page has become
1449 * invalidated by a truncate operation.
1450 *
1451 * block_invalidatepage() does not have to release all buffers, but it must
1452 * ensure that no dirty buffer is left outside @offset and that no I/O
1453 * is underway against any of the blocks which are outside the truncation
1454 * point. Because the caller is about to free (and possibly reuse) those
1455 * blocks on-disk.
1456 */
1457void block_invalidatepage(struct page *page, unsigned long offset)
1458{
1459 struct buffer_head *head, *bh, *next;
1460 unsigned int curr_off = 0;
1461
1462 BUG_ON(!PageLocked(page));
1463 if (!page_has_buffers(page))
1464 goto out;
1465
1466 head = page_buffers(page);
1467 bh = head;
1468 do {
1469 unsigned int next_off = curr_off + bh->b_size;
1470 next = bh->b_this_page;
1471
1472 /*
1473 * is this block fully invalidated?
1474 */
1475 if (offset <= curr_off)
1476 discard_buffer(bh);
1477 curr_off = next_off;
1478 bh = next;
1479 } while (bh != head);
1480
1481 /*
1482 * We release buffers only if the entire page is being invalidated.
1483 * The get_block cached value has been unconditionally invalidated,
1484 * so real IO is not possible anymore.
1485 */
1486 if (offset == 0)
1487 try_to_release_page(page, 0);
1488out:
1489 return;
1490}
1491EXPORT_SYMBOL(block_invalidatepage);
1492
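/*
 * Editor's sketch (not part of the original file): how a simple
 * buffer-backed filesystem typically wires these helpers into its
 * address_space_operations.  example_get_block() is a hypothetical
 * get_block_t that the filesystem would implement elsewhere.
 */
int example_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create);

static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.set_page_dirty	= __set_page_dirty_buffers,
	.invalidatepage	= block_invalidatepage,
};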
1493/*
1494 * We attach and possibly dirty the buffers atomically wrt
1495 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1496 * is already excluded via the page lock.
1497 */
1498void create_empty_buffers(struct page *page,
1499 unsigned long blocksize, unsigned long b_state)
1500{
1501 struct buffer_head *bh, *head, *tail;
1502
1503 head = alloc_page_buffers(page, blocksize, 1);
1504 bh = head;
1505 do {
1506 bh->b_state |= b_state;
1507 tail = bh;
1508 bh = bh->b_this_page;
1509 } while (bh);
1510 tail->b_this_page = head;
1511
1512 spin_lock(&page->mapping->private_lock);
1513 if (PageUptodate(page) || PageDirty(page)) {
1514 bh = head;
1515 do {
1516 if (PageDirty(page))
1517 set_buffer_dirty(bh);
1518 if (PageUptodate(page))
1519 set_buffer_uptodate(bh);
1520 bh = bh->b_this_page;
1521 } while (bh != head);
1522 }
1523 attach_page_buffers(page, head);
1524 spin_unlock(&page->mapping->private_lock);
1525}
1526EXPORT_SYMBOL(create_empty_buffers);
1527
1528/*
1529 * We are taking a block for data and we don't want any output from any
1530 * buffer-cache aliases starting from return from that function and
1531 * until the moment when something will explicitly mark the buffer
1532 * dirty (hopefully that will not happen until we free that block ;-)
1533 * We don't even need to mark it not-uptodate - nobody can expect
1534 * anything from a newly allocated buffer anyway. We used to use
1535 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1536 * don't want to mark the alias unmapped, for example - it would confuse
1537 * anyone who might pick it with bread() afterwards...
1538 *
1539 * Also.. Note that bforget() doesn't lock the buffer. So there can
1540 * be writeout I/O going on against recently-freed buffers. We don't
1541 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1542 * only if we really need to. That happens here.
1543 */
1544void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1545{
1546 struct buffer_head *old_bh;
1547
1548 might_sleep();
1549
1550 old_bh = __find_get_block_slow(bdev, block);
1551 if (old_bh) {
1552 clear_buffer_dirty(old_bh);
1553 wait_on_buffer(old_bh);
1554 clear_buffer_req(old_bh);
1555 __brelse(old_bh);
1556 }
1557}
1558EXPORT_SYMBOL(unmap_underlying_metadata);
1559
1560/*
1561 * NOTE! All mapped/uptodate combinations are valid:
1562 *
1563 * Mapped Uptodate Meaning
1564 *
1565 * No No "unknown" - must do get_block()
1566 * No Yes "hole" - zero-filled
1567 * Yes No "allocated" - allocated on disk, not read in
1568 * Yes Yes "valid" - allocated and up-to-date in memory.
1569 *
1570 * "Dirty" is valid only with the last case (mapped+uptodate).
1571 */
1572
1573/*
1574 * While block_write_full_page is writing back the dirty buffers under
1575 * the page lock, whoever dirtied the buffers may decide to clean them
1576 * again at any time. We handle that by only looking at the buffer
1577 * state inside lock_buffer().
1578 *
1579 * If block_write_full_page() is called for regular writeback
1580 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1581 * locked buffer. This only can happen if someone has written the buffer
1582 * directly, with submit_bh(). At the address_space level PageWriteback
1583 * prevents this contention from occurring.
1584 */
1585static int __block_write_full_page(struct inode *inode, struct page *page,
1586 get_block_t *get_block, struct writeback_control *wbc)
1587{
1588 int err;
1589 sector_t block;
1590 sector_t last_block;
1591 struct buffer_head *bh, *head;
1592 const unsigned blocksize = 1 << inode->i_blkbits;
1593 int nr_underway = 0;
1594
1595 BUG_ON(!PageLocked(page));
1596
1597 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1598
1599 if (!page_has_buffers(page)) {
1600 create_empty_buffers(page, blocksize,
1601 (1 << BH_Dirty)|(1 << BH_Uptodate));
1602 }
1603
1604 /*
1605 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1606 * here, and the (potentially unmapped) buffers may become dirty at
1607 * any time. If a buffer becomes dirty here after we've inspected it
1608 * then we just miss that fact, and the page stays dirty.
1609 *
1610 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1611 * handle that here by just cleaning them.
1612 */
1613
1614 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1615 head = page_buffers(page);
1616 bh = head;
1617
1618 /*
1619 * Get all the dirty buffers mapped to disk addresses and
1620 * handle any aliases from the underlying blockdev's mapping.
1621 */
1622 do {
1623 if (block > last_block) {
1624 /*
1625 * mapped buffers outside i_size will occur, because
1626 * this page can be outside i_size when there is a
1627 * truncate in progress.
1628 */
1629 /*
1630 * The buffer was zeroed by block_write_full_page()
1631 */
1632 clear_buffer_dirty(bh);
1633 set_buffer_uptodate(bh);
1634 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1635 WARN_ON(bh->b_size != blocksize);
1636 err = get_block(inode, block, bh, 1);
1637 if (err)
1638 goto recover;
1639 if (buffer_new(bh)) {
1640 /* blockdev mappings never come here */
1641 clear_buffer_new(bh);
1642 unmap_underlying_metadata(bh->b_bdev,
1643 bh->b_blocknr);
1644 }
1645 }
1646 bh = bh->b_this_page;
1647 block++;
1648 } while (bh != head);
1649
1650 do {
1651 if (!buffer_mapped(bh))
1652 continue;
1653 /*
1654 * If it's a fully non-blocking write attempt and we cannot
1655 * lock the buffer then redirty the page. Note that this can
1656 * potentially cause a busy-wait loop from pdflush and kswapd
1657 * activity, but those code paths have their own higher-level
1658 * throttling.
1659 */
1660 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1661 lock_buffer(bh);
1662 } else if (test_set_buffer_locked(bh)) {
1663 redirty_page_for_writepage(wbc, page);
1664 continue;
1665 }
1666 if (test_clear_buffer_dirty(bh)) {
1667 mark_buffer_async_write(bh);
1668 } else {
1669 unlock_buffer(bh);
1670 }
1671 } while ((bh = bh->b_this_page) != head);
1672
1673 /*
1674 * The page and its buffers are protected by PageWriteback(), so we can
1675 * drop the bh refcounts early.
1676 */
1677 BUG_ON(PageWriteback(page));
1678 set_page_writeback(page);
1679
1680 do {
1681 struct buffer_head *next = bh->b_this_page;
1682 if (buffer_async_write(bh)) {
1683 submit_bh(WRITE, bh);
1684 nr_underway++;
1685 }
1686 bh = next;
1687 } while (bh != head);
1688 unlock_page(page);
1689
1690 err = 0;
1691done:
1692 if (nr_underway == 0) {
1693 /*
1694 * The page was marked dirty, but the buffers were
1695 * clean. Someone wrote them back by hand with
1696 * ll_rw_block/submit_bh. A rare case.
1697 */
1698 end_page_writeback(page);
1699
1700 /*
1701 * The page and buffer_heads can be released at any time from
1702 * here on.
1703 */
1704 wbc->pages_skipped++; /* We didn't write this page */
1705 }
1706 return err;
1707
1708recover:
1709 /*
1710 * ENOSPC, or some other error. We may already have added some
1711 * blocks to the file, so we need to write these out to avoid
1712 * exposing stale data.
1713 * The page is currently locked and not marked for writeback
1714 */
1715 bh = head;
1716 /* Recovery: lock and submit the mapped buffers */
1717 do {
1718 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1719 lock_buffer(bh);
1720 mark_buffer_async_write(bh);
1721 } else {
1722 /*
1723 * The buffer may have been set dirty during
1724 * attachment to a dirty page.
1725 */
1726 clear_buffer_dirty(bh);
1727 }
1728 } while ((bh = bh->b_this_page) != head);
1729 SetPageError(page);
1730 BUG_ON(PageWriteback(page));
1731 set_page_writeback(page);
1732 do {
1733 struct buffer_head *next = bh->b_this_page;
1734 if (buffer_async_write(bh)) {
1735 clear_buffer_dirty(bh);
1736 submit_bh(WRITE, bh);
1737 nr_underway++;
1738 }
1739 bh = next;
1740 } while (bh != head);
1741 unlock_page(page);
1742 goto done;
1743}
1744
1745static int __block_prepare_write(struct inode *inode, struct page *page,
1746 unsigned from, unsigned to, get_block_t *get_block)
1747{
1748 unsigned block_start, block_end;
1749 sector_t block;
1750 int err = 0;
1751 unsigned blocksize, bbits;
1752 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1753
1754 BUG_ON(!PageLocked(page));
1755 BUG_ON(from > PAGE_CACHE_SIZE);
1756 BUG_ON(to > PAGE_CACHE_SIZE);
1757 BUG_ON(from > to);
1758
1759 blocksize = 1 << inode->i_blkbits;
1760 if (!page_has_buffers(page))
1761 create_empty_buffers(page, blocksize, 0);
1762 head = page_buffers(page);
1763
1764 bbits = inode->i_blkbits;
1765 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1766
1767 for(bh = head, block_start = 0; bh != head || !block_start;
1768 block++, block_start=block_end, bh = bh->b_this_page) {
1769 block_end = block_start + blocksize;
1770 if (block_end <= from || block_start >= to) {
1771 if (PageUptodate(page)) {
1772 if (!buffer_uptodate(bh))
1773 set_buffer_uptodate(bh);
1774 }
1775 continue;
1776 }
1777 if (buffer_new(bh))
1778 clear_buffer_new(bh);
1779 if (!buffer_mapped(bh)) {
1780 WARN_ON(bh->b_size != blocksize);
1781 err = get_block(inode, block, bh, 1);
1782 if (err)
1783 break;
1784 if (buffer_new(bh)) {
1785 unmap_underlying_metadata(bh->b_bdev,
1786 bh->b_blocknr);
1787 if (PageUptodate(page)) {
1788 set_buffer_uptodate(bh);
1789 continue;
1790 }
1791 if (block_end > to || block_start < from) {
1792 void *kaddr;
1793
1794 kaddr = kmap_atomic(page, KM_USER0);
1795 if (block_end > to)
1796 memset(kaddr+to, 0,
1797 block_end-to);
1798 if (block_start < from)
1799 memset(kaddr+block_start,
1800 0, from-block_start);
1801 flush_dcache_page(page);
1802 kunmap_atomic(kaddr, KM_USER0);
1803 }
1804 continue;
1805 }
1806 }
1807 if (PageUptodate(page)) {
1808 if (!buffer_uptodate(bh))
1809 set_buffer_uptodate(bh);
1810 continue;
1811 }
1812 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1813 !buffer_unwritten(bh) &&
1da177e4
LT
1814 (block_start < from || block_end > to)) {
1815 ll_rw_block(READ, 1, &bh);
1816 *wait_bh++=bh;
1817 }
1818 }
1819 /*
1820 * If we issued read requests - let them complete.
1821 */
1822 while(wait_bh > wait) {
1823 wait_on_buffer(*--wait_bh);
1824 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1825 err = -EIO;
1da177e4 1826 }
152becd2
AA
1827 if (!err) {
1828 bh = head;
1829 do {
1830 if (buffer_new(bh))
1831 clear_buffer_new(bh);
1832 } while ((bh = bh->b_this_page) != head);
1833 return 0;
1834 }
f3ddbdc6 1835 /* Error case: */
1da177e4
LT
1836 /*
1837 * Zero out any newly allocated blocks to avoid exposing stale
1838 * data. If BH_New is set, we know that the block was newly
1839 * allocated in the above loop.
1840 */
1841 bh = head;
1842 block_start = 0;
1843 do {
1844 block_end = block_start+blocksize;
1845 if (block_end <= from)
1846 goto next_bh;
1847 if (block_start >= to)
1848 break;
1849 if (buffer_new(bh)) {
1850 void *kaddr;
1851
1852 clear_buffer_new(bh);
1853 kaddr = kmap_atomic(page, KM_USER0);
1854 memset(kaddr+block_start, 0, bh->b_size);
8c581651 1855 flush_dcache_page(page);
1da177e4
LT
1856 kunmap_atomic(kaddr, KM_USER0);
1857 set_buffer_uptodate(bh);
1858 mark_buffer_dirty(bh);
1859 }
1860next_bh:
1861 block_start = block_end;
1862 bh = bh->b_this_page;
1863 } while (bh != head);
1864 return err;
1865}
1866
1867static int __block_commit_write(struct inode *inode, struct page *page,
1868 unsigned from, unsigned to)
1869{
1870 unsigned block_start, block_end;
1871 int partial = 0;
1872 unsigned blocksize;
1873 struct buffer_head *bh, *head;
1874
1875 blocksize = 1 << inode->i_blkbits;
1876
1877 for(bh = head = page_buffers(page), block_start = 0;
1878 bh != head || !block_start;
1879 block_start=block_end, bh = bh->b_this_page) {
1880 block_end = block_start + blocksize;
1881 if (block_end <= from || block_start >= to) {
1882 if (!buffer_uptodate(bh))
1883 partial = 1;
1884 } else {
1885 set_buffer_uptodate(bh);
1886 mark_buffer_dirty(bh);
1887 }
1888 }
1889
1890 /*
1891 * If this is a partial write which happened to make all buffers
1892 * uptodate then we can optimize away a bogus readpage() for
1893 * the next read(). Here we 'discover' whether the page went
1894 * uptodate as a result of this (potentially partial) write.
1895 */
1896 if (!partial)
1897 SetPageUptodate(page);
1898 return 0;
1899}
1900
1901/*
1902 * Generic "read page" function for block devices that have the normal
1903 * get_block functionality. This is most of the block device filesystems.
1904 * Reads the page asynchronously --- the unlock_buffer() and
1905 * set/clear_buffer_uptodate() functions propagate buffer state into the
1906 * page struct once IO has completed.
1907 */
1908int block_read_full_page(struct page *page, get_block_t *get_block)
1909{
1910 struct inode *inode = page->mapping->host;
1911 sector_t iblock, lblock;
1912 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1913 unsigned int blocksize;
1914 int nr, i;
1915 int fully_mapped = 1;
1916
cd7619d6 1917 BUG_ON(!PageLocked(page));
1da177e4
LT
1918 blocksize = 1 << inode->i_blkbits;
1919 if (!page_has_buffers(page))
1920 create_empty_buffers(page, blocksize, 0);
1921 head = page_buffers(page);
1922
1923 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1924 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1925 bh = head;
1926 nr = 0;
1927 i = 0;
1928
1929 do {
1930 if (buffer_uptodate(bh))
1931 continue;
1932
1933 if (!buffer_mapped(bh)) {
c64610ba
AM
1934 int err = 0;
1935
1da177e4
LT
1936 fully_mapped = 0;
1937 if (iblock < lblock) {
b0cf2321 1938 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
1939 err = get_block(inode, iblock, bh, 0);
1940 if (err)
1da177e4
LT
1941 SetPageError(page);
1942 }
1943 if (!buffer_mapped(bh)) {
1944 void *kaddr = kmap_atomic(page, KM_USER0);
1945 memset(kaddr + i * blocksize, 0, blocksize);
1946 flush_dcache_page(page);
1947 kunmap_atomic(kaddr, KM_USER0);
c64610ba
AM
1948 if (!err)
1949 set_buffer_uptodate(bh);
1da177e4
LT
1950 continue;
1951 }
1952 /*
1953 * get_block() might have updated the buffer
1954 * synchronously
1955 */
1956 if (buffer_uptodate(bh))
1957 continue;
1958 }
1959 arr[nr++] = bh;
1960 } while (i++, iblock++, (bh = bh->b_this_page) != head);
1961
1962 if (fully_mapped)
1963 SetPageMappedToDisk(page);
1964
1965 if (!nr) {
1966 /*
1967 * All buffers are uptodate - we can set the page uptodate
1968 * as well. But not if get_block() returned an error.
1969 */
1970 if (!PageError(page))
1971 SetPageUptodate(page);
1972 unlock_page(page);
1973 return 0;
1974 }
1975
1976 /* Stage two: lock the buffers */
1977 for (i = 0; i < nr; i++) {
1978 bh = arr[i];
1979 lock_buffer(bh);
1980 mark_buffer_async_read(bh);
1981 }
1982
1983 /*
1984 * Stage 3: start the IO. Check for uptodateness
1985 * inside the buffer lock in case another process reading
1986 * the underlying blockdev brought it uptodate (the sct fix).
1987 */
1988 for (i = 0; i < nr; i++) {
1989 bh = arr[i];
1990 if (buffer_uptodate(bh))
1991 end_buffer_async_read(bh, 1);
1992 else
1993 submit_bh(READ, bh);
1994 }
1995 return 0;
1996}
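/*
 * A minimal sketch (hypothetical, not taken from this file) of how a
 * simple block-based filesystem typically wires block_read_full_page()
 * into its address_space_operations: it supplies its own get_block_t
 * and a thin ->readpage wrapper.  The foofs_* names and the stubbed
 * mapping logic are assumptions for illustration only.
 */
static int foofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	/* A real filesystem would look up (and, when create != 0,
	 * allocate) the on-disk block behind logical block 'iblock'
	 * and call map_bh() on bh_result when it succeeds. */
	return -EIO;		/* placeholder */
}

static int foofs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, foofs_get_block);
}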
1997
1998/* utility function for filesystems that need to do work on expanding
1999 * truncates. Uses prepare/commit_write to allow the filesystem to
2000 * deal with the hole.
2001 */
05eb0b51
OH
2002static int __generic_cont_expand(struct inode *inode, loff_t size,
2003 pgoff_t index, unsigned int offset)
1da177e4
LT
2004{
2005 struct address_space *mapping = inode->i_mapping;
2006 struct page *page;
05eb0b51 2007 unsigned long limit;
1da177e4
LT
2008 int err;
2009
2010 err = -EFBIG;
2011 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2012 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2013 send_sig(SIGXFSZ, current, 0);
2014 goto out;
2015 }
2016 if (size > inode->i_sb->s_maxbytes)
2017 goto out;
2018
1da177e4
LT
2019 err = -ENOMEM;
2020 page = grab_cache_page(mapping, index);
2021 if (!page)
2022 goto out;
2023 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
05eb0b51
OH
2024 if (err) {
2025 /*
2026 * ->prepare_write() may have instantiated a few blocks
2027 * outside i_size. Trim these off again.
2028 */
2029 unlock_page(page);
2030 page_cache_release(page);
2031 vmtruncate(inode, inode->i_size);
2032 goto out;
1da177e4 2033 }
05eb0b51
OH
2034
2035 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2036
1da177e4
LT
2037 unlock_page(page);
2038 page_cache_release(page);
2039 if (err > 0)
2040 err = 0;
2041out:
2042 return err;
2043}
2044
05eb0b51
OH
2045int generic_cont_expand(struct inode *inode, loff_t size)
2046{
2047 pgoff_t index;
2048 unsigned int offset;
2049
2050 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2051
2052 /* ugh. in prepare/commit_write, if from==to==start of block, we
2053 ** skip the prepare. make sure we never send an offset for the start
2054 ** of a block
2055 */
2056 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2057 /* caller must handle this extra byte. */
2058 offset++;
2059 }
2060 index = size >> PAGE_CACHE_SHIFT;
2061
2062 return __generic_cont_expand(inode, size, index, offset);
2063}
2064
2065int generic_cont_expand_simple(struct inode *inode, loff_t size)
2066{
2067 loff_t pos = size - 1;
2068 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2069 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2070
2071 /* prepare/commit_write can handle even if from==to==start of block. */
2072 return __generic_cont_expand(inode, size, index, offset);
2073}
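/*
 * A sketch (hypothetical, not taken from this file) of the usual
 * caller: a filesystem that cannot represent holes extends the file
 * with zeroes from its ->setattr before allowing the size change.
 * foofs_setattr() is an assumption for illustration; error handling
 * is reduced to the essentials.
 */
static int foofs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > i_size_read(inode))
		err = generic_cont_expand_simple(inode, attr->ia_size);
	if (err)
		return err;
	return inode_setattr(inode, attr);
}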
2074
1da177e4
LT
2075/*
2076 * For moronic filesystems that do not allow holes in files.
2077 * We may have to extend the file.
2078 */
2079
2080int cont_prepare_write(struct page *page, unsigned offset,
2081 unsigned to, get_block_t *get_block, loff_t *bytes)
2082{
2083 struct address_space *mapping = page->mapping;
2084 struct inode *inode = mapping->host;
2085 struct page *new_page;
2086 pgoff_t pgpos;
2087 long status;
2088 unsigned zerofrom;
2089 unsigned blocksize = 1 << inode->i_blkbits;
2090 void *kaddr;
2091
2092 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2093 status = -ENOMEM;
2094 new_page = grab_cache_page(mapping, pgpos);
2095 if (!new_page)
2096 goto out;
2097 /* we might sleep */
2098 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2099 unlock_page(new_page);
2100 page_cache_release(new_page);
2101 continue;
2102 }
2103 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2104 if (zerofrom & (blocksize-1)) {
2105 *bytes |= (blocksize-1);
2106 (*bytes)++;
2107 }
2108 status = __block_prepare_write(inode, new_page, zerofrom,
2109 PAGE_CACHE_SIZE, get_block);
2110 if (status)
2111 goto out_unmap;
2112 kaddr = kmap_atomic(new_page, KM_USER0);
2113 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2114 flush_dcache_page(new_page);
2115 kunmap_atomic(kaddr, KM_USER0);
2116 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2117 unlock_page(new_page);
2118 page_cache_release(new_page);
2119 }
2120
2121 if (page->index < pgpos) {
2122 /* completely inside the area */
2123 zerofrom = offset;
2124 } else {
2125 /* page covers the boundary, find the boundary offset */
2126 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2127
2128 /* if we will expand the thing last block will be filled */
2129 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2130 *bytes |= (blocksize-1);
2131 (*bytes)++;
2132 }
2133
2134 /* starting below the boundary? Nothing to zero out */
2135 if (offset <= zerofrom)
2136 zerofrom = offset;
2137 }
2138 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2139 if (status)
2140 goto out1;
2141 if (zerofrom < offset) {
2142 kaddr = kmap_atomic(page, KM_USER0);
2143 memset(kaddr+zerofrom, 0, offset-zerofrom);
2144 flush_dcache_page(page);
2145 kunmap_atomic(kaddr, KM_USER0);
2146 __block_commit_write(inode, page, zerofrom, offset);
2147 }
2148 return 0;
2149out1:
2150 ClearPageUptodate(page);
2151 return status;
2152
2153out_unmap:
2154 ClearPageUptodate(new_page);
2155 unlock_page(new_page);
2156 page_cache_release(new_page);
2157out:
2158 return status;
2159}
2160
2161int block_prepare_write(struct page *page, unsigned from, unsigned to,
2162 get_block_t *get_block)
2163{
2164 struct inode *inode = page->mapping->host;
2165 int err = __block_prepare_write(inode, page, from, to, get_block);
2166 if (err)
2167 ClearPageUptodate(page);
2168 return err;
2169}
2170
2171int block_commit_write(struct page *page, unsigned from, unsigned to)
2172{
2173 struct inode *inode = page->mapping->host;
2174 __block_commit_write(inode,page,from,to);
2175 return 0;
2176}
2177
2178int generic_commit_write(struct file *file, struct page *page,
2179 unsigned from, unsigned to)
2180{
2181 struct inode *inode = page->mapping->host;
2182 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2183 __block_commit_write(inode,page,from,to);
2184 /*
2185 * No need to use i_size_read() here, the i_size
1b1dcc1b 2186 * cannot change under us because we hold i_mutex.
1da177e4
LT
2187 */
2188 if (pos > inode->i_size) {
2189 i_size_write(inode, pos);
2190 mark_inode_dirty(inode);
2191 }
2192 return 0;
2193}
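/*
 * A sketch (hypothetical) showing how the prepare/commit pair above is
 * normally wired up, continuing the foofs example from earlier:
 * ->prepare_write is a thin wrapper passing the filesystem's get_block,
 * and ->commit_write can often be generic_commit_write() itself.
 */
static int foofs_prepare_write(struct file *file, struct page *page,
			       unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, foofs_get_block);
}

static const struct address_space_operations foofs_aops = {
	.readpage	= foofs_readpage,
	.prepare_write	= foofs_prepare_write,
	.commit_write	= generic_commit_write,
	.sync_page	= block_sync_page,
};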
2194
2195
2196/*
2197 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2198 * immediately, while under the page lock. So it needs a special end_io
2199 * handler which does not touch the bh after unlocking it.
2200 *
2201 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2202 * a race there is benign: unlock_buffer() only uses the bh's address for
2203 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2204 * itself.
2205 */
2206static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2207{
2208 if (uptodate) {
2209 set_buffer_uptodate(bh);
2210 } else {
2211 /* This happens due to failed READA attempts. */
2212 clear_buffer_uptodate(bh);
2213 }
2214 unlock_buffer(bh);
2215}
2216
2217/*
2218 * On entry, the page is fully not uptodate.
2219 * On exit the page is fully uptodate in the areas outside (from,to)
2220 */
2221int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2222 get_block_t *get_block)
2223{
2224 struct inode *inode = page->mapping->host;
2225 const unsigned blkbits = inode->i_blkbits;
2226 const unsigned blocksize = 1 << blkbits;
2227 struct buffer_head map_bh;
2228 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2229 unsigned block_in_page;
2230 unsigned block_start;
2231 sector_t block_in_file;
2232 char *kaddr;
2233 int nr_reads = 0;
2234 int i;
2235 int ret = 0;
2236 int is_mapped_to_disk = 1;
1da177e4
LT
2237
2238 if (PageMappedToDisk(page))
2239 return 0;
2240
2241 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2242 map_bh.b_page = page;
2243
2244 /*
2245 * We loop across all blocks in the page, whether or not they are
2246 * part of the affected region. This is so we can discover if the
2247 * page is fully mapped-to-disk.
2248 */
2249 for (block_start = 0, block_in_page = 0;
2250 block_start < PAGE_CACHE_SIZE;
2251 block_in_page++, block_start += blocksize) {
2252 unsigned block_end = block_start + blocksize;
2253 int create;
2254
2255 map_bh.b_state = 0;
2256 create = 1;
2257 if (block_start >= to)
2258 create = 0;
b0cf2321 2259 map_bh.b_size = blocksize;
1da177e4
LT
2260 ret = get_block(inode, block_in_file + block_in_page,
2261 &map_bh, create);
2262 if (ret)
2263 goto failed;
2264 if (!buffer_mapped(&map_bh))
2265 is_mapped_to_disk = 0;
2266 if (buffer_new(&map_bh))
2267 unmap_underlying_metadata(map_bh.b_bdev,
2268 map_bh.b_blocknr);
2269 if (PageUptodate(page))
2270 continue;
2271 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2272 kaddr = kmap_atomic(page, KM_USER0);
22c8ca78 2273 if (block_start < from)
1da177e4 2274 memset(kaddr+block_start, 0, from-block_start);
22c8ca78 2275 if (block_end > to)
1da177e4 2276 memset(kaddr + to, 0, block_end - to);
1da177e4
LT
2277 flush_dcache_page(page);
2278 kunmap_atomic(kaddr, KM_USER0);
2279 continue;
2280 }
2281 if (buffer_uptodate(&map_bh))
2282 continue; /* reiserfs does this */
2283 if (block_start < from || block_end > to) {
2284 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2285
2286 if (!bh) {
2287 ret = -ENOMEM;
2288 goto failed;
2289 }
2290 bh->b_state = map_bh.b_state;
2291 atomic_set(&bh->b_count, 0);
2292 bh->b_this_page = NULL;
2293 bh->b_page = page;
2294 bh->b_blocknr = map_bh.b_blocknr;
2295 bh->b_size = blocksize;
2296 bh->b_data = (char *)(long)block_start;
2297 bh->b_bdev = map_bh.b_bdev;
2298 bh->b_private = NULL;
2299 read_bh[nr_reads++] = bh;
2300 }
2301 }
2302
2303 if (nr_reads) {
2304 struct buffer_head *bh;
2305
2306 /*
2307 * The page is locked, so these buffers are protected from
2308 * any VM or truncate activity. Hence we don't need to care
2309 * for the buffer_head refcounts.
2310 */
2311 for (i = 0; i < nr_reads; i++) {
2312 bh = read_bh[i];
2313 lock_buffer(bh);
2314 bh->b_end_io = end_buffer_read_nobh;
2315 submit_bh(READ, bh);
2316 }
2317 for (i = 0; i < nr_reads; i++) {
2318 bh = read_bh[i];
2319 wait_on_buffer(bh);
2320 if (!buffer_uptodate(bh))
2321 ret = -EIO;
2322 free_buffer_head(bh);
2323 read_bh[i] = NULL;
2324 }
2325 if (ret)
2326 goto failed;
2327 }
2328
2329 if (is_mapped_to_disk)
2330 SetPageMappedToDisk(page);
1da177e4
LT
2331
2332 return 0;
2333
2334failed:
2335 for (i = 0; i < nr_reads; i++) {
2336 if (read_bh[i])
2337 free_buffer_head(read_bh[i]);
2338 }
2339
2340 /*
2341 * Error recovery is pretty slack. Clear the page and mark it dirty
2342 * so we'll later zero out any blocks which _were_ allocated.
2343 */
2344 kaddr = kmap_atomic(page, KM_USER0);
2345 memset(kaddr, 0, PAGE_CACHE_SIZE);
8c581651 2346 flush_dcache_page(page);
1da177e4
LT
2347 kunmap_atomic(kaddr, KM_USER0);
2348 SetPageUptodate(page);
2349 set_page_dirty(page);
2350 return ret;
2351}
2352EXPORT_SYMBOL(nobh_prepare_write);
2353
57bf63d6
DK
2354/*
2355 * Make sure any changes to nobh_commit_write() are reflected in
2356 * nobh_truncate_page(), since it doesn't call commit_write().
2357 */
1da177e4
LT
2358int nobh_commit_write(struct file *file, struct page *page,
2359 unsigned from, unsigned to)
2360{
2361 struct inode *inode = page->mapping->host;
2362 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2363
22c8ca78 2364 SetPageUptodate(page);
1da177e4
LT
2365 set_page_dirty(page);
2366 if (pos > inode->i_size) {
2367 i_size_write(inode, pos);
2368 mark_inode_dirty(inode);
2369 }
2370 return 0;
2371}
2372EXPORT_SYMBOL(nobh_commit_write);
2373
2374/*
2375 * nobh_writepage() - based on block_write_full_page() except
2376 * that it tries to operate without attaching bufferheads to
2377 * the page.
2378 */
2379int nobh_writepage(struct page *page, get_block_t *get_block,
2380 struct writeback_control *wbc)
2381{
2382 struct inode * const inode = page->mapping->host;
2383 loff_t i_size = i_size_read(inode);
2384 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2385 unsigned offset;
2386 void *kaddr;
2387 int ret;
2388
2389 /* Is the page fully inside i_size? */
2390 if (page->index < end_index)
2391 goto out;
2392
2393 /* Is the page fully outside i_size? (truncate in progress) */
2394 offset = i_size & (PAGE_CACHE_SIZE-1);
2395 if (page->index >= end_index+1 || !offset) {
2396 /*
2397 * The page may have dirty, unmapped buffers. For example,
2398 * they may have been added in ext3_writepage(). Make them
2399 * freeable here, so the page does not leak.
2400 */
2401#if 0
2402 /* Not really sure about this - do we need this ? */
2403 if (page->mapping->a_ops->invalidatepage)
2404 page->mapping->a_ops->invalidatepage(page, offset);
2405#endif
2406 unlock_page(page);
2407 return 0; /* don't care */
2408 }
2409
2410 /*
2411 * The page straddles i_size. It must be zeroed out on each and every
2412 * writepage invocation because it may be mmapped. "A file is mapped
2413 * in multiples of the page size. For a file that is not a multiple of
2414 * the page size, the remaining memory is zeroed when mapped, and
2415 * writes to that region are not written out to the file."
2416 */
2417 kaddr = kmap_atomic(page, KM_USER0);
2418 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2419 flush_dcache_page(page);
2420 kunmap_atomic(kaddr, KM_USER0);
2421out:
2422 ret = mpage_writepage(page, get_block, wbc);
2423 if (ret == -EAGAIN)
2424 ret = __block_write_full_page(inode, page, get_block, wbc);
2425 return ret;
2426}
2427EXPORT_SYMBOL(nobh_writepage);
2428
2429/*
2430 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2431 */
2432int nobh_truncate_page(struct address_space *mapping, loff_t from)
2433{
2434 struct inode *inode = mapping->host;
2435 unsigned blocksize = 1 << inode->i_blkbits;
2436 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2437 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2438 unsigned to;
2439 struct page *page;
f5e54d6e 2440 const struct address_space_operations *a_ops = mapping->a_ops;
1da177e4
LT
2441 char *kaddr;
2442 int ret = 0;
2443
2444 if ((offset & (blocksize - 1)) == 0)
2445 goto out;
2446
2447 ret = -ENOMEM;
2448 page = grab_cache_page(mapping, index);
2449 if (!page)
2450 goto out;
2451
2452 to = (offset + blocksize) & ~(blocksize - 1);
2453 ret = a_ops->prepare_write(NULL, page, offset, to);
2454 if (ret == 0) {
2455 kaddr = kmap_atomic(page, KM_USER0);
2456 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2457 flush_dcache_page(page);
2458 kunmap_atomic(kaddr, KM_USER0);
57bf63d6
DK
2459 /*
2460 * It would be more correct to call aops->commit_write()
2461 * here, but this is more efficient.
2462 */
2463 SetPageUptodate(page);
1da177e4
LT
2464 set_page_dirty(page);
2465 }
2466 unlock_page(page);
2467 page_cache_release(page);
2468out:
2469 return ret;
2470}
2471EXPORT_SYMBOL(nobh_truncate_page);
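/*
 * A sketch (hypothetical, not taken from this file) of how the nobh_*
 * helpers are normally used together by one filesystem; its truncate
 * path would call nobh_truncate_page() as noted above.  The barfs_*
 * names are assumptions, with barfs_get_block standing in for the
 * filesystem's real get_block_t.
 */
static int barfs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	return -EIO;		/* placeholder for the real block mapping */
}

static int barfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return nobh_writepage(page, barfs_get_block, wbc);
}

static int barfs_prepare_write(struct file *file, struct page *page,
			       unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, barfs_get_block);
}

static const struct address_space_operations barfs_aops = {
	.writepage	= barfs_writepage,
	.prepare_write	= barfs_prepare_write,
	.commit_write	= nobh_commit_write,
	.sync_page	= block_sync_page,
};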
2472
2473int block_truncate_page(struct address_space *mapping,
2474 loff_t from, get_block_t *get_block)
2475{
2476 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2477 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2478 unsigned blocksize;
54b21a79 2479 sector_t iblock;
1da177e4
LT
2480 unsigned length, pos;
2481 struct inode *inode = mapping->host;
2482 struct page *page;
2483 struct buffer_head *bh;
2484 void *kaddr;
2485 int err;
2486
2487 blocksize = 1 << inode->i_blkbits;
2488 length = offset & (blocksize - 1);
2489
2490 /* Block boundary? Nothing to do */
2491 if (!length)
2492 return 0;
2493
2494 length = blocksize - length;
54b21a79 2495 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2496
2497 page = grab_cache_page(mapping, index);
2498 err = -ENOMEM;
2499 if (!page)
2500 goto out;
2501
2502 if (!page_has_buffers(page))
2503 create_empty_buffers(page, blocksize, 0);
2504
2505 /* Find the buffer that contains "offset" */
2506 bh = page_buffers(page);
2507 pos = blocksize;
2508 while (offset >= pos) {
2509 bh = bh->b_this_page;
2510 iblock++;
2511 pos += blocksize;
2512 }
2513
2514 err = 0;
2515 if (!buffer_mapped(bh)) {
b0cf2321 2516 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2517 err = get_block(inode, iblock, bh, 0);
2518 if (err)
2519 goto unlock;
2520 /* unmapped? It's a hole - nothing to do */
2521 if (!buffer_mapped(bh))
2522 goto unlock;
2523 }
2524
2525 /* Ok, it's mapped. Make sure it's up-to-date */
2526 if (PageUptodate(page))
2527 set_buffer_uptodate(bh);
2528
33a266dd 2529 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2530 err = -EIO;
2531 ll_rw_block(READ, 1, &bh);
2532 wait_on_buffer(bh);
2533 /* Uhhuh. Read error. Complain and punt. */
2534 if (!buffer_uptodate(bh))
2535 goto unlock;
2536 }
2537
2538 kaddr = kmap_atomic(page, KM_USER0);
2539 memset(kaddr + offset, 0, length);
2540 flush_dcache_page(page);
2541 kunmap_atomic(kaddr, KM_USER0);
2542
2543 mark_buffer_dirty(bh);
2544 err = 0;
2545
2546unlock:
2547 unlock_page(page);
2548 page_cache_release(page);
2549out:
2550 return err;
2551}
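/*
 * A sketch (hypothetical): block_truncate_page() is typically called
 * from a filesystem's ->truncate to zero the tail of the last partial
 * block before the blocks beyond i_size are released.  foofs_truncate
 * is an assumption; foofs_get_block is the get_block_t sketched
 * earlier.
 */
static void foofs_truncate(struct inode *inode)
{
	if (block_truncate_page(inode->i_mapping, inode->i_size,
				foofs_get_block))
		return;		/* a real fs would report the error */

	/* ...then release the on-disk blocks beyond the new i_size... */
}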
2552
2553/*
2554 * The generic ->writepage function for buffer-backed address_spaces
2555 */
2556int block_write_full_page(struct page *page, get_block_t *get_block,
2557 struct writeback_control *wbc)
2558{
2559 struct inode * const inode = page->mapping->host;
2560 loff_t i_size = i_size_read(inode);
2561 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2562 unsigned offset;
2563 void *kaddr;
2564
2565 /* Is the page fully inside i_size? */
2566 if (page->index < end_index)
2567 return __block_write_full_page(inode, page, get_block, wbc);
2568
2569 /* Is the page fully outside i_size? (truncate in progress) */
2570 offset = i_size & (PAGE_CACHE_SIZE-1);
2571 if (page->index >= end_index+1 || !offset) {
2572 /*
2573 * The page may have dirty, unmapped buffers. For example,
2574 * they may have been added in ext3_writepage(). Make them
2575 * freeable here, so the page does not leak.
2576 */
aaa4059b 2577 do_invalidatepage(page, 0);
1da177e4
LT
2578 unlock_page(page);
2579 return 0; /* don't care */
2580 }
2581
2582 /*
2583 * The page straddles i_size. It must be zeroed out on each and every
2584 * writepage invocation because it may be mmapped. "A file is mapped
2585 * in multiples of the page size. For a file that is not a multiple of
2586 * the page size, the remaining memory is zeroed when mapped, and
2587 * writes to that region are not written out to the file."
2588 */
2589 kaddr = kmap_atomic(page, KM_USER0);
2590 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2591 flush_dcache_page(page);
2592 kunmap_atomic(kaddr, KM_USER0);
2593 return __block_write_full_page(inode, page, get_block, wbc);
2594}
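/*
 * A sketch (hypothetical): most block-based filesystems expose the
 * helper above through a one-line ->writepage wrapper, again reusing
 * the foofs_get_block assumed earlier.
 */
static int foofs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, foofs_get_block, wbc);
}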
2595
2596sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2597 get_block_t *get_block)
2598{
2599 struct buffer_head tmp;
2600 struct inode *inode = mapping->host;
2601 tmp.b_state = 0;
2602 tmp.b_blocknr = 0;
b0cf2321 2603 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2604 get_block(inode, block, &tmp, 0);
2605 return tmp.b_blocknr;
2606}
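/*
 * A sketch (hypothetical): the matching ->bmap wrapper.  Since
 * generic_block_bmap() calls get_block() with create == 0, it only
 * reports existing mappings and never allocates anything.
 */
static sector_t foofs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, foofs_get_block);
}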
2607
2608static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2609{
2610 struct buffer_head *bh = bio->bi_private;
2611
2612 if (bio->bi_size)
2613 return 1;
2614
2615 if (err == -EOPNOTSUPP) {
2616 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2617 set_bit(BH_Eopnotsupp, &bh->b_state);
2618 }
2619
2620 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2621 bio_put(bio);
2622 return 0;
2623}
2624
2625int submit_bh(int rw, struct buffer_head * bh)
2626{
2627 struct bio *bio;
2628 int ret = 0;
2629
2630 BUG_ON(!buffer_locked(bh));
2631 BUG_ON(!buffer_mapped(bh));
2632 BUG_ON(!bh->b_end_io);
2633
2634 if (buffer_ordered(bh) && (rw == WRITE))
2635 rw = WRITE_BARRIER;
2636
2637 /*
2638 * Only clear out a write error when rewriting, should this
2639 * include WRITE_SYNC as well?
2640 */
2641 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2642 clear_buffer_write_io_error(bh);
2643
2644 /*
2645 * from here on down, it's all bio -- do the initial mapping,
2646 * submit_bio -> generic_make_request may further map this bio around
2647 */
2648 bio = bio_alloc(GFP_NOIO, 1);
2649
2650 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2651 bio->bi_bdev = bh->b_bdev;
2652 bio->bi_io_vec[0].bv_page = bh->b_page;
2653 bio->bi_io_vec[0].bv_len = bh->b_size;
2654 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2655
2656 bio->bi_vcnt = 1;
2657 bio->bi_idx = 0;
2658 bio->bi_size = bh->b_size;
2659
2660 bio->bi_end_io = end_bio_bh_io_sync;
2661 bio->bi_private = bh;
2662
2663 bio_get(bio);
2664 submit_bio(rw, bio);
2665
2666 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2667 ret = -EOPNOTSUPP;
2668
2669 bio_put(bio);
2670 return ret;
2671}
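/*
 * A sketch (hypothetical) of the calling convention submit_bh()
 * expects: the buffer must be locked, mapped and have b_end_io set
 * before submission.  Reading one mapped buffer synchronously could
 * look like this; end_buffer_read_sync() unlocks the buffer and drops
 * the reference taken here.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {	/* someone else read it meanwhile */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}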
2672
2673/**
2674 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2675 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2676 * @nr: number of &struct buffer_heads in the array
2677 * @bhs: array of pointers to &struct buffer_head
2678 *
a7662236
JK
2679 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2680 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2681 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2682 * are sent to disk. The fourth %READA option is described in the documentation
2683 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
2684 *
2685 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2686 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2687 * clean when doing a write request, and any buffer that appears to be
2688 * up-to-date when doing a read request. Further it marks as clean buffers that
2689 * are processed for writing (the buffer cache won't assume that they are
2690 * actually clean until the buffer gets unlocked).
1da177e4
LT
2691 *
2692 * ll_rw_block sets b_end_io to a simple completion handler that marks
2693 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2694 * any waiters.
2695 *
2696 * All of the buffers must be for the same device, and must also be a
2697 * multiple of the current approved size for the device.
2698 */
2699void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2700{
2701 int i;
2702
2703 for (i = 0; i < nr; i++) {
2704 struct buffer_head *bh = bhs[i];
2705
a7662236
JK
2706 if (rw == SWRITE)
2707 lock_buffer(bh);
2708 else if (test_set_buffer_locked(bh))
1da177e4
LT
2709 continue;
2710
a7662236 2711 if (rw == WRITE || rw == SWRITE) {
1da177e4 2712 if (test_clear_buffer_dirty(bh)) {
76c3073a 2713 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2714 get_bh(bh);
1da177e4
LT
2715 submit_bh(WRITE, bh);
2716 continue;
2717 }
2718 } else {
1da177e4 2719 if (!buffer_uptodate(bh)) {
76c3073a 2720 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2721 get_bh(bh);
1da177e4
LT
2722 submit_bh(rw, bh);
2723 continue;
2724 }
2725 }
2726 unlock_buffer(bh);
1da177e4
LT
2727 }
2728}
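/*
 * A sketch (hypothetical) of typical ll_rw_block() usage: start reads
 * for a batch of buffers, then wait for each one and check the result.
 * Buffers that were already up to date are simply skipped by
 * ll_rw_block(), so the wait loop still works for them.
 */
static int example_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}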
2729
2730/*
2731 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2732 * and then start new I/O and then wait upon it. The caller must have a ref on
2733 * the buffer_head.
2734 */
2735int sync_dirty_buffer(struct buffer_head *bh)
2736{
2737 int ret = 0;
2738
2739 WARN_ON(atomic_read(&bh->b_count) < 1);
2740 lock_buffer(bh);
2741 if (test_clear_buffer_dirty(bh)) {
2742 get_bh(bh);
2743 bh->b_end_io = end_buffer_write_sync;
2744 ret = submit_bh(WRITE, bh);
2745 wait_on_buffer(bh);
2746 if (buffer_eopnotsupp(bh)) {
2747 clear_buffer_eopnotsupp(bh);
2748 ret = -EOPNOTSUPP;
2749 }
2750 if (!ret && !buffer_uptodate(bh))
2751 ret = -EIO;
2752 } else {
2753 unlock_buffer(bh);
2754 }
2755 return ret;
2756}
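/*
 * A sketch (hypothetical) of the common metadata-update pattern around
 * sync_dirty_buffer(): read the block, modify it, mark it dirty, then
 * write it out and wait.  sb_bread() is the standard buffer-cache read
 * helper; the block number and the modification are placeholders.
 */
static int example_update_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);
	int err;

	if (!bh)
		return -EIO;
	/* ...modify bh->b_data here... */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* waits for the write to complete */
	brelse(bh);
	return err;
}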
2757
2758/*
2759 * try_to_free_buffers() checks if all the buffers on this particular page
2760 * are unused, and releases them if so.
2761 *
2762 * Exclusion against try_to_free_buffers may be obtained by either
2763 * locking the page or by holding its mapping's private_lock.
2764 *
2765 * If the page is dirty but all the buffers are clean then we need to
2766 * be sure to mark the page clean as well. This is because the page
2767 * may be against a block device, and a later reattachment of buffers
2768 * to a dirty page will set *all* buffers dirty. Which would corrupt
2769 * filesystem data on the same device.
2770 *
2771 * The same applies to regular filesystem pages: if all the buffers are
2772 * clean then we set the page clean and proceed. To do that, we require
2773 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2774 * private_lock.
2775 *
2776 * try_to_free_buffers() is non-blocking.
2777 */
2778static inline int buffer_busy(struct buffer_head *bh)
2779{
2780 return atomic_read(&bh->b_count) |
2781 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2782}
2783
2784static int
2785drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2786{
2787 struct buffer_head *head = page_buffers(page);
2788 struct buffer_head *bh;
2789
2790 bh = head;
2791 do {
de7d5a3b 2792 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
2793 set_bit(AS_EIO, &page->mapping->flags);
2794 if (buffer_busy(bh))
2795 goto failed;
2796 bh = bh->b_this_page;
2797 } while (bh != head);
2798
2799 do {
2800 struct buffer_head *next = bh->b_this_page;
2801
2802 if (!list_empty(&bh->b_assoc_buffers))
2803 __remove_assoc_queue(bh);
2804 bh = next;
2805 } while (bh != head);
2806 *buffers_to_free = head;
2807 __clear_page_buffers(page);
2808 return 1;
2809failed:
2810 return 0;
2811}
2812
2813int try_to_free_buffers(struct page *page)
2814{
2815 struct address_space * const mapping = page->mapping;
2816 struct buffer_head *buffers_to_free = NULL;
2817 int ret = 0;
2818
2819 BUG_ON(!PageLocked(page));
ecdfc978 2820 if (PageWriteback(page))
1da177e4
LT
2821 return 0;
2822
2823 if (mapping == NULL) { /* can this still happen? */
2824 ret = drop_buffers(page, &buffers_to_free);
2825 goto out;
2826 }
2827
2828 spin_lock(&mapping->private_lock);
2829 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
2830
2831 /*
2832 * If the filesystem writes its buffers by hand (eg ext3)
2833 * then we can have clean buffers against a dirty page. We
2834 * clean the page here; otherwise the VM will never notice
2835 * that the filesystem did any IO at all.
2836 *
2837 * Also, during truncate, discard_buffer will have marked all
2838 * the page's buffers clean. We discover that here and clean
2839 * the page also.
87df7241
NP
2840 *
2841 * private_lock must be held over this entire operation in order
2842 * to synchronise against __set_page_dirty_buffers and prevent the
2843 * dirty bit from being lost.
ecdfc978
LT
2844 */
2845 if (ret)
2846 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 2847 spin_unlock(&mapping->private_lock);
1da177e4
LT
2848out:
2849 if (buffers_to_free) {
2850 struct buffer_head *bh = buffers_to_free;
2851
2852 do {
2853 struct buffer_head *next = bh->b_this_page;
2854 free_buffer_head(bh);
2855 bh = next;
2856 } while (bh != buffers_to_free);
2857 }
2858 return ret;
2859}
2860EXPORT_SYMBOL(try_to_free_buffers);
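/*
 * A sketch (hypothetical): a filesystem with no private buffer state
 * can point ->releasepage straight at this helper; one with journalled
 * buffers (ext3, for instance) must first check whether the buffers
 * may really be dropped.  foofs_releasepage is an assumption.
 */
static int foofs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}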
2861
3978d717 2862void block_sync_page(struct page *page)
1da177e4
LT
2863{
2864 struct address_space *mapping;
2865
2866 smp_mb();
2867 mapping = page_mapping(page);
2868 if (mapping)
2869 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
2870}
2871
2872/*
2873 * There are no bdflush tunables left. But distributions are
2874 * still running obsolete flush daemons, so we terminate them here.
2875 *
2876 * Use of bdflush() is deprecated and will be removed in a future kernel.
2877 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2878 */
2879asmlinkage long sys_bdflush(int func, long data)
2880{
2881 static int msg_count;
2882
2883 if (!capable(CAP_SYS_ADMIN))
2884 return -EPERM;
2885
2886 if (msg_count < 5) {
2887 msg_count++;
2888 printk(KERN_INFO
2889 "warning: process `%s' used the obsolete bdflush"
2890 " system call\n", current->comm);
2891 printk(KERN_INFO "Fix your initscripts?\n");
2892 }
2893
2894 if (func == 1)
2895 do_exit(0);
2896 return 0;
2897}
2898
2899/*
2900 * Buffer-head allocation
2901 */
e18b890b 2902static struct kmem_cache *bh_cachep;
1da177e4
LT
2903
2904/*
2905 * Once the number of bh's in the machine exceeds this level, we start
2906 * stripping them in writeback.
2907 */
2908static int max_buffer_heads;
2909
2910int buffer_heads_over_limit;
2911
2912struct bh_accounting {
2913 int nr; /* Number of live bh's */
2914 int ratelimit; /* Limit cacheline bouncing */
2915};
2916
2917static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2918
2919static void recalc_bh_state(void)
2920{
2921 int i;
2922 int tot = 0;
2923
2924 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2925 return;
2926 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 2927 for_each_online_cpu(i)
1da177e4
LT
2928 tot += per_cpu(bh_accounting, i).nr;
2929 buffer_heads_over_limit = (tot > max_buffer_heads);
2930}
2931
dd0fc66f 2932struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4
LT
2933{
2934 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2935 if (ret) {
736c7b80 2936 get_cpu_var(bh_accounting).nr++;
1da177e4 2937 recalc_bh_state();
736c7b80 2938 put_cpu_var(bh_accounting);
1da177e4
LT
2939 }
2940 return ret;
2941}
2942EXPORT_SYMBOL(alloc_buffer_head);
2943
2944void free_buffer_head(struct buffer_head *bh)
2945{
2946 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2947 kmem_cache_free(bh_cachep, bh);
736c7b80 2948 get_cpu_var(bh_accounting).nr--;
1da177e4 2949 recalc_bh_state();
736c7b80 2950 put_cpu_var(bh_accounting);
1da177e4
LT
2951}
2952EXPORT_SYMBOL(free_buffer_head);
2953
2954static void
e18b890b 2955init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
1da177e4
LT
2956{
2957 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2958 SLAB_CTOR_CONSTRUCTOR) {
2959 struct buffer_head * bh = (struct buffer_head *)data;
2960
2961 memset(bh, 0, sizeof(*bh));
2962 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2963 }
2964}
2965
1da177e4
LT
2966static void buffer_exit_cpu(int cpu)
2967{
2968 int i;
2969 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2970
2971 for (i = 0; i < BH_LRU_SIZE; i++) {
2972 brelse(b->bhs[i]);
2973 b->bhs[i] = NULL;
2974 }
8a143426
ED
2975 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2976 per_cpu(bh_accounting, cpu).nr = 0;
2977 put_cpu_var(bh_accounting);
1da177e4
LT
2978}
2979
2980static int buffer_cpu_notify(struct notifier_block *self,
2981 unsigned long action, void *hcpu)
2982{
2983 if (action == CPU_DEAD)
2984 buffer_exit_cpu((unsigned long)hcpu);
2985 return NOTIFY_OK;
2986}
1da177e4
LT
2987
2988void __init buffer_init(void)
2989{
2990 int nrpages;
2991
2992 bh_cachep = kmem_cache_create("buffer_head",
b0196009
PJ
2993 sizeof(struct buffer_head), 0,
2994 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2995 SLAB_MEM_SPREAD),
2996 init_buffer_head,
2997 NULL);
1da177e4
LT
2998
2999 /*
3000 * Limit the bh occupancy to 10% of ZONE_NORMAL
3001 */
3002 nrpages = (nr_free_buffer_pages() * 10) / 100;
3003 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3004 hotcpu_notifier(buffer_cpu_notify, 0);
3005}
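/*
 * A worked example (editorial, with assumed numbers): on a machine
 * with 4 KiB pages, roughly 56-byte buffer_heads and about 768 MiB of
 * ZONE_NORMAL usable for buffer pages, nr_free_buffer_pages() is
 * around 196608, so nrpages = 196608 * 10 / 100 = 19660 and
 * max_buffer_heads = 19660 * (4096 / 56) = 19660 * 73, i.e. roughly
 * 1.4 million buffer_heads before buffer_heads_over_limit trips and
 * writeback starts stripping them.
 */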
3006
3007EXPORT_SYMBOL(__bforget);
3008EXPORT_SYMBOL(__brelse);
3009EXPORT_SYMBOL(__wait_on_buffer);
3010EXPORT_SYMBOL(block_commit_write);
3011EXPORT_SYMBOL(block_prepare_write);
3012EXPORT_SYMBOL(block_read_full_page);
3013EXPORT_SYMBOL(block_sync_page);
3014EXPORT_SYMBOL(block_truncate_page);
3015EXPORT_SYMBOL(block_write_full_page);
3016EXPORT_SYMBOL(cont_prepare_write);
1da177e4
LT
3017EXPORT_SYMBOL(end_buffer_read_sync);
3018EXPORT_SYMBOL(end_buffer_write_sync);
3019EXPORT_SYMBOL(file_fsync);
3020EXPORT_SYMBOL(fsync_bdev);
3021EXPORT_SYMBOL(generic_block_bmap);
3022EXPORT_SYMBOL(generic_commit_write);
3023EXPORT_SYMBOL(generic_cont_expand);
05eb0b51 3024EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3025EXPORT_SYMBOL(init_buffer);
3026EXPORT_SYMBOL(invalidate_bdev);
3027EXPORT_SYMBOL(ll_rw_block);
3028EXPORT_SYMBOL(mark_buffer_dirty);
3029EXPORT_SYMBOL(submit_bh);
3030EXPORT_SYMBOL(sync_dirty_buffer);
3031EXPORT_SYMBOL(unlock_buffer);