1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
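/*
 * Attach an end_io completion handler and its private data to a buffer_head.
 */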
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
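/*
 * Action routine for wait_on_bit()/wait_on_bit_lock() below: kick the
 * backing device's request queue and sleep in io_schedule() until woken.
 */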
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
70void __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
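/*
 * Clear the BH_Lock bit and wake up any waiters sleeping on it.
 */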
77void unlock_buffer(struct buffer_head *bh)
78{
79 smp_mb__before_clear_bit();
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
83}
84
85/*
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
89 */
90void __wait_on_buffer(struct buffer_head * bh)
91{
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93}
94
95static void
96__clear_page_buffers(struct page *page)
97{
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
101}
102
103static void buffer_io_error(struct buffer_head *bh)
104{
105 char b[BDEVNAME_SIZE];
106
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
110}
111
112/*
113 * End-of-IO handler helper function which does not touch the bh after
114 * unlocking it.
115 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116 * a race there is benign: unlock_buffer() only uses the bh's address for
117 * hashing after unlocking the buffer, so it doesn't actually touch the bh
118 * itself.
119 */
120static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
121{
122 if (uptodate) {
123 set_buffer_uptodate(bh);
124 } else {
125 /* This happens, due to failed READA attempts. */
126 clear_buffer_uptodate(bh);
127 }
128 unlock_buffer(bh);
129}
130
131/*
132 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
133 * unlock the buffer. This is what ll_rw_block uses too.
134 */
135void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
136{
137 __end_buffer_read_notouch(bh, uptodate);
138 put_bh(bh);
139}
140
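/*
 * Completion handler for synchronous writes: flag any I/O error on the
 * buffer, set or clear its uptodate state, then unlock it and drop the
 * reference that was taken for the write.
 */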
141void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
142{
143 char b[BDEVNAME_SIZE];
144
145 if (uptodate) {
146 set_buffer_uptodate(bh);
147 } else {
148 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
149 buffer_io_error(bh);
150 printk(KERN_WARNING "lost page write due to "
151 "I/O error on %s\n",
152 bdevname(bh->b_bdev, b));
153 }
154 set_buffer_write_io_error(bh);
155 clear_buffer_uptodate(bh);
156 }
157 unlock_buffer(bh);
158 put_bh(bh);
159}
160
161/*
162 * Write out and wait upon all the dirty data associated with a block
163 * device via its mapping. Does not take the superblock lock.
164 */
165int sync_blockdev(struct block_device *bdev)
166{
167 int ret = 0;
168
169 if (bdev)
170 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
171 return ret;
172}
173EXPORT_SYMBOL(sync_blockdev);
174
175/*
176 * Write out and wait upon all dirty data associated with this
177 * device. Filesystem data as well as the underlying block
178 * device. Takes the superblock lock.
179 */
180int fsync_bdev(struct block_device *bdev)
181{
182 struct super_block *sb = get_super(bdev);
183 if (sb) {
184 int res = fsync_super(sb);
185 drop_super(sb);
186 return res;
187 }
188 return sync_blockdev(bdev);
189}
190
191/**
192 * freeze_bdev -- lock a filesystem and force it into a consistent state
193 * @bdev: blockdevice to lock
194 *
195 * This takes the block device bd_mount_sem to make sure no new mounts
196 * happen on bdev until thaw_bdev() is called.
197 * If a superblock is found on this device, we take the s_umount semaphore
198 * on it to make sure nobody unmounts until the snapshot creation is done.
199 */
200struct super_block *freeze_bdev(struct block_device *bdev)
201{
202 struct super_block *sb;
203
204 down(&bdev->bd_mount_sem);
205 sb = get_super(bdev);
206 if (sb && !(sb->s_flags & MS_RDONLY)) {
207 sb->s_frozen = SB_FREEZE_WRITE;
208 smp_wmb();
209
210 __fsync_super(sb);
211
212 sb->s_frozen = SB_FREEZE_TRANS;
213 smp_wmb();
214
215 sync_blockdev(sb->s_bdev);
216
217 if (sb->s_op->write_super_lockfs)
218 sb->s_op->write_super_lockfs(sb);
219 }
220
221 sync_blockdev(bdev);
222 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
223}
224EXPORT_SYMBOL(freeze_bdev);
225
226/**
227 * thaw_bdev -- unlock filesystem
228 * @bdev: blockdevice to unlock
229 * @sb: associated superblock
230 *
231 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
232 */
233void thaw_bdev(struct block_device *bdev, struct super_block *sb)
234{
235 if (sb) {
236 BUG_ON(sb->s_bdev != bdev);
237
238 if (sb->s_op->unlockfs)
239 sb->s_op->unlockfs(sb);
240 sb->s_frozen = SB_UNFROZEN;
241 smp_wmb();
242 wake_up(&sb->s_wait_unfrozen);
243 drop_super(sb);
244 }
245
246 up(&bdev->bd_mount_sem);
247}
248EXPORT_SYMBOL(thaw_bdev);
249
250/*
251 * Various filesystems appear to want __find_get_block to be non-blocking.
252 * But it's the page lock which protects the buffers. To get around this,
253 * we get exclusion from try_to_free_buffers with the blockdev mapping's
254 * private_lock.
255 *
256 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257 * may be quite high. This code could TryLock the page, and if that
258 * succeeds, there is no need to take private_lock. (But if
259 * private_lock is contended then so is mapping->tree_lock).
260 */
261static struct buffer_head *
262__find_get_block_slow(struct block_device *bdev, sector_t block)
263{
264 struct inode *bd_inode = bdev->bd_inode;
265 struct address_space *bd_mapping = bd_inode->i_mapping;
266 struct buffer_head *ret = NULL;
267 pgoff_t index;
268 struct buffer_head *bh;
269 struct buffer_head *head;
270 struct page *page;
271 int all_mapped = 1;
272
273 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 page = find_get_page(bd_mapping, index);
275 if (!page)
276 goto out;
277
278 spin_lock(&bd_mapping->private_lock);
279 if (!page_has_buffers(page))
280 goto out_unlock;
281 head = page_buffers(page);
282 bh = head;
283 do {
284 if (bh->b_blocknr == block) {
285 ret = bh;
286 get_bh(bh);
287 goto out_unlock;
288 }
289 if (!buffer_mapped(bh))
290 all_mapped = 0;
291 bh = bh->b_this_page;
292 } while (bh != head);
293
294 /* we might be here because some of the buffers on this page are
295 * not mapped. This is due to various races between
296 * file io on the block device and getblk. It gets dealt with
297 * elsewhere, don't buffer_error if we had some unmapped buffers
298 */
299 if (all_mapped) {
300 printk("__find_get_block_slow() failed. "
301 "block=%llu, b_blocknr=%llu\n",
302 (unsigned long long)block,
303 (unsigned long long)bh->b_blocknr);
304 printk("b_state=0x%08lx, b_size=%zu\n",
305 bh->b_state, bh->b_size);
306 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
307 }
308out_unlock:
309 spin_unlock(&bd_mapping->private_lock);
310 page_cache_release(page);
311out:
312 return ret;
313}
314
315/* If invalidate_buffers() will trash dirty buffers, it means some kind
316 of fs corruption is going on. Trashing dirty data always imply losing
317 information that was supposed to be just stored on the physical layer
318 by the user.
319
320 Thus invalidate_buffers in general usage is not allowed to trash
321 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
322 be preserved. These buffers are simply skipped.
323
324 We also skip buffers which are still in use. For example this can
325 happen if a userspace program is reading the block device.
326
327 NOTE: In the case where the user removed a removable-media-disk even if
328 there's still dirty data not synced on disk (due to a bug in the device driver
329 or due to an error of the user), by not destroying the dirty buffers we could
330 generate corruption also on the next media inserted, thus a parameter is
331 necessary to handle this case in the most safe way possible (trying
332 to not corrupt also the new disk inserted with the data belonging to
333 the old now corrupted disk). Also for the ramdisk the natural thing
334 to do in order to release the ramdisk memory is to destroy dirty buffers.
335
336 These are two special cases. Normal usage implies that the device driver
337 issues a sync on the device (without waiting for I/O completion) and
338 then calls invalidate_buffers, which does not trash dirty buffers.
339
340 For handling cache coherency with the blkdev pagecache the 'update' case
341 has been introduced. It is needed to re-read from disk any pinned
342 buffer. NOTE: re-reading from disk is destructive so we can do it only
343 when we assume nobody is changing the buffercache under our I/O and when
344 we think the disk contains more recent information than the buffercache.
345 The update == 1 pass marks the buffers we need to update, the update == 2
346 pass does the actual I/O. */
347void invalidate_bdev(struct block_device *bdev)
348{
349 struct address_space *mapping = bdev->bd_inode->i_mapping;
350
351 if (mapping->nrpages == 0)
352 return;
353
354 invalidate_bh_lrus();
355 invalidate_mapping_pages(mapping, 0, -1);
356}
357
358/*
359 * Kick pdflush then try to free up some ZONE_NORMAL memory.
360 */
361static void free_more_memory(void)
362{
363 struct zone **zones;
364 pg_data_t *pgdat;
365
366 wakeup_pdflush(1024);
367 yield();
368
369 for_each_online_pgdat(pgdat) {
370 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
371 if (*zones)
372 try_to_free_pages(zones, 0, GFP_NOFS);
373 }
374}
375
376/*
377 * I/O completion handler for block_read_full_page() - pages
378 * which come unlocked at the end of I/O.
379 */
380static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
381{
382 unsigned long flags;
383 struct buffer_head *first;
384 struct buffer_head *tmp;
385 struct page *page;
386 int page_uptodate = 1;
387
388 BUG_ON(!buffer_async_read(bh));
389
390 page = bh->b_page;
391 if (uptodate) {
392 set_buffer_uptodate(bh);
393 } else {
394 clear_buffer_uptodate(bh);
395 if (printk_ratelimit())
396 buffer_io_error(bh);
397 SetPageError(page);
398 }
399
400 /*
401 * Be _very_ careful from here on. Bad things can happen if
402 * two buffer heads end IO at almost the same time and both
403 * decide that the page is now completely done.
404 */
405 first = page_buffers(page);
406 local_irq_save(flags);
407 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
408 clear_buffer_async_read(bh);
409 unlock_buffer(bh);
410 tmp = bh;
411 do {
412 if (!buffer_uptodate(tmp))
413 page_uptodate = 0;
414 if (buffer_async_read(tmp)) {
415 BUG_ON(!buffer_locked(tmp));
416 goto still_busy;
417 }
418 tmp = tmp->b_this_page;
419 } while (tmp != bh);
420 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 local_irq_restore(flags);
422
423 /*
424 * If none of the buffers had errors and they are all
425 * uptodate then we can set the page uptodate.
426 */
427 if (page_uptodate && !PageError(page))
428 SetPageUptodate(page);
429 unlock_page(page);
430 return;
431
432still_busy:
433 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
434 local_irq_restore(flags);
435 return;
436}
437
438/*
439 * Completion handler for block_write_full_page() - pages which are unlocked
440 * during I/O, and which have PageWriteback cleared upon I/O completion.
441 */
442static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
443{
444 char b[BDEVNAME_SIZE];
445 unsigned long flags;
446 struct buffer_head *first;
447 struct buffer_head *tmp;
448 struct page *page;
449
450 BUG_ON(!buffer_async_write(bh));
451
452 page = bh->b_page;
453 if (uptodate) {
454 set_buffer_uptodate(bh);
455 } else {
456 if (printk_ratelimit()) {
457 buffer_io_error(bh);
458 printk(KERN_WARNING "lost page write due to "
459 "I/O error on %s\n",
460 bdevname(bh->b_bdev, b));
461 }
462 set_bit(AS_EIO, &page->mapping->flags);
463 set_buffer_write_io_error(bh);
464 clear_buffer_uptodate(bh);
465 SetPageError(page);
466 }
467
468 first = page_buffers(page);
469 local_irq_save(flags);
470 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
471
472 clear_buffer_async_write(bh);
473 unlock_buffer(bh);
474 tmp = bh->b_this_page;
475 while (tmp != bh) {
476 if (buffer_async_write(tmp)) {
477 BUG_ON(!buffer_locked(tmp));
478 goto still_busy;
479 }
480 tmp = tmp->b_this_page;
481 }
482 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
483 local_irq_restore(flags);
484 end_page_writeback(page);
485 return;
486
487still_busy:
488 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
489 local_irq_restore(flags);
490 return;
491}
492
493/*
494 * If a page's buffers are under async readin (end_buffer_async_read
495 * completion) then there is a possibility that another thread of
496 * control could lock one of the buffers after it has completed
497 * but while some of the other buffers have not completed. This
498 * locked buffer would confuse end_buffer_async_read() into not unlocking
499 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
500 * that this buffer is not under async I/O.
501 *
502 * The page comes unlocked when it has no locked buffer_async buffers
503 * left.
504 *
505 * PageLocked prevents anyone starting new async I/O reads any of
506 * the buffers.
507 *
508 * PageWriteback is used to prevent simultaneous writeout of the same
509 * page.
510 *
511 * PageLocked prevents anyone from starting writeback of a page which is
512 * under read I/O (PageWriteback is only ever set against a locked page).
513 */
514static void mark_buffer_async_read(struct buffer_head *bh)
515{
516 bh->b_end_io = end_buffer_async_read;
517 set_buffer_async_read(bh);
518}
519
520void mark_buffer_async_write(struct buffer_head *bh)
521{
522 bh->b_end_io = end_buffer_async_write;
523 set_buffer_async_write(bh);
524}
525EXPORT_SYMBOL(mark_buffer_async_write);
526
527
528/*
529 * fs/buffer.c contains helper functions for buffer-backed address space's
530 * fsync functions. A common requirement for buffer-based filesystems is
531 * that certain data from the backing blockdev needs to be written out for
532 * a successful fsync(). For example, ext2 indirect blocks need to be
533 * written back and waited upon before fsync() returns.
534 *
535 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
536 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
537 * management of a list of dependent buffers at ->i_mapping->private_list.
538 *
539 * Locking is a little subtle: try_to_free_buffers() will remove buffers
540 * from their controlling inode's queue when they are being freed. But
541 * try_to_free_buffers() will be operating against the *blockdev* mapping
542 * at the time, not against the S_ISREG file which depends on those buffers.
543 * So the locking for private_list is via the private_lock in the address_space
544 * which backs the buffers. Which is different from the address_space
545 * against which the buffers are listed. So for a particular address_space,
546 * mapping->private_lock does *not* protect mapping->private_list! In fact,
547 * mapping->private_list will always be protected by the backing blockdev's
548 * ->private_lock.
549 *
550 * Which introduces a requirement: all buffers on an address_space's
551 * ->private_list must be from the same address_space: the blockdev's.
552 *
553 * address_spaces which do not place buffers at ->private_list via these
554 * utility functions are free to use private_lock and private_list for
555 * whatever they want. The only requirement is that list_empty(private_list)
556 * be true at clear_inode() time.
557 *
558 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
559 * filesystems should do that. invalidate_inode_buffers() should just go
560 * BUG_ON(!list_empty).
561 *
562 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
563 * take an address_space, not an inode. And it should be called
564 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
565 * queued up.
566 *
567 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
568 * list if it is already on a list. Because if the buffer is on a list,
569 * it *must* already be on the right one. If not, the filesystem is being
570 * silly. This will save a ton of locking. But first we have to ensure
571 * that buffers are taken *off* the old inode's list when they are freed
572 * (presumably in truncate). That requires careful auditing of all
573 * filesystems (do it inside bforget()). It could also be done by bringing
574 * b_inode back.
575 */
576
577/*
578 * The buffer's backing address_space's private_lock must be held
579 */
580static inline void __remove_assoc_queue(struct buffer_head *bh)
581{
582 list_del_init(&bh->b_assoc_buffers);
583 WARN_ON(!bh->b_assoc_map);
584 if (buffer_write_io_error(bh))
585 set_bit(AS_EIO, &bh->b_assoc_map->flags);
586 bh->b_assoc_map = NULL;
587}
588
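/*
 * Does the inode have any buffers queued on its mapping's private_list?
 */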
589int inode_has_buffers(struct inode *inode)
590{
591 return !list_empty(&inode->i_data.private_list);
592}
593
594/*
595 * osync is designed to support O_SYNC io. It waits synchronously for
596 * all already-submitted IO to complete, but does not queue any new
597 * writes to the disk.
598 *
599 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
600 * you dirty the buffers, and then use osync_inode_buffers to wait for
601 * completion. Any other dirty buffers which are not yet queued for
602 * write will not be flushed to disk by the osync.
603 */
604static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
605{
606 struct buffer_head *bh;
607 struct list_head *p;
608 int err = 0;
609
610 spin_lock(lock);
611repeat:
612 list_for_each_prev(p, list) {
613 bh = BH_ENTRY(p);
614 if (buffer_locked(bh)) {
615 get_bh(bh);
616 spin_unlock(lock);
617 wait_on_buffer(bh);
618 if (!buffer_uptodate(bh))
619 err = -EIO;
620 brelse(bh);
621 spin_lock(lock);
622 goto repeat;
623 }
624 }
625 spin_unlock(lock);
626 return err;
627}
628
629/**
630 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
631 * @mapping: the mapping which wants those buffers written
632 *
633 * Starts I/O against the buffers at mapping->private_list, and waits upon
634 * that I/O.
635 *
636 * Basically, this is a convenience function for fsync().
637 * @mapping is a file or directory which needs those buffers to be written for
638 * a successful fsync().
639 */
640int sync_mapping_buffers(struct address_space *mapping)
641{
642 struct address_space *buffer_mapping = mapping->assoc_mapping;
643
644 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
645 return 0;
646
647 return fsync_buffers_list(&buffer_mapping->private_lock,
648 &mapping->private_list);
649}
650EXPORT_SYMBOL(sync_mapping_buffers);
651
652/*
653 * Called when we've recently written block `bblock', and it is known that
654 * `bblock' was for a buffer_boundary() buffer. This means that the block at
655 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
656 * dirty, schedule it for IO. So that indirects merge nicely with their data.
657 */
658void write_boundary_block(struct block_device *bdev,
659 sector_t bblock, unsigned blocksize)
660{
661 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
662 if (bh) {
663 if (buffer_dirty(bh))
664 ll_rw_block(WRITE, 1, &bh);
665 put_bh(bh);
666 }
667}
668
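/*
 * Dirty the buffer and queue it on the owning inode's ->private_list so
 * that sync_mapping_buffers() can write it out at fsync() time.
 */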
669void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
670{
671 struct address_space *mapping = inode->i_mapping;
672 struct address_space *buffer_mapping = bh->b_page->mapping;
673
674 mark_buffer_dirty(bh);
675 if (!mapping->assoc_mapping) {
676 mapping->assoc_mapping = buffer_mapping;
677 } else {
678 BUG_ON(mapping->assoc_mapping != buffer_mapping);
679 }
680 if (!bh->b_assoc_map) {
681 spin_lock(&buffer_mapping->private_lock);
682 list_move_tail(&bh->b_assoc_buffers,
683 &mapping->private_list);
684 bh->b_assoc_map = mapping;
685 spin_unlock(&buffer_mapping->private_lock);
686 }
687}
688EXPORT_SYMBOL(mark_buffer_dirty_inode);
689
690/*
691 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
692 * dirty.
693 *
694 * If warn is true, then emit a warning if the page is not uptodate and has
695 * not been truncated.
696 */
697static int __set_page_dirty(struct page *page,
698 struct address_space *mapping, int warn)
699{
700 if (unlikely(!mapping))
701 return !TestSetPageDirty(page);
702
703 if (TestSetPageDirty(page))
704 return 0;
705
706 write_lock_irq(&mapping->tree_lock);
707 if (page->mapping) { /* Race with truncate? */
708 WARN_ON_ONCE(warn && !PageUptodate(page));
709
710 if (mapping_cap_account_dirty(mapping)) {
711 __inc_zone_page_state(page, NR_FILE_DIRTY);
712 __inc_bdi_stat(mapping->backing_dev_info,
713 BDI_RECLAIMABLE);
714 task_io_account_write(PAGE_CACHE_SIZE);
715 }
716 radix_tree_tag_set(&mapping->page_tree,
717 page_index(page), PAGECACHE_TAG_DIRTY);
718 }
719 write_unlock_irq(&mapping->tree_lock);
720 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
721
722 return 1;
723}
724
725/*
726 * Add a page to the dirty page list.
727 *
728 * It is a sad fact of life that this function is called from several places
729 * deeply under spinlocking. It may not sleep.
730 *
731 * If the page has buffers, the uptodate buffers are set dirty, to preserve
732 * dirty-state coherency between the page and the buffers. If the page does
733 * not have buffers then when they are later attached they will all be set
734 * dirty.
735 *
736 * The buffers are dirtied before the page is dirtied. There's a small race
737 * window in which a writepage caller may see the page cleanness but not the
738 * buffer dirtiness. That's fine. If this code were to set the page dirty
739 * before the buffers, a concurrent writepage caller could clear the page dirty
740 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
741 * page on the dirty page list.
742 *
743 * We use private_lock to lock against try_to_free_buffers while using the
744 * page's buffer list. Also use this to protect against clean buffers being
745 * added to the page after it was set dirty.
746 *
747 * FIXME: may need to call ->reservepage here as well. That's rather up to the
748 * address_space though.
749 */
750int __set_page_dirty_buffers(struct page *page)
751{
752 struct address_space *mapping = page_mapping(page);
753
754 if (unlikely(!mapping))
755 return !TestSetPageDirty(page);
756
757 spin_lock(&mapping->private_lock);
758 if (page_has_buffers(page)) {
759 struct buffer_head *head = page_buffers(page);
760 struct buffer_head *bh = head;
761
762 do {
763 set_buffer_dirty(bh);
764 bh = bh->b_this_page;
765 } while (bh != head);
766 }
767 spin_unlock(&mapping->private_lock);
768
769 return __set_page_dirty(page, mapping, 1);
770}
771EXPORT_SYMBOL(__set_page_dirty_buffers);
772
773/*
774 * Write out and wait upon a list of buffers.
775 *
776 * We have conflicting pressures: we want to make sure that all
777 * initially dirty buffers get waited on, but that any subsequently
778 * dirtied buffers don't. After all, we don't want fsync to last
779 * forever if somebody is actively writing to the file.
780 *
781 * Do this in two main stages: first we copy dirty buffers to a
782 * temporary inode list, queueing the writes as we go. Then we clean
783 * up, waiting for those writes to complete.
784 *
785 * During this second stage, any subsequent updates to the file may end
786 * up refiling the buffer on the original inode's dirty list again, so
787 * there is a chance we will end up with a buffer queued for write but
788 * not yet completed on that list. So, as a final cleanup we go through
789 * the osync code to catch these locked, dirty buffers without requeuing
790 * any newly dirty buffers for write.
791 */
792static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
793{
794 struct buffer_head *bh;
795 struct list_head tmp;
796 struct address_space *mapping;
797 int err = 0, err2;
798
799 INIT_LIST_HEAD(&tmp);
800
801 spin_lock(lock);
802 while (!list_empty(list)) {
803 bh = BH_ENTRY(list->next);
804 mapping = bh->b_assoc_map;
805 __remove_assoc_queue(bh);
806 /* Avoid race with mark_buffer_dirty_inode() which does
807 * a lockless check and we rely on seeing the dirty bit */
808 smp_mb();
809 if (buffer_dirty(bh) || buffer_locked(bh)) {
810 list_add(&bh->b_assoc_buffers, &tmp);
811 bh->b_assoc_map = mapping;
812 if (buffer_dirty(bh)) {
813 get_bh(bh);
814 spin_unlock(lock);
815 /*
816 * Ensure any pending I/O completes so that
817 * ll_rw_block() actually writes the current
818 * contents - it is a noop if I/O is still in
819 * flight on potentially older contents.
820 */
821 ll_rw_block(SWRITE, 1, &bh);
822 brelse(bh);
823 spin_lock(lock);
824 }
825 }
826 }
827
828 while (!list_empty(&tmp)) {
829 bh = BH_ENTRY(tmp.prev);
830 get_bh(bh);
831 mapping = bh->b_assoc_map;
832 __remove_assoc_queue(bh);
833 /* Avoid race with mark_buffer_dirty_inode() which does
834 * a lockless check and we rely on seeing the dirty bit */
835 smp_mb();
836 if (buffer_dirty(bh)) {
837 list_add(&bh->b_assoc_buffers,
838 &mapping->private_list);
839 bh->b_assoc_map = mapping;
840 }
841 spin_unlock(lock);
842 wait_on_buffer(bh);
843 if (!buffer_uptodate(bh))
844 err = -EIO;
845 brelse(bh);
846 spin_lock(lock);
847 }
848
849 spin_unlock(lock);
850 err2 = osync_buffers_list(lock, list);
851 if (err)
852 return err;
853 else
854 return err2;
855}
856
857/*
858 * Invalidate any and all dirty buffers on a given inode. We are
859 * probably unmounting the fs, but that doesn't mean we have already
860 * done a sync(). Just drop the buffers from the inode list.
861 *
862 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
863 * assumes that all the buffers are against the blockdev. Not true
864 * for reiserfs.
865 */
866void invalidate_inode_buffers(struct inode *inode)
867{
868 if (inode_has_buffers(inode)) {
869 struct address_space *mapping = &inode->i_data;
870 struct list_head *list = &mapping->private_list;
871 struct address_space *buffer_mapping = mapping->assoc_mapping;
872
873 spin_lock(&buffer_mapping->private_lock);
874 while (!list_empty(list))
875 __remove_assoc_queue(BH_ENTRY(list->next));
876 spin_unlock(&buffer_mapping->private_lock);
877 }
878}
879
880/*
881 * Remove any clean buffers from the inode's buffer list. This is called
882 * when we're trying to free the inode itself. Those buffers can pin it.
883 *
884 * Returns true if all buffers were removed.
885 */
886int remove_inode_buffers(struct inode *inode)
887{
888 int ret = 1;
889
890 if (inode_has_buffers(inode)) {
891 struct address_space *mapping = &inode->i_data;
892 struct list_head *list = &mapping->private_list;
893 struct address_space *buffer_mapping = mapping->assoc_mapping;
894
895 spin_lock(&buffer_mapping->private_lock);
896 while (!list_empty(list)) {
897 struct buffer_head *bh = BH_ENTRY(list->next);
898 if (buffer_dirty(bh)) {
899 ret = 0;
900 break;
901 }
902 __remove_assoc_queue(bh);
903 }
904 spin_unlock(&buffer_mapping->private_lock);
905 }
906 return ret;
907}
908
909/*
910 * Create the appropriate buffers when given a page for data area and
911 * the size of each buffer. Use the bh->b_this_page linked list to
912 * follow the buffers created. Return NULL if unable to create more
913 * buffers.
914 *
915 * The retry flag is used to differentiate async IO (paging, swapping),
916 * which may not fail, from ordinary buffer allocations.
917 */
918struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
919 int retry)
920{
921 struct buffer_head *bh, *head;
922 long offset;
923
924try_again:
925 head = NULL;
926 offset = PAGE_SIZE;
927 while ((offset -= size) >= 0) {
928 bh = alloc_buffer_head(GFP_NOFS);
929 if (!bh)
930 goto no_grow;
931
932 bh->b_bdev = NULL;
933 bh->b_this_page = head;
934 bh->b_blocknr = -1;
935 head = bh;
936
937 bh->b_state = 0;
938 atomic_set(&bh->b_count, 0);
939 bh->b_private = NULL;
940 bh->b_size = size;
941
942 /* Link the buffer to its page */
943 set_bh_page(bh, page, offset);
944
945 init_buffer(bh, NULL, NULL);
946 }
947 return head;
948/*
949 * In case anything failed, we just free everything we got.
950 */
951no_grow:
952 if (head) {
953 do {
954 bh = head;
955 head = head->b_this_page;
956 free_buffer_head(bh);
957 } while (head);
958 }
959
960 /*
961 * Return failure for non-async IO requests. Async IO requests
962 * are not allowed to fail, so we have to wait until buffer heads
963 * become available. But we don't want tasks sleeping with
964 * partially complete buffers, so all were released above.
965 */
966 if (!retry)
967 return NULL;
968
969 /* We're _really_ low on memory. Now we just
970 * wait for old buffer heads to become free due to
971 * finishing IO. Since this is an async request and
972 * the reserve list is empty, we're sure there are
973 * async buffer heads in use.
974 */
975 free_more_memory();
976 goto try_again;
977}
978EXPORT_SYMBOL_GPL(alloc_page_buffers);
979
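/*
 * Close the bh->b_this_page chain into a ring and attach it to the page.
 */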
980static inline void
981link_dev_buffers(struct page *page, struct buffer_head *head)
982{
983 struct buffer_head *bh, *tail;
984
985 bh = head;
986 do {
987 tail = bh;
988 bh = bh->b_this_page;
989 } while (bh);
990 tail->b_this_page = head;
991 attach_page_buffers(page, head);
992}
993
994/*
995 * Initialise the state of a blockdev page's buffers.
996 */
997static void
998init_page_buffers(struct page *page, struct block_device *bdev,
999 sector_t block, int size)
1000{
1001 struct buffer_head *head = page_buffers(page);
1002 struct buffer_head *bh = head;
1003 int uptodate = PageUptodate(page);
1004
1005 do {
1006 if (!buffer_mapped(bh)) {
1007 init_buffer(bh, NULL, NULL);
1008 bh->b_bdev = bdev;
1009 bh->b_blocknr = block;
1010 if (uptodate)
1011 set_buffer_uptodate(bh);
1012 set_buffer_mapped(bh);
1013 }
1014 block++;
1015 bh = bh->b_this_page;
1016 } while (bh != head);
1017}
1018
1019/*
1020 * Create the page-cache page that contains the requested block.
1021 *
1022 * This is used purely for blockdev mappings.
1023 */
1024static struct page *
1025grow_dev_page(struct block_device *bdev, sector_t block,
1026 pgoff_t index, int size)
1027{
1028 struct inode *inode = bdev->bd_inode;
1029 struct page *page;
1030 struct buffer_head *bh;
1031
1032 page = find_or_create_page(inode->i_mapping, index,
1033 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1034 if (!page)
1035 return NULL;
1036
1037 BUG_ON(!PageLocked(page));
1038
1039 if (page_has_buffers(page)) {
1040 bh = page_buffers(page);
1041 if (bh->b_size == size) {
1042 init_page_buffers(page, bdev, block, size);
1043 return page;
1044 }
1045 if (!try_to_free_buffers(page))
1046 goto failed;
1047 }
1048
1049 /*
1050 * Allocate some buffers for this page
1051 */
1052 bh = alloc_page_buffers(page, size, 0);
1053 if (!bh)
1054 goto failed;
1055
1056 /*
1057 * Link the page to the buffers and initialise them. Take the
1058 * lock to be atomic wrt __find_get_block(), which does not
1059 * run under the page lock.
1060 */
1061 spin_lock(&inode->i_mapping->private_lock);
1062 link_dev_buffers(page, bh);
1063 init_page_buffers(page, bdev, block, size);
1064 spin_unlock(&inode->i_mapping->private_lock);
1065 return page;
1066
1067failed:
1068 BUG();
1069 unlock_page(page);
1070 page_cache_release(page);
1071 return NULL;
1072}
1073
1074/*
1075 * Create buffers for the specified block device block's page. If
1076 * that page was dirty, the buffers are set dirty also.
1077 */
1078static int
1079grow_buffers(struct block_device *bdev, sector_t block, int size)
1080{
1081 struct page *page;
1082 pgoff_t index;
1083 int sizebits;
1084
1085 sizebits = -1;
1086 do {
1087 sizebits++;
1088 } while ((size << sizebits) < PAGE_SIZE);
1089
1090 index = block >> sizebits;
1091
1092 /*
1093 * Check for a block which wants to lie outside our maximum possible
1094 * pagecache index. (this comparison is done using sector_t types).
1095 */
1096 if (unlikely(index != block >> sizebits)) {
1097 char b[BDEVNAME_SIZE];
1098
1099 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1100 "device %s\n",
1101 __FUNCTION__, (unsigned long long)block,
1102 bdevname(bdev, b));
1103 return -EIO;
1104 }
1105 block = index << sizebits;
1106 /* Create a page with the proper size buffers.. */
1107 page = grow_dev_page(bdev, block, index, size);
1108 if (!page)
1109 return 0;
1110 unlock_page(page);
1111 page_cache_release(page);
1112 return 1;
1113}
1114
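/*
 * Slow path for __getblk(): check the block size against the device's hard
 * sector size, then repeatedly look the buffer up and grow the blockdev
 * pagecache until it appears, freeing memory when an attempt makes no
 * progress.
 */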
1115static struct buffer_head *
1116__getblk_slow(struct block_device *bdev, sector_t block, int size)
1117{
1118 /* Size must be multiple of hard sectorsize */
1119 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1120 (size < 512 || size > PAGE_SIZE))) {
1121 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1122 size);
1123 printk(KERN_ERR "hardsect size: %d\n",
1124 bdev_hardsect_size(bdev));
1125
1126 dump_stack();
1127 return NULL;
1128 }
1129
1130 for (;;) {
1131 struct buffer_head * bh;
1132 int ret;
1133
1134 bh = __find_get_block(bdev, block, size);
1135 if (bh)
1136 return bh;
1137
1138 ret = grow_buffers(bdev, block, size);
1139 if (ret < 0)
1140 return NULL;
1141 if (ret == 0)
1142 free_more_memory();
1143 }
1144}
1145
1146/*
1147 * The relationship between dirty buffers and dirty pages:
1148 *
1149 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1150 * the page is tagged dirty in its radix tree.
1151 *
1152 * At all times, the dirtiness of the buffers represents the dirtiness of
1153 * subsections of the page. If the page has buffers, the page dirty bit is
1154 * merely a hint about the true dirty state.
1155 *
1156 * When a page is set dirty in its entirety, all its buffers are marked dirty
1157 * (if the page has buffers).
1158 *
1159 * When a buffer is marked dirty, its page is dirtied, but the page's other
1160 * buffers are not.
1161 *
1162 * Also. When blockdev buffers are explicitly read with bread(), they
1163 * individually become uptodate. But their backing page remains not
1164 * uptodate - even if all of its buffers are uptodate. A subsequent
1165 * block_read_full_page() against that page will discover all the uptodate
1166 * buffers, will set the page uptodate and will perform no I/O.
1167 */
1168
1169/**
1170 * mark_buffer_dirty - mark a buffer_head as needing writeout
1171 * @bh: the buffer_head to mark dirty
1172 *
1173 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1174 * backing page dirty, then tag the page as dirty in its address_space's radix
1175 * tree and then attach the address_space's inode to its superblock's dirty
1176 * inode list.
1177 *
1178 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1179 * mapping->tree_lock and the global inode_lock.
1180 */
1181void mark_buffer_dirty(struct buffer_head *bh)
1182{
1183 WARN_ON_ONCE(!buffer_uptodate(bh));
1184 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1185 __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1186}
1187
1188/*
1189 * Decrement a buffer_head's reference count. If all buffers against a page
1190 * have zero reference count, are clean and unlocked, and if the page is clean
1191 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1192 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1193 * a page but it ends up not being freed, and buffers may later be reattached).
1194 */
1195void __brelse(struct buffer_head * buf)
1196{
1197 if (atomic_read(&buf->b_count)) {
1198 put_bh(buf);
1199 return;
1200 }
1201 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1202 WARN_ON(1);
1203}
1204
1205/*
1206 * bforget() is like brelse(), except it discards any
1207 * potentially dirty data.
1208 */
1209void __bforget(struct buffer_head *bh)
1210{
1211 clear_buffer_dirty(bh);
1212 if (bh->b_assoc_map) {
1213 struct address_space *buffer_mapping = bh->b_page->mapping;
1214
1215 spin_lock(&buffer_mapping->private_lock);
1216 list_del_init(&bh->b_assoc_buffers);
1217 bh->b_assoc_map = NULL;
1218 spin_unlock(&buffer_mapping->private_lock);
1219 }
1220 __brelse(bh);
1221}
1222
1223static struct buffer_head *__bread_slow(struct buffer_head *bh)
1224{
1225 lock_buffer(bh);
1226 if (buffer_uptodate(bh)) {
1227 unlock_buffer(bh);
1228 return bh;
1229 } else {
1230 get_bh(bh);
1231 bh->b_end_io = end_buffer_read_sync;
1232 submit_bh(READ, bh);
1233 wait_on_buffer(bh);
1234 if (buffer_uptodate(bh))
1235 return bh;
1236 }
1237 brelse(bh);
1238 return NULL;
1239}
1240
1241/*
1242 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1243 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1244 * refcount elevated by one when they're in an LRU. A buffer can only appear
1245 * once in a particular CPU's LRU. A single buffer can be present in multiple
1246 * CPU's LRUs at the same time.
1247 *
1248 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1249 * sb_find_get_block().
1250 *
1251 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1252 * a local interrupt disable for that.
1253 */
1254
1255#define BH_LRU_SIZE 8
1256
1257struct bh_lru {
1258 struct buffer_head *bhs[BH_LRU_SIZE];
1259};
1260
1261static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1262
1263#ifdef CONFIG_SMP
1264#define bh_lru_lock() local_irq_disable()
1265#define bh_lru_unlock() local_irq_enable()
1266#else
1267#define bh_lru_lock() preempt_disable()
1268#define bh_lru_unlock() preempt_enable()
1269#endif
1270
1271static inline void check_irqs_on(void)
1272{
1273#ifdef irqs_disabled
1274 BUG_ON(irqs_disabled());
1275#endif
1276}
1277
1278/*
1279 * The LRU management algorithm is dopey-but-simple. Sorry.
1280 */
1281static void bh_lru_install(struct buffer_head *bh)
1282{
1283 struct buffer_head *evictee = NULL;
1284 struct bh_lru *lru;
1285
1286 check_irqs_on();
1287 bh_lru_lock();
1288 lru = &__get_cpu_var(bh_lrus);
1289 if (lru->bhs[0] != bh) {
1290 struct buffer_head *bhs[BH_LRU_SIZE];
1291 int in;
1292 int out = 0;
1293
1294 get_bh(bh);
1295 bhs[out++] = bh;
1296 for (in = 0; in < BH_LRU_SIZE; in++) {
1297 struct buffer_head *bh2 = lru->bhs[in];
1298
1299 if (bh2 == bh) {
1300 __brelse(bh2);
1301 } else {
1302 if (out >= BH_LRU_SIZE) {
1303 BUG_ON(evictee != NULL);
1304 evictee = bh2;
1305 } else {
1306 bhs[out++] = bh2;
1307 }
1308 }
1309 }
1310 while (out < BH_LRU_SIZE)
1311 bhs[out++] = NULL;
1312 memcpy(lru->bhs, bhs, sizeof(bhs));
1313 }
1314 bh_lru_unlock();
1315
1316 if (evictee)
1317 __brelse(evictee);
1318}
1319
1320/*
1321 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1322 */
1323static struct buffer_head *
1324lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1325{
1326 struct buffer_head *ret = NULL;
1327 struct bh_lru *lru;
1328 unsigned int i;
1329
1330 check_irqs_on();
1331 bh_lru_lock();
1332 lru = &__get_cpu_var(bh_lrus);
1333 for (i = 0; i < BH_LRU_SIZE; i++) {
1334 struct buffer_head *bh = lru->bhs[i];
1335
1336 if (bh && bh->b_bdev == bdev &&
1337 bh->b_blocknr == block && bh->b_size == size) {
1338 if (i) {
1339 while (i) {
1340 lru->bhs[i] = lru->bhs[i - 1];
1341 i--;
1342 }
1343 lru->bhs[0] = bh;
1344 }
1345 get_bh(bh);
1346 ret = bh;
1347 break;
1348 }
1349 }
1350 bh_lru_unlock();
1351 return ret;
1352}
1353
1354/*
1355 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1356 * it in the LRU and mark it as accessed. If it is not present then return
1357 * NULL
1358 */
1359struct buffer_head *
1360__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1361{
1362 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1363
1364 if (bh == NULL) {
1365 bh = __find_get_block_slow(bdev, block);
1366 if (bh)
1367 bh_lru_install(bh);
1368 }
1369 if (bh)
1370 touch_buffer(bh);
1371 return bh;
1372}
1373EXPORT_SYMBOL(__find_get_block);
1374
1375/*
1376 * __getblk will locate (and, if necessary, create) the buffer_head
1377 * which corresponds to the passed block_device, block and size. The
1378 * returned buffer has its reference count incremented.
1379 *
1380 * __getblk() cannot fail - it just keeps trying. If you pass it an
1381 * illegal block number, __getblk() will happily return a buffer_head
1382 * which represents the non-existent block. Very weird.
1383 *
1384 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1385 * attempt is failing. FIXME, perhaps?
1386 */
1387struct buffer_head *
1388__getblk(struct block_device *bdev, sector_t block, unsigned size)
1389{
1390 struct buffer_head *bh = __find_get_block(bdev, block, size);
1391
1392 might_sleep();
1393 if (bh == NULL)
1394 bh = __getblk_slow(bdev, block, size);
1395 return bh;
1396}
1397EXPORT_SYMBOL(__getblk);
1398
1399/*
1400 * Do async read-ahead on a buffer..
1401 */
1402void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1403{
1404 struct buffer_head *bh = __getblk(bdev, block, size);
1405 if (likely(bh)) {
1406 ll_rw_block(READA, 1, &bh);
1407 brelse(bh);
1408 }
1409}
1410EXPORT_SYMBOL(__breadahead);
1411
1412/**
1413 * __bread() - reads a specified block and returns the bh
1414 * @bdev: the block_device to read from
1415 * @block: number of block
1416 * @size: size (in bytes) to read
1417 *
1418 * Reads a specified block, and returns buffer head that contains it.
1419 * It returns NULL if the block was unreadable.
1420 */
1421struct buffer_head *
1422__bread(struct block_device *bdev, sector_t block, unsigned size)
1423{
1424 struct buffer_head *bh = __getblk(bdev, block, size);
1425
1426 if (likely(bh) && !buffer_uptodate(bh))
1427 bh = __bread_slow(bh);
1428 return bh;
1429}
1430EXPORT_SYMBOL(__bread);
1431
1432/*
1433 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1434 * This doesn't race because it runs in each cpu either in irq
1435 * or with preempt disabled.
1436 */
1437static void invalidate_bh_lru(void *arg)
1438{
1439 struct bh_lru *b = &get_cpu_var(bh_lrus);
1440 int i;
1441
1442 for (i = 0; i < BH_LRU_SIZE; i++) {
1443 brelse(b->bhs[i]);
1444 b->bhs[i] = NULL;
1445 }
1446 put_cpu_var(bh_lrus);
1447}
1448
1449void invalidate_bh_lrus(void)
1450{
1451 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1452}
1453EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1454
1455void set_bh_page(struct buffer_head *bh,
1456 struct page *page, unsigned long offset)
1457{
1458 bh->b_page = page;
1459 BUG_ON(offset >= PAGE_SIZE);
1460 if (PageHighMem(page))
1461 /*
1462 * This catches illegal uses and preserves the offset:
1463 */
1464 bh->b_data = (char *)(0 + offset);
1465 else
1466 bh->b_data = page_address(page) + offset;
1467}
1468EXPORT_SYMBOL(set_bh_page);
1469
1470/*
1471 * Called when truncating a buffer on a page completely.
1472 */
1473static void discard_buffer(struct buffer_head * bh)
1474{
1475 lock_buffer(bh);
1476 clear_buffer_dirty(bh);
1477 bh->b_bdev = NULL;
1478 clear_buffer_mapped(bh);
1479 clear_buffer_req(bh);
1480 clear_buffer_new(bh);
1481 clear_buffer_delay(bh);
1482 clear_buffer_unwritten(bh);
1483 unlock_buffer(bh);
1484}
1485
1486/**
1487 * block_invalidatepage - invalidate part or all of a buffer-backed page
1488 *
1489 * @page: the page which is affected
1490 * @offset: the index of the truncation point
1491 *
1492 * block_invalidatepage() is called when all or part of the page has become
1493 * invalidated by a truncate operation.
1494 *
1495 * block_invalidatepage() does not have to release all buffers, but it must
1496 * ensure that no dirty buffer is left outside @offset and that no I/O
1497 * is underway against any of the blocks which are outside the truncation
1498 * point. Because the caller is about to free (and possibly reuse) those
1499 * blocks on-disk.
1500 */
1501void block_invalidatepage(struct page *page, unsigned long offset)
1502{
1503 struct buffer_head *head, *bh, *next;
1504 unsigned int curr_off = 0;
1505
1506 BUG_ON(!PageLocked(page));
1507 if (!page_has_buffers(page))
1508 goto out;
1509
1510 head = page_buffers(page);
1511 bh = head;
1512 do {
1513 unsigned int next_off = curr_off + bh->b_size;
1514 next = bh->b_this_page;
1515
1516 /*
1517 * is this block fully invalidated?
1518 */
1519 if (offset <= curr_off)
1520 discard_buffer(bh);
1521 curr_off = next_off;
1522 bh = next;
1523 } while (bh != head);
1524
1525 /*
1526 * We release buffers only if the entire page is being invalidated.
1527 * The get_block cached value has been unconditionally invalidated,
1528 * so real IO is not possible anymore.
1529 */
1530 if (offset == 0)
1531 try_to_release_page(page, 0);
1532out:
1533 return;
1534}
1535EXPORT_SYMBOL(block_invalidatepage);
1536
1537/*
1538 * We attach and possibly dirty the buffers atomically wrt
1539 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1540 * is already excluded via the page lock.
1541 */
1542void create_empty_buffers(struct page *page,
1543 unsigned long blocksize, unsigned long b_state)
1544{
1545 struct buffer_head *bh, *head, *tail;
1546
1547 head = alloc_page_buffers(page, blocksize, 1);
1548 bh = head;
1549 do {
1550 bh->b_state |= b_state;
1551 tail = bh;
1552 bh = bh->b_this_page;
1553 } while (bh);
1554 tail->b_this_page = head;
1555
1556 spin_lock(&page->mapping->private_lock);
1557 if (PageUptodate(page) || PageDirty(page)) {
1558 bh = head;
1559 do {
1560 if (PageDirty(page))
1561 set_buffer_dirty(bh);
1562 if (PageUptodate(page))
1563 set_buffer_uptodate(bh);
1564 bh = bh->b_this_page;
1565 } while (bh != head);
1566 }
1567 attach_page_buffers(page, head);
1568 spin_unlock(&page->mapping->private_lock);
1569}
1570EXPORT_SYMBOL(create_empty_buffers);
1571
1572/*
1573 * We are taking a block for data and we don't want any output from any
1574 * buffer-cache aliases starting from return from that function and
1575 * until the moment when something will explicitly mark the buffer
1576 * dirty (hopefully that will not happen until we will free that block ;-)
1577 * We don't even need to mark it not-uptodate - nobody can expect
1578 * anything from a newly allocated buffer anyway. We used to use
1579 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1580 * don't want to mark the alias unmapped, for example - it would confuse
1581 * anyone who might pick it with bread() afterwards...
1582 *
1583 * Also.. Note that bforget() doesn't lock the buffer. So there can
1584 * be writeout I/O going on against recently-freed buffers. We don't
1585 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1586 * only if we really need to. That happens here.
1587 */
1588void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1589{
1590 struct buffer_head *old_bh;
1591
1592 might_sleep();
1593
1594 old_bh = __find_get_block_slow(bdev, block);
1595 if (old_bh) {
1596 clear_buffer_dirty(old_bh);
1597 wait_on_buffer(old_bh);
1598 clear_buffer_req(old_bh);
1599 __brelse(old_bh);
1600 }
1601}
1602EXPORT_SYMBOL(unmap_underlying_metadata);
1603
1604/*
1605 * NOTE! All mapped/uptodate combinations are valid:
1606 *
1607 * Mapped Uptodate Meaning
1608 *
1609 * No No "unknown" - must do get_block()
1610 * No Yes "hole" - zero-filled
1611 * Yes No "allocated" - allocated on disk, not read in
1612 * Yes Yes "valid" - allocated and up-to-date in memory.
1613 *
1614 * "Dirty" is valid only with the last case (mapped+uptodate).
1615 */
1616
1617/*
1618 * While block_write_full_page is writing back the dirty buffers under
1619 * the page lock, whoever dirtied the buffers may decide to clean them
1620 * again at any time. We handle that by only looking at the buffer
1621 * state inside lock_buffer().
1622 *
1623 * If block_write_full_page() is called for regular writeback
1624 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1625 * locked buffer. This only can happen if someone has written the buffer
1626 * directly, with submit_bh(). At the address_space level PageWriteback
1627 * prevents this contention from occurring.
1628 */
1629static int __block_write_full_page(struct inode *inode, struct page *page,
1630 get_block_t *get_block, struct writeback_control *wbc)
1631{
1632 int err;
1633 sector_t block;
1634 sector_t last_block;
1635 struct buffer_head *bh, *head;
1636 const unsigned blocksize = 1 << inode->i_blkbits;
1637 int nr_underway = 0;
1638
1639 BUG_ON(!PageLocked(page));
1640
1641 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1642
1643 if (!page_has_buffers(page)) {
1644 create_empty_buffers(page, blocksize,
1645 (1 << BH_Dirty)|(1 << BH_Uptodate));
1646 }
1647
1648 /*
1649 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1650 * here, and the (potentially unmapped) buffers may become dirty at
1651 * any time. If a buffer becomes dirty here after we've inspected it
1652 * then we just miss that fact, and the page stays dirty.
1653 *
1654 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1655 * handle that here by just cleaning them.
1656 */
1657
1658 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1659 head = page_buffers(page);
1660 bh = head;
1661
1662 /*
1663 * Get all the dirty buffers mapped to disk addresses and
1664 * handle any aliases from the underlying blockdev's mapping.
1665 */
1666 do {
1667 if (block > last_block) {
1668 /*
1669 * mapped buffers outside i_size will occur, because
1670 * this page can be outside i_size when there is a
1671 * truncate in progress.
1672 */
1673 /*
1674 * The buffer was zeroed by block_write_full_page()
1675 */
1676 clear_buffer_dirty(bh);
1677 set_buffer_uptodate(bh);
1678 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1679 WARN_ON(bh->b_size != blocksize);
1680 err = get_block(inode, block, bh, 1);
1681 if (err)
1682 goto recover;
1683 if (buffer_new(bh)) {
1684 /* blockdev mappings never come here */
1685 clear_buffer_new(bh);
1686 unmap_underlying_metadata(bh->b_bdev,
1687 bh->b_blocknr);
1688 }
1689 }
1690 bh = bh->b_this_page;
1691 block++;
1692 } while (bh != head);
1693
1694 do {
1695 if (!buffer_mapped(bh))
1696 continue;
1697 /*
1698 * If it's a fully non-blocking write attempt and we cannot
1699 * lock the buffer then redirty the page. Note that this can
1700 * potentially cause a busy-wait loop from pdflush and kswapd
1701 * activity, but those code paths have their own higher-level
1702 * throttling.
1703 */
1704 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1705 lock_buffer(bh);
1706 } else if (test_set_buffer_locked(bh)) {
1707 redirty_page_for_writepage(wbc, page);
1708 continue;
1709 }
1710 if (test_clear_buffer_dirty(bh)) {
1711 mark_buffer_async_write(bh);
1712 } else {
1713 unlock_buffer(bh);
1714 }
1715 } while ((bh = bh->b_this_page) != head);
1716
1717 /*
1718 * The page and its buffers are protected by PageWriteback(), so we can
1719 * drop the bh refcounts early.
1720 */
1721 BUG_ON(PageWriteback(page));
1722 set_page_writeback(page);
1723
1724 do {
1725 struct buffer_head *next = bh->b_this_page;
1726 if (buffer_async_write(bh)) {
1727 submit_bh(WRITE, bh);
1728 nr_underway++;
1729 }
1da177e4
LT
1730 bh = next;
1731 } while (bh != head);
1732 unlock_page(page);
1733
1734 err = 0;
1735done:
1736 if (nr_underway == 0) {
1737 /*
1738 * The page was marked dirty, but the buffers were
1739 * clean. Someone wrote them back by hand with
1740 * ll_rw_block/submit_bh. A rare case.
1741 */
1742 end_page_writeback(page);
1743
1744 /*
1745 * The page and buffer_heads can be released at any time from
1746 * here on.
1747 */
1748 }
1749 return err;
1750
1751recover:
1752 /*
1753 * ENOSPC, or some other error. We may already have added some
1754 * blocks to the file, so we need to write these out to avoid
1755 * exposing stale data.
1756 * The page is currently locked and not marked for writeback
1757 */
1758 bh = head;
1759 /* Recovery: lock and submit the mapped buffers */
1760 do {
1da177e4
LT
1761 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1762 lock_buffer(bh);
1763 mark_buffer_async_write(bh);
1764 } else {
1765 /*
1766 * The buffer may have been set dirty during
1767 * attachment to a dirty page.
1768 */
1769 clear_buffer_dirty(bh);
1770 }
1771 } while ((bh = bh->b_this_page) != head);
1772 SetPageError(page);
1773 BUG_ON(PageWriteback(page));
1774 mapping_set_error(page->mapping, err);
1775 set_page_writeback(page);
1776 do {
1777 struct buffer_head *next = bh->b_this_page;
1778 if (buffer_async_write(bh)) {
1779 clear_buffer_dirty(bh);
1780 submit_bh(WRITE, bh);
1781 nr_underway++;
1782 }
1da177e4
LT
1783 bh = next;
1784 } while (bh != head);
ffda9d30 1785 unlock_page(page);
1da177e4
LT
1786 goto done;
1787}
1788
afddba49
NP
1789/*
1790 * If a page has any new buffers, zero them out here, and mark them uptodate
1791 * and dirty so they'll be written out (in order to prevent uninitialised
1792 * block data from leaking). And clear the new bit.
1793 */
1794void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1795{
1796 unsigned int block_start, block_end;
1797 struct buffer_head *head, *bh;
1798
1799 BUG_ON(!PageLocked(page));
1800 if (!page_has_buffers(page))
1801 return;
1802
1803 bh = head = page_buffers(page);
1804 block_start = 0;
1805 do {
1806 block_end = block_start + bh->b_size;
1807
1808 if (buffer_new(bh)) {
1809 if (block_end > from && block_start < to) {
1810 if (!PageUptodate(page)) {
1811 unsigned start, size;
1812
1813 start = max(from, block_start);
1814 size = min(to, block_end) - start;
1815
eebd2aa3 1816 zero_user(page, start, size);
afddba49
NP
1817 set_buffer_uptodate(bh);
1818 }
1819
1820 clear_buffer_new(bh);
1821 mark_buffer_dirty(bh);
1822 }
1823 }
1824
1825 block_start = block_end;
1826 bh = bh->b_this_page;
1827 } while (bh != head);
1828}
1829EXPORT_SYMBOL(page_zero_new_buffers);
1830
1da177e4
LT
1831static int __block_prepare_write(struct inode *inode, struct page *page,
1832 unsigned from, unsigned to, get_block_t *get_block)
1833{
1834 unsigned block_start, block_end;
1835 sector_t block;
1836 int err = 0;
1837 unsigned blocksize, bbits;
1838 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1839
1840 BUG_ON(!PageLocked(page));
1841 BUG_ON(from > PAGE_CACHE_SIZE);
1842 BUG_ON(to > PAGE_CACHE_SIZE);
1843 BUG_ON(from > to);
1844
1845 blocksize = 1 << inode->i_blkbits;
1846 if (!page_has_buffers(page))
1847 create_empty_buffers(page, blocksize, 0);
1848 head = page_buffers(page);
1849
1850 bbits = inode->i_blkbits;
1851 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1852
1853 for(bh = head, block_start = 0; bh != head || !block_start;
1854 block++, block_start=block_end, bh = bh->b_this_page) {
1855 block_end = block_start + blocksize;
1856 if (block_end <= from || block_start >= to) {
1857 if (PageUptodate(page)) {
1858 if (!buffer_uptodate(bh))
1859 set_buffer_uptodate(bh);
1860 }
1861 continue;
1862 }
1863 if (buffer_new(bh))
1864 clear_buffer_new(bh);
1865 if (!buffer_mapped(bh)) {
b0cf2321 1866 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1867 err = get_block(inode, block, bh, 1);
1868 if (err)
f3ddbdc6 1869 break;
1da177e4 1870 if (buffer_new(bh)) {
1da177e4
LT
1871 unmap_underlying_metadata(bh->b_bdev,
1872 bh->b_blocknr);
1873 if (PageUptodate(page)) {
637aff46 1874 clear_buffer_new(bh);
1da177e4 1875 set_buffer_uptodate(bh);
637aff46 1876 mark_buffer_dirty(bh);
1da177e4
LT
1877 continue;
1878 }
eebd2aa3
CL
1879 if (block_end > to || block_start < from)
1880 zero_user_segments(page,
1881 to, block_end,
1882 block_start, from);
1da177e4
LT
1883 continue;
1884 }
1885 }
1886 if (PageUptodate(page)) {
1887 if (!buffer_uptodate(bh))
1888 set_buffer_uptodate(bh);
1889 continue;
1890 }
1891 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1892 !buffer_unwritten(bh) &&
1da177e4
LT
1893 (block_start < from || block_end > to)) {
1894 ll_rw_block(READ, 1, &bh);
1895 *wait_bh++=bh;
1896 }
1897 }
1898 /*
1899 * If we issued read requests - let them complete.
1900 */
1901 while(wait_bh > wait) {
1902 wait_on_buffer(*--wait_bh);
1903 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1904 err = -EIO;
1da177e4 1905 }
afddba49
NP
1906 if (unlikely(err))
1907 page_zero_new_buffers(page, from, to);
1da177e4
LT
1908 return err;
1909}
1910
1911static int __block_commit_write(struct inode *inode, struct page *page,
1912 unsigned from, unsigned to)
1913{
1914 unsigned block_start, block_end;
1915 int partial = 0;
1916 unsigned blocksize;
1917 struct buffer_head *bh, *head;
1918
1919 blocksize = 1 << inode->i_blkbits;
1920
1921 for(bh = head = page_buffers(page), block_start = 0;
1922 bh != head || !block_start;
1923 block_start=block_end, bh = bh->b_this_page) {
1924 block_end = block_start + blocksize;
1925 if (block_end <= from || block_start >= to) {
1926 if (!buffer_uptodate(bh))
1927 partial = 1;
1928 } else {
1929 set_buffer_uptodate(bh);
1930 mark_buffer_dirty(bh);
1931 }
afddba49 1932 clear_buffer_new(bh);
1da177e4
LT
1933 }
1934
1935 /*
1936 * If this is a partial write which happened to make all buffers
1937 * uptodate then we can optimize away a bogus readpage() for
1938 * the next read(). Here we 'discover' whether the page went
1939 * uptodate as a result of this (potentially partial) write.
1940 */
1941 if (!partial)
1942 SetPageUptodate(page);
1943 return 0;
1944}
1945
afddba49
NP
1946/*
1947 * block_write_begin takes care of the basic task of block allocation and
1948 * bringing partial write blocks uptodate first.
1949 *
1950 * If *pagep is not NULL, then block_write_begin uses the locked page
1951 * at *pagep rather than allocating its own. In this case, the page will
1952 * not be unlocked or deallocated on failure.
1953 */
1954int block_write_begin(struct file *file, struct address_space *mapping,
1955 loff_t pos, unsigned len, unsigned flags,
1956 struct page **pagep, void **fsdata,
1957 get_block_t *get_block)
1958{
1959 struct inode *inode = mapping->host;
1960 int status = 0;
1961 struct page *page;
1962 pgoff_t index;
1963 unsigned start, end;
1964 int ownpage = 0;
1965
1966 index = pos >> PAGE_CACHE_SHIFT;
1967 start = pos & (PAGE_CACHE_SIZE - 1);
1968 end = start + len;
1969
1970 page = *pagep;
1971 if (page == NULL) {
1972 ownpage = 1;
1973 page = __grab_cache_page(mapping, index);
1974 if (!page) {
1975 status = -ENOMEM;
1976 goto out;
1977 }
1978 *pagep = page;
1979 } else
1980 BUG_ON(!PageLocked(page));
1981
1982 status = __block_prepare_write(inode, page, start, end, get_block);
1983 if (unlikely(status)) {
1984 ClearPageUptodate(page);
1985
1986 if (ownpage) {
1987 unlock_page(page);
1988 page_cache_release(page);
1989 *pagep = NULL;
1990
1991 /*
1992 * prepare_write() may have instantiated a few blocks
1993 * outside i_size. Trim these off again. Don't need
1994 * i_size_read because we hold i_mutex.
1995 */
1996 if (pos + len > inode->i_size)
1997 vmtruncate(inode, inode->i_size);
1998 }
1999 goto out;
2000 }
2001
2002out:
2003 return status;
2004}
2005EXPORT_SYMBOL(block_write_begin);
2006
2007int block_write_end(struct file *file, struct address_space *mapping,
2008 loff_t pos, unsigned len, unsigned copied,
2009 struct page *page, void *fsdata)
2010{
2011 struct inode *inode = mapping->host;
2012 unsigned start;
2013
2014 start = pos & (PAGE_CACHE_SIZE - 1);
2015
2016 if (unlikely(copied < len)) {
2017 /*
2018 * The buffers that were written will now be uptodate, so we
2019 * don't have to worry about a readpage reading them and
2020 * overwriting a partial write. However if we have encountered
2021 * a short write and only partially written into a buffer, it
2022 * will not be marked uptodate, so a readpage might come in and
2023 * destroy our partial write.
2024 *
2025 * Do the simplest thing, and just treat any short write to a
2026 * non uptodate page as a zero-length write, and force the
2027 * caller to redo the whole thing.
2028 */
2029 if (!PageUptodate(page))
2030 copied = 0;
2031
2032 page_zero_new_buffers(page, start+copied, start+len);
2033 }
2034 flush_dcache_page(page);
2035
2036 /* This could be a short (even 0-length) commit */
2037 __block_commit_write(inode, page, start, start+copied);
2038
2039 return copied;
2040}
2041EXPORT_SYMBOL(block_write_end);
2042
2043int generic_write_end(struct file *file, struct address_space *mapping,
2044 loff_t pos, unsigned len, unsigned copied,
2045 struct page *page, void *fsdata)
2046{
2047 struct inode *inode = mapping->host;
2048
2049 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2050
2051 /*
2052 * No need to use i_size_read() here, the i_size
2053 * cannot change under us because we hold i_mutex.
2054 *
2055 * But it's important to update i_size while still holding page lock:
2056 * page writeout could otherwise come in and zero beyond i_size.
2057 */
2058 if (pos+copied > inode->i_size) {
2059 i_size_write(inode, pos+copied);
2060 mark_inode_dirty(inode);
2061 }
2062
2063 unlock_page(page);
2064 page_cache_release(page);
2065
2066 return copied;
2067}
2068EXPORT_SYMBOL(generic_write_end);
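/*
 * Illustrative sketch (not part of this file): a filesystem commonly wires
 * its ->write_begin/->write_end address_space operations straight through
 * to block_write_begin() and generic_write_end(), supplying only its own
 * get_block callback.  "myfs_get_block" and "myfs_aops" are hypothetical
 * names used for the example.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin allocate and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
	/* .readpage, .writepage, etc. filled in elsewhere */
};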
2069
1da177e4
LT
2070/*
2071 * Generic "read page" function for block devices that have the normal
2072 * get_block functionality. This is most of the block device filesystems.
2073 * Reads the page asynchronously --- the unlock_buffer() and
2074 * set/clear_buffer_uptodate() functions propagate buffer state into the
2075 * page struct once IO has completed.
2076 */
2077int block_read_full_page(struct page *page, get_block_t *get_block)
2078{
2079 struct inode *inode = page->mapping->host;
2080 sector_t iblock, lblock;
2081 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2082 unsigned int blocksize;
2083 int nr, i;
2084 int fully_mapped = 1;
2085
cd7619d6 2086 BUG_ON(!PageLocked(page));
1da177e4
LT
2087 blocksize = 1 << inode->i_blkbits;
2088 if (!page_has_buffers(page))
2089 create_empty_buffers(page, blocksize, 0);
2090 head = page_buffers(page);
2091
2092 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2093 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2094 bh = head;
2095 nr = 0;
2096 i = 0;
2097
2098 do {
2099 if (buffer_uptodate(bh))
2100 continue;
2101
2102 if (!buffer_mapped(bh)) {
c64610ba
AM
2103 int err = 0;
2104
1da177e4
LT
2105 fully_mapped = 0;
2106 if (iblock < lblock) {
b0cf2321 2107 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2108 err = get_block(inode, iblock, bh, 0);
2109 if (err)
1da177e4
LT
2110 SetPageError(page);
2111 }
2112 if (!buffer_mapped(bh)) {
eebd2aa3 2113 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2114 if (!err)
2115 set_buffer_uptodate(bh);
1da177e4
LT
2116 continue;
2117 }
2118 /*
2119 * get_block() might have updated the buffer
2120 * synchronously
2121 */
2122 if (buffer_uptodate(bh))
2123 continue;
2124 }
2125 arr[nr++] = bh;
2126 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2127
2128 if (fully_mapped)
2129 SetPageMappedToDisk(page);
2130
2131 if (!nr) {
2132 /*
2133 * All buffers are uptodate - we can set the page uptodate
2134 * as well. But not if get_block() returned an error.
2135 */
2136 if (!PageError(page))
2137 SetPageUptodate(page);
2138 unlock_page(page);
2139 return 0;
2140 }
2141
2142 /* Stage two: lock the buffers */
2143 for (i = 0; i < nr; i++) {
2144 bh = arr[i];
2145 lock_buffer(bh);
2146 mark_buffer_async_read(bh);
2147 }
2148
2149 /*
2150 * Stage 3: start the IO. Check for uptodateness
2151 * inside the buffer lock in case another process reading
2152 * the underlying blockdev brought it uptodate (the sct fix).
2153 */
2154 for (i = 0; i < nr; i++) {
2155 bh = arr[i];
2156 if (buffer_uptodate(bh))
2157 end_buffer_async_read(bh, 1);
2158 else
2159 submit_bh(READ, bh);
2160 }
2161 return 0;
2162}
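/*
 * Illustrative sketch (not part of this file): ->readpage is usually just a
 * one-line wrapper around block_read_full_page() with the filesystem's own
 * get_block.  "myfs_get_block" is a hypothetical callback.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}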
2163
2164/* utility function for filesystems that need to do work on expanding
89e10787 2165 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2166 * deal with the hole.
2167 */
89e10787 2168int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2169{
2170 struct address_space *mapping = inode->i_mapping;
2171 struct page *page;
89e10787 2172 void *fsdata;
05eb0b51 2173 unsigned long limit;
1da177e4
LT
2174 int err;
2175
2176 err = -EFBIG;
2177 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2178 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2179 send_sig(SIGXFSZ, current, 0);
2180 goto out;
2181 }
2182 if (size > inode->i_sb->s_maxbytes)
2183 goto out;
2184
89e10787
NP
2185 err = pagecache_write_begin(NULL, mapping, size, 0,
2186 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2187 &page, &fsdata);
2188 if (err)
05eb0b51 2189 goto out;
05eb0b51 2190
89e10787
NP
2191 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2192 BUG_ON(err > 0);
05eb0b51 2193
1da177e4
LT
2194out:
2195 return err;
2196}
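/*
 * Illustrative sketch (not part of this file): a filesystem that cannot
 * represent holes may call generic_cont_expand_simple() from its
 * expanding-truncate/setattr path so the pagecache zero-fills the gap up
 * to the new size.  "myfs_expand" is a hypothetical helper.
 */
static int myfs_expand(struct inode *inode, loff_t new_size)
{
	int err = generic_cont_expand_simple(inode, new_size);

	if (!err)
		mark_inode_dirty(inode);
	return err;
}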
2197
89e10787
NP
2198int cont_expand_zero(struct file *file, struct address_space *mapping,
2199 loff_t pos, loff_t *bytes)
1da177e4 2200{
1da177e4 2201 struct inode *inode = mapping->host;
1da177e4 2202 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2203 struct page *page;
2204 void *fsdata;
2205 pgoff_t index, curidx;
2206 loff_t curpos;
2207 unsigned zerofrom, offset, len;
2208 int err = 0;
1da177e4 2209
89e10787
NP
2210 index = pos >> PAGE_CACHE_SHIFT;
2211 offset = pos & ~PAGE_CACHE_MASK;
2212
2213 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2214 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2215 if (zerofrom & (blocksize-1)) {
2216 *bytes |= (blocksize-1);
2217 (*bytes)++;
2218 }
89e10787 2219 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2220
89e10787
NP
2221 err = pagecache_write_begin(file, mapping, curpos, len,
2222 AOP_FLAG_UNINTERRUPTIBLE,
2223 &page, &fsdata);
2224 if (err)
2225 goto out;
eebd2aa3 2226 zero_user(page, zerofrom, len);
89e10787
NP
2227 err = pagecache_write_end(file, mapping, curpos, len, len,
2228 page, fsdata);
2229 if (err < 0)
2230 goto out;
2231 BUG_ON(err != len);
2232 err = 0;
2233 }
1da177e4 2234
89e10787
NP
2235 /* page covers the boundary, find the boundary offset */
2236 if (index == curidx) {
2237 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2238 		/* if we will expand the file, the last block will be filled */
89e10787
NP
2239 if (offset <= zerofrom) {
2240 goto out;
2241 }
2242 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2243 *bytes |= (blocksize-1);
2244 (*bytes)++;
2245 }
89e10787 2246 len = offset - zerofrom;
1da177e4 2247
89e10787
NP
2248 err = pagecache_write_begin(file, mapping, curpos, len,
2249 AOP_FLAG_UNINTERRUPTIBLE,
2250 &page, &fsdata);
2251 if (err)
2252 goto out;
eebd2aa3 2253 zero_user(page, zerofrom, len);
89e10787
NP
2254 err = pagecache_write_end(file, mapping, curpos, len, len,
2255 page, fsdata);
2256 if (err < 0)
2257 goto out;
2258 BUG_ON(err != len);
2259 err = 0;
1da177e4 2260 }
89e10787
NP
2261out:
2262 return err;
2263}
2264
2265/*
 2266 * For moronic filesystems that do not allow holes in files.
2267 * We may have to extend the file.
2268 */
2269int cont_write_begin(struct file *file, struct address_space *mapping,
2270 loff_t pos, unsigned len, unsigned flags,
2271 struct page **pagep, void **fsdata,
2272 get_block_t *get_block, loff_t *bytes)
2273{
2274 struct inode *inode = mapping->host;
2275 unsigned blocksize = 1 << inode->i_blkbits;
2276 unsigned zerofrom;
2277 int err;
2278
2279 err = cont_expand_zero(file, mapping, pos, bytes);
2280 if (err)
2281 goto out;
2282
2283 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2284 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2285 *bytes |= (blocksize-1);
2286 (*bytes)++;
1da177e4 2287 }
1da177e4 2288
89e10787
NP
2289 *pagep = NULL;
2290 err = block_write_begin(file, mapping, pos, len,
2291 flags, pagep, fsdata, get_block);
1da177e4 2292out:
89e10787 2293 return err;
1da177e4
LT
2294}
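/*
 * Illustrative sketch (not part of this file): a hole-less filesystem routes
 * ->write_begin through cont_write_begin(), passing a pointer to its
 * per-inode "allocated so far" byte count so the gap below the write gets
 * zero-filled first.  "myfs_get_block", "MYFS_I" and the "i_allocated_bytes"
 * field of the hypothetical myfs_inode_info are illustrative names only.
 */
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct myfs_inode_info *mi = MYFS_I(mapping->host);

	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block,
				&mi->i_allocated_bytes);
}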
2295
2296int block_prepare_write(struct page *page, unsigned from, unsigned to,
2297 get_block_t *get_block)
2298{
2299 struct inode *inode = page->mapping->host;
2300 int err = __block_prepare_write(inode, page, from, to, get_block);
2301 if (err)
2302 ClearPageUptodate(page);
2303 return err;
2304}
2305
2306int block_commit_write(struct page *page, unsigned from, unsigned to)
2307{
2308 struct inode *inode = page->mapping->host;
2309 __block_commit_write(inode,page,from,to);
2310 return 0;
2311}
2312
2313int generic_commit_write(struct file *file, struct page *page,
2314 unsigned from, unsigned to)
2315{
2316 struct inode *inode = page->mapping->host;
2317 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2318 __block_commit_write(inode,page,from,to);
2319 /*
2320 * No need to use i_size_read() here, the i_size
1b1dcc1b 2321 * cannot change under us because we hold i_mutex.
1da177e4
LT
2322 */
2323 if (pos > inode->i_size) {
2324 i_size_write(inode, pos);
2325 mark_inode_dirty(inode);
2326 }
2327 return 0;
2328}
2329
54171690
DC
2330/*
2331 * block_page_mkwrite() is not allowed to change the file size as it gets
2332 * called from a page fault handler when a page is first dirtied. Hence we must
2333 * be careful to check for EOF conditions here. We set the page up correctly
2334 * for a written page which means we get ENOSPC checking when writing into
2335 * holes and correct delalloc and unwritten extent mapping on filesystems that
2336 * support these features.
2337 *
2338 * We are not allowed to take the i_mutex here so we have to play games to
2339 * protect against truncate races as the page could now be beyond EOF. Because
2340 * vmtruncate() writes the inode size before removing pages, once we have the
2341 * page lock we can determine safely if the page is beyond EOF. If it is not
2342 * beyond EOF, then the page is guaranteed safe against truncation until we
2343 * unlock the page.
2344 */
2345int
2346block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2347 get_block_t get_block)
2348{
2349 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2350 unsigned long end;
2351 loff_t size;
2352 int ret = -EINVAL;
2353
2354 lock_page(page);
2355 size = i_size_read(inode);
2356 if ((page->mapping != inode->i_mapping) ||
18336338 2357 (page_offset(page) > size)) {
54171690
DC
2358 /* page got truncated out from underneath us */
2359 goto out_unlock;
2360 }
2361
2362 /* page is wholly or partially inside EOF */
2363 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2364 end = size & ~PAGE_CACHE_MASK;
2365 else
2366 end = PAGE_CACHE_SIZE;
2367
2368 ret = block_prepare_write(page, 0, end, get_block);
2369 if (!ret)
2370 ret = block_commit_write(page, 0, end);
2371
2372out_unlock:
2373 unlock_page(page);
2374 return ret;
2375}
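/*
 * Illustrative sketch (not part of this file): a filesystem exposes this as
 * its ->page_mkwrite handler so writable mmap faults go through the usual
 * block allocation/ENOSPC path.  "myfs_get_block" is hypothetical.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};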
1da177e4
LT
2376
2377/*
03158cd7 2378 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2379 * immediately, while under the page lock. So it needs a special end_io
2380 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2381 */
2382static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2383{
68671f35 2384 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2385}
2386
03158cd7
NP
2387/*
2388 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2389 * the page (converting it to circular linked list and taking care of page
2390 * dirty races).
2391 */
2392static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2393{
2394 struct buffer_head *bh;
2395
2396 BUG_ON(!PageLocked(page));
2397
2398 spin_lock(&page->mapping->private_lock);
2399 bh = head;
2400 do {
2401 if (PageDirty(page))
2402 set_buffer_dirty(bh);
2403 if (!bh->b_this_page)
2404 bh->b_this_page = head;
2405 bh = bh->b_this_page;
2406 } while (bh != head);
2407 attach_page_buffers(page, head);
2408 spin_unlock(&page->mapping->private_lock);
2409}
2410
1da177e4
LT
2411/*
2412 * On entry, the page is fully not uptodate.
2413 * On exit the page is fully uptodate in the areas outside (from,to)
2414 */
03158cd7
NP
2415int nobh_write_begin(struct file *file, struct address_space *mapping,
2416 loff_t pos, unsigned len, unsigned flags,
2417 struct page **pagep, void **fsdata,
1da177e4
LT
2418 get_block_t *get_block)
2419{
03158cd7 2420 struct inode *inode = mapping->host;
1da177e4
LT
2421 const unsigned blkbits = inode->i_blkbits;
2422 const unsigned blocksize = 1 << blkbits;
a4b0672d 2423 struct buffer_head *head, *bh;
03158cd7
NP
2424 struct page *page;
2425 pgoff_t index;
2426 unsigned from, to;
1da177e4 2427 unsigned block_in_page;
a4b0672d 2428 unsigned block_start, block_end;
1da177e4 2429 sector_t block_in_file;
1da177e4 2430 int nr_reads = 0;
1da177e4
LT
2431 int ret = 0;
2432 int is_mapped_to_disk = 1;
1da177e4 2433
03158cd7
NP
2434 index = pos >> PAGE_CACHE_SHIFT;
2435 from = pos & (PAGE_CACHE_SIZE - 1);
2436 to = from + len;
2437
2438 page = __grab_cache_page(mapping, index);
2439 if (!page)
2440 return -ENOMEM;
2441 *pagep = page;
2442 *fsdata = NULL;
2443
2444 if (page_has_buffers(page)) {
2445 unlock_page(page);
2446 page_cache_release(page);
2447 *pagep = NULL;
2448 return block_write_begin(file, mapping, pos, len, flags, pagep,
2449 fsdata, get_block);
2450 }
a4b0672d 2451
1da177e4
LT
2452 if (PageMappedToDisk(page))
2453 return 0;
2454
a4b0672d
NP
2455 /*
2456 * Allocate buffers so that we can keep track of state, and potentially
2457 * attach them to the page if an error occurs. In the common case of
2458 * no error, they will just be freed again without ever being attached
2459 * to the page (which is all OK, because we're under the page lock).
2460 *
2461 * Be careful: the buffer linked list is a NULL terminated one, rather
2462 * than the circular one we're used to.
2463 */
2464 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2465 if (!head) {
2466 ret = -ENOMEM;
2467 goto out_release;
2468 }
a4b0672d 2469
1da177e4 2470 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2471
2472 /*
2473 * We loop across all blocks in the page, whether or not they are
2474 * part of the affected region. This is so we can discover if the
2475 * page is fully mapped-to-disk.
2476 */
a4b0672d 2477 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2478 block_start < PAGE_CACHE_SIZE;
a4b0672d 2479 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2480 int create;
2481
a4b0672d
NP
2482 block_end = block_start + blocksize;
2483 bh->b_state = 0;
1da177e4
LT
2484 create = 1;
2485 if (block_start >= to)
2486 create = 0;
2487 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2488 bh, create);
1da177e4
LT
2489 if (ret)
2490 goto failed;
a4b0672d 2491 if (!buffer_mapped(bh))
1da177e4 2492 is_mapped_to_disk = 0;
a4b0672d
NP
2493 if (buffer_new(bh))
2494 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2495 if (PageUptodate(page)) {
2496 set_buffer_uptodate(bh);
1da177e4 2497 continue;
a4b0672d
NP
2498 }
2499 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2500 zero_user_segments(page, block_start, from,
2501 to, block_end);
1da177e4
LT
2502 continue;
2503 }
a4b0672d 2504 if (buffer_uptodate(bh))
1da177e4
LT
2505 continue; /* reiserfs does this */
2506 if (block_start < from || block_end > to) {
a4b0672d
NP
2507 lock_buffer(bh);
2508 bh->b_end_io = end_buffer_read_nobh;
2509 submit_bh(READ, bh);
2510 nr_reads++;
1da177e4
LT
2511 }
2512 }
2513
2514 if (nr_reads) {
1da177e4
LT
2515 /*
2516 * The page is locked, so these buffers are protected from
2517 * any VM or truncate activity. Hence we don't need to care
2518 * for the buffer_head refcounts.
2519 */
a4b0672d 2520 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2521 wait_on_buffer(bh);
2522 if (!buffer_uptodate(bh))
2523 ret = -EIO;
1da177e4
LT
2524 }
2525 if (ret)
2526 goto failed;
2527 }
2528
2529 if (is_mapped_to_disk)
2530 SetPageMappedToDisk(page);
1da177e4 2531
03158cd7 2532 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2533
1da177e4
LT
2534 return 0;
2535
2536failed:
03158cd7 2537 BUG_ON(!ret);
1da177e4 2538 /*
a4b0672d
NP
2539 * Error recovery is a bit difficult. We need to zero out blocks that
2540 * were newly allocated, and dirty them to ensure they get written out.
2541 * Buffers need to be attached to the page at this point, otherwise
2542 * the handling of potential IO errors during writeout would be hard
2543 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2544 */
03158cd7
NP
2545 attach_nobh_buffers(page, head);
2546 page_zero_new_buffers(page, from, to);
a4b0672d 2547
03158cd7
NP
2548out_release:
2549 unlock_page(page);
2550 page_cache_release(page);
2551 *pagep = NULL;
a4b0672d 2552
03158cd7
NP
2553 if (pos + len > inode->i_size)
2554 vmtruncate(inode, inode->i_size);
a4b0672d 2555
1da177e4
LT
2556 return ret;
2557}
03158cd7 2558EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2559
03158cd7
NP
2560int nobh_write_end(struct file *file, struct address_space *mapping,
2561 loff_t pos, unsigned len, unsigned copied,
2562 struct page *page, void *fsdata)
1da177e4
LT
2563{
2564 struct inode *inode = page->mapping->host;
efdc3131 2565 struct buffer_head *head = fsdata;
03158cd7 2566 struct buffer_head *bh;
5b41e74a 2567 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2568
5b41e74a
DM
2569 if (unlikely(copied < len) && !page_has_buffers(page))
2570 attach_nobh_buffers(page, head);
2571 if (page_has_buffers(page))
2572 return generic_write_end(file, mapping, pos, len,
2573 copied, page, fsdata);
a4b0672d 2574
22c8ca78 2575 SetPageUptodate(page);
1da177e4 2576 set_page_dirty(page);
03158cd7
NP
2577 if (pos+copied > inode->i_size) {
2578 i_size_write(inode, pos+copied);
1da177e4
LT
2579 mark_inode_dirty(inode);
2580 }
03158cd7
NP
2581
2582 unlock_page(page);
2583 page_cache_release(page);
2584
03158cd7
NP
2585 while (head) {
2586 bh = head;
2587 head = head->b_this_page;
2588 free_buffer_head(bh);
2589 }
2590
2591 return copied;
1da177e4 2592}
03158cd7 2593EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2594
2595/*
 2596 * nobh_writepage() - based on block_write_full_page() except
2597 * that it tries to operate without attaching bufferheads to
2598 * the page.
2599 */
2600int nobh_writepage(struct page *page, get_block_t *get_block,
2601 struct writeback_control *wbc)
2602{
2603 struct inode * const inode = page->mapping->host;
2604 loff_t i_size = i_size_read(inode);
2605 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2606 unsigned offset;
1da177e4
LT
2607 int ret;
2608
2609 /* Is the page fully inside i_size? */
2610 if (page->index < end_index)
2611 goto out;
2612
2613 /* Is the page fully outside i_size? (truncate in progress) */
2614 offset = i_size & (PAGE_CACHE_SIZE-1);
2615 if (page->index >= end_index+1 || !offset) {
2616 /*
2617 * The page may have dirty, unmapped buffers. For example,
2618 * they may have been added in ext3_writepage(). Make them
2619 * freeable here, so the page does not leak.
2620 */
2621#if 0
2622 /* Not really sure about this - do we need this ? */
2623 if (page->mapping->a_ops->invalidatepage)
2624 page->mapping->a_ops->invalidatepage(page, offset);
2625#endif
2626 unlock_page(page);
2627 return 0; /* don't care */
2628 }
2629
2630 /*
2631 * The page straddles i_size. It must be zeroed out on each and every
2632 * writepage invocation because it may be mmapped. "A file is mapped
2633 * in multiples of the page size. For a file that is not a multiple of
2634 * the page size, the remaining memory is zeroed when mapped, and
2635 * writes to that region are not written out to the file."
2636 */
eebd2aa3 2637 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2638out:
2639 ret = mpage_writepage(page, get_block, wbc);
2640 if (ret == -EAGAIN)
2641 ret = __block_write_full_page(inode, page, get_block, wbc);
2642 return ret;
2643}
2644EXPORT_SYMBOL(nobh_writepage);
2645
03158cd7
NP
2646int nobh_truncate_page(struct address_space *mapping,
2647 loff_t from, get_block_t *get_block)
1da177e4 2648{
1da177e4
LT
2649 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2650 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2651 unsigned blocksize;
2652 sector_t iblock;
2653 unsigned length, pos;
2654 struct inode *inode = mapping->host;
1da177e4 2655 struct page *page;
03158cd7
NP
2656 struct buffer_head map_bh;
2657 int err;
1da177e4 2658
03158cd7
NP
2659 blocksize = 1 << inode->i_blkbits;
2660 length = offset & (blocksize - 1);
2661
2662 /* Block boundary? Nothing to do */
2663 if (!length)
2664 return 0;
2665
2666 length = blocksize - length;
2667 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2668
1da177e4 2669 page = grab_cache_page(mapping, index);
03158cd7 2670 err = -ENOMEM;
1da177e4
LT
2671 if (!page)
2672 goto out;
2673
03158cd7
NP
2674 if (page_has_buffers(page)) {
2675has_buffers:
2676 unlock_page(page);
2677 page_cache_release(page);
2678 return block_truncate_page(mapping, from, get_block);
2679 }
2680
2681 /* Find the buffer that contains "offset" */
2682 pos = blocksize;
2683 while (offset >= pos) {
2684 iblock++;
2685 pos += blocksize;
2686 }
2687
2688 err = get_block(inode, iblock, &map_bh, 0);
2689 if (err)
2690 goto unlock;
2691 /* unmapped? It's a hole - nothing to do */
2692 if (!buffer_mapped(&map_bh))
2693 goto unlock;
2694
2695 /* Ok, it's mapped. Make sure it's up-to-date */
2696 if (!PageUptodate(page)) {
2697 err = mapping->a_ops->readpage(NULL, page);
2698 if (err) {
2699 page_cache_release(page);
2700 goto out;
2701 }
2702 lock_page(page);
2703 if (!PageUptodate(page)) {
2704 err = -EIO;
2705 goto unlock;
2706 }
2707 if (page_has_buffers(page))
2708 goto has_buffers;
1da177e4 2709 }
eebd2aa3 2710 zero_user(page, offset, length);
03158cd7
NP
2711 set_page_dirty(page);
2712 err = 0;
2713
2714unlock:
1da177e4
LT
2715 unlock_page(page);
2716 page_cache_release(page);
2717out:
03158cd7 2718 return err;
1da177e4
LT
2719}
2720EXPORT_SYMBOL(nobh_truncate_page);
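/*
 * Illustrative sketch (not part of this file): the nobh_* helpers above are
 * typically wired up together as an alternative set of address_space
 * operations for filesystems that want to avoid attaching buffer_heads when
 * possible (ext2's "nobh" mount option works this way).  The myfs_* names
 * are hypothetical.
 */
static int myfs_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
	/* truncation of a partial block goes through nobh_truncate_page() */
};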
2721
2722int block_truncate_page(struct address_space *mapping,
2723 loff_t from, get_block_t *get_block)
2724{
2725 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2726 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2727 unsigned blocksize;
54b21a79 2728 sector_t iblock;
1da177e4
LT
2729 unsigned length, pos;
2730 struct inode *inode = mapping->host;
2731 struct page *page;
2732 struct buffer_head *bh;
1da177e4
LT
2733 int err;
2734
2735 blocksize = 1 << inode->i_blkbits;
2736 length = offset & (blocksize - 1);
2737
2738 /* Block boundary? Nothing to do */
2739 if (!length)
2740 return 0;
2741
2742 length = blocksize - length;
54b21a79 2743 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2744
2745 page = grab_cache_page(mapping, index);
2746 err = -ENOMEM;
2747 if (!page)
2748 goto out;
2749
2750 if (!page_has_buffers(page))
2751 create_empty_buffers(page, blocksize, 0);
2752
2753 /* Find the buffer that contains "offset" */
2754 bh = page_buffers(page);
2755 pos = blocksize;
2756 while (offset >= pos) {
2757 bh = bh->b_this_page;
2758 iblock++;
2759 pos += blocksize;
2760 }
2761
2762 err = 0;
2763 if (!buffer_mapped(bh)) {
b0cf2321 2764 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2765 err = get_block(inode, iblock, bh, 0);
2766 if (err)
2767 goto unlock;
2768 /* unmapped? It's a hole - nothing to do */
2769 if (!buffer_mapped(bh))
2770 goto unlock;
2771 }
2772
2773 /* Ok, it's mapped. Make sure it's up-to-date */
2774 if (PageUptodate(page))
2775 set_buffer_uptodate(bh);
2776
33a266dd 2777 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2778 err = -EIO;
2779 ll_rw_block(READ, 1, &bh);
2780 wait_on_buffer(bh);
2781 /* Uhhuh. Read error. Complain and punt. */
2782 if (!buffer_uptodate(bh))
2783 goto unlock;
2784 }
2785
eebd2aa3 2786 zero_user(page, offset, length);
1da177e4
LT
2787 mark_buffer_dirty(bh);
2788 err = 0;
2789
2790unlock:
2791 unlock_page(page);
2792 page_cache_release(page);
2793out:
2794 return err;
2795}
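/*
 * Illustrative sketch (not part of this file): in a shrinking-truncate path
 * the partial last block is zeroed via block_truncate_page() before the
 * filesystem frees the blocks beyond the new size.  "myfs_get_block" and
 * "myfs_free_blocks" are hypothetical.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks(inode, inode->i_size);	/* release blocks past EOF */
	mark_inode_dirty(inode);
}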
2796
2797/*
2798 * The generic ->writepage function for buffer-backed address_spaces
2799 */
2800int block_write_full_page(struct page *page, get_block_t *get_block,
2801 struct writeback_control *wbc)
2802{
2803 struct inode * const inode = page->mapping->host;
2804 loff_t i_size = i_size_read(inode);
2805 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2806 unsigned offset;
1da177e4
LT
2807
2808 /* Is the page fully inside i_size? */
2809 if (page->index < end_index)
2810 return __block_write_full_page(inode, page, get_block, wbc);
2811
2812 /* Is the page fully outside i_size? (truncate in progress) */
2813 offset = i_size & (PAGE_CACHE_SIZE-1);
2814 if (page->index >= end_index+1 || !offset) {
2815 /*
2816 * The page may have dirty, unmapped buffers. For example,
2817 * they may have been added in ext3_writepage(). Make them
2818 * freeable here, so the page does not leak.
2819 */
aaa4059b 2820 do_invalidatepage(page, 0);
1da177e4
LT
2821 unlock_page(page);
2822 return 0; /* don't care */
2823 }
2824
2825 /*
2826 * The page straddles i_size. It must be zeroed out on each and every
 2827 * writepage invocation because it may be mmapped. "A file is mapped
2828 * in multiples of the page size. For a file that is not a multiple of
2829 * the page size, the remaining memory is zeroed when mapped, and
2830 * writes to that region are not written out to the file."
2831 */
eebd2aa3 2832 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2833 return __block_write_full_page(inode, page, get_block, wbc);
2834}
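/*
 * Illustrative sketch (not part of this file): ->writepage for a
 * buffer-backed filesystem is normally a thin wrapper around
 * block_write_full_page().  "myfs_get_block" is hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}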
2835
2836sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2837 get_block_t *get_block)
2838{
2839 struct buffer_head tmp;
2840 struct inode *inode = mapping->host;
2841 tmp.b_state = 0;
2842 tmp.b_blocknr = 0;
b0cf2321 2843 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2844 get_block(inode, block, &tmp, 0);
2845 return tmp.b_blocknr;
2846}
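/*
 * Illustrative sketch (not part of this file): ->bmap (used by the FIBMAP
 * ioctl and swap-over-file) is normally just generic_block_bmap() with the
 * filesystem's get_block.  "myfs_get_block" is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}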
2847
6712ecf8 2848static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2849{
2850 struct buffer_head *bh = bio->bi_private;
2851
1da177e4
LT
2852 if (err == -EOPNOTSUPP) {
2853 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2854 set_bit(BH_Eopnotsupp, &bh->b_state);
2855 }
2856
2857 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2858 bio_put(bio);
1da177e4
LT
2859}
2860
2861int submit_bh(int rw, struct buffer_head * bh)
2862{
2863 struct bio *bio;
2864 int ret = 0;
2865
2866 BUG_ON(!buffer_locked(bh));
2867 BUG_ON(!buffer_mapped(bh));
2868 BUG_ON(!bh->b_end_io);
2869
2870 if (buffer_ordered(bh) && (rw == WRITE))
2871 rw = WRITE_BARRIER;
2872
2873 /*
2874 * Only clear out a write error when rewriting, should this
2875 * include WRITE_SYNC as well?
2876 */
2877 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2878 clear_buffer_write_io_error(bh);
2879
2880 /*
2881 * from here on down, it's all bio -- do the initial mapping,
2882 * submit_bio -> generic_make_request may further map this bio around
2883 */
2884 bio = bio_alloc(GFP_NOIO, 1);
2885
2886 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2887 bio->bi_bdev = bh->b_bdev;
2888 bio->bi_io_vec[0].bv_page = bh->b_page;
2889 bio->bi_io_vec[0].bv_len = bh->b_size;
2890 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2891
2892 bio->bi_vcnt = 1;
2893 bio->bi_idx = 0;
2894 bio->bi_size = bh->b_size;
2895
2896 bio->bi_end_io = end_bio_bh_io_sync;
2897 bio->bi_private = bh;
2898
2899 bio_get(bio);
2900 submit_bio(rw, bio);
2901
2902 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2903 ret = -EOPNOTSUPP;
2904
2905 bio_put(bio);
2906 return ret;
2907}
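/*
 * Illustrative sketch (not part of this file): the minimal pattern for
 * synchronously reading one mapped buffer with submit_bh().  The caller is
 * assumed to already hold a reference on bh, and bh must be mapped.
 */
static int myfs_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {	/* raced with another reader */
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by the end_io handler */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}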
2908
2909/**
2910 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2911 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2912 * @nr: number of &struct buffer_heads in the array
2913 * @bhs: array of pointers to &struct buffer_head
2914 *
a7662236
JK
2915 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2916 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2917 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2918 * are sent to disk. The fourth %READA option is described in the documentation
2919 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
2920 *
2921 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2922 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2923 * clean when doing a write request, and any buffer that appears to be
2924 * up-to-date when doing read request. Further it marks as clean buffers that
2925 * are processed for writing (the buffer cache won't assume that they are
2926 * actually clean until the buffer gets unlocked).
1da177e4
LT
2927 *
 2928 * ll_rw_block sets b_end_io to a simple completion handler that marks
 2929 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2930 * any waiters.
2931 *
2932 * All of the buffers must be for the same device, and must also be a
2933 * multiple of the current approved size for the device.
2934 */
2935void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2936{
2937 int i;
2938
2939 for (i = 0; i < nr; i++) {
2940 struct buffer_head *bh = bhs[i];
2941
a7662236
JK
2942 if (rw == SWRITE)
2943 lock_buffer(bh);
2944 else if (test_set_buffer_locked(bh))
1da177e4
LT
2945 continue;
2946
a7662236 2947 if (rw == WRITE || rw == SWRITE) {
1da177e4 2948 if (test_clear_buffer_dirty(bh)) {
76c3073a 2949 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2950 get_bh(bh);
1da177e4
LT
2951 submit_bh(WRITE, bh);
2952 continue;
2953 }
2954 } else {
1da177e4 2955 if (!buffer_uptodate(bh)) {
76c3073a 2956 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2957 get_bh(bh);
1da177e4
LT
2958 submit_bh(rw, bh);
2959 continue;
2960 }
2961 }
2962 unlock_buffer(bh);
1da177e4
LT
2963 }
2964}
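/*
 * Illustrative sketch (not part of this file): the classic (now deprecated)
 * ll_rw_block() pattern - kick off reads for a batch of buffers, then wait
 * on and check each one.
 */
static int myfs_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);	/* skips already-uptodate buffers */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}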
2965
2966/*
2967 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2968 * and then start new I/O and then wait upon it. The caller must have a ref on
2969 * the buffer_head.
2970 */
2971int sync_dirty_buffer(struct buffer_head *bh)
2972{
2973 int ret = 0;
2974
2975 WARN_ON(atomic_read(&bh->b_count) < 1);
2976 lock_buffer(bh);
2977 if (test_clear_buffer_dirty(bh)) {
2978 get_bh(bh);
2979 bh->b_end_io = end_buffer_write_sync;
2980 ret = submit_bh(WRITE, bh);
2981 wait_on_buffer(bh);
2982 if (buffer_eopnotsupp(bh)) {
2983 clear_buffer_eopnotsupp(bh);
2984 ret = -EOPNOTSUPP;
2985 }
2986 if (!ret && !buffer_uptodate(bh))
2987 ret = -EIO;
2988 } else {
2989 unlock_buffer(bh);
2990 }
2991 return ret;
2992}
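/*
 * Illustrative sketch (not part of this file): forcing a modified metadata
 * buffer to disk and propagating any write error.  "bh" is assumed to be a
 * referenced buffer returned by e.g. sb_bread().
 */
static int myfs_commit_super(struct buffer_head *bh)
{
	/* ... modify bh->b_data ... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* waits; returns -EIO on failure */
}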
2993
2994/*
2995 * try_to_free_buffers() checks if all the buffers on this particular page
2996 * are unused, and releases them if so.
2997 *
2998 * Exclusion against try_to_free_buffers may be obtained by either
2999 * locking the page or by holding its mapping's private_lock.
3000 *
3001 * If the page is dirty but all the buffers are clean then we need to
3002 * be sure to mark the page clean as well. This is because the page
3003 * may be against a block device, and a later reattachment of buffers
3004 * to a dirty page will set *all* buffers dirty. Which would corrupt
3005 * filesystem data on the same device.
3006 *
3007 * The same applies to regular filesystem pages: if all the buffers are
3008 * clean then we set the page clean and proceed. To do that, we require
3009 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3010 * private_lock.
3011 *
3012 * try_to_free_buffers() is non-blocking.
3013 */
3014static inline int buffer_busy(struct buffer_head *bh)
3015{
3016 return atomic_read(&bh->b_count) |
3017 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3018}
3019
3020static int
3021drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3022{
3023 struct buffer_head *head = page_buffers(page);
3024 struct buffer_head *bh;
3025
3026 bh = head;
3027 do {
de7d5a3b 3028 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3029 set_bit(AS_EIO, &page->mapping->flags);
3030 if (buffer_busy(bh))
3031 goto failed;
3032 bh = bh->b_this_page;
3033 } while (bh != head);
3034
3035 do {
3036 struct buffer_head *next = bh->b_this_page;
3037
535ee2fb 3038 if (bh->b_assoc_map)
1da177e4
LT
3039 __remove_assoc_queue(bh);
3040 bh = next;
3041 } while (bh != head);
3042 *buffers_to_free = head;
3043 __clear_page_buffers(page);
3044 return 1;
3045failed:
3046 return 0;
3047}
3048
3049int try_to_free_buffers(struct page *page)
3050{
3051 struct address_space * const mapping = page->mapping;
3052 struct buffer_head *buffers_to_free = NULL;
3053 int ret = 0;
3054
3055 BUG_ON(!PageLocked(page));
ecdfc978 3056 if (PageWriteback(page))
1da177e4
LT
3057 return 0;
3058
3059 if (mapping == NULL) { /* can this still happen? */
3060 ret = drop_buffers(page, &buffers_to_free);
3061 goto out;
3062 }
3063
3064 spin_lock(&mapping->private_lock);
3065 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3066
3067 /*
3068 * If the filesystem writes its buffers by hand (eg ext3)
3069 * then we can have clean buffers against a dirty page. We
3070 * clean the page here; otherwise the VM will never notice
3071 * that the filesystem did any IO at all.
3072 *
3073 * Also, during truncate, discard_buffer will have marked all
3074 * the page's buffers clean. We discover that here and clean
3075 * the page also.
87df7241
NP
3076 *
3077 * private_lock must be held over this entire operation in order
3078 * to synchronise against __set_page_dirty_buffers and prevent the
3079 * dirty bit from being lost.
ecdfc978
LT
3080 */
3081 if (ret)
3082 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3083 spin_unlock(&mapping->private_lock);
1da177e4
LT
3084out:
3085 if (buffers_to_free) {
3086 struct buffer_head *bh = buffers_to_free;
3087
3088 do {
3089 struct buffer_head *next = bh->b_this_page;
3090 free_buffer_head(bh);
3091 bh = next;
3092 } while (bh != buffers_to_free);
3093 }
3094 return ret;
3095}
3096EXPORT_SYMBOL(try_to_free_buffers);
3097
3978d717 3098void block_sync_page(struct page *page)
1da177e4
LT
3099{
3100 struct address_space *mapping;
3101
3102 smp_mb();
3103 mapping = page_mapping(page);
3104 if (mapping)
3105 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
3106}
3107
3108/*
3109 * There are no bdflush tunables left. But distributions are
3110 * still running obsolete flush daemons, so we terminate them here.
3111 *
3112 * Use of bdflush() is deprecated and will be removed in a future kernel.
3113 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3114 */
3115asmlinkage long sys_bdflush(int func, long data)
3116{
3117 static int msg_count;
3118
3119 if (!capable(CAP_SYS_ADMIN))
3120 return -EPERM;
3121
3122 if (msg_count < 5) {
3123 msg_count++;
3124 printk(KERN_INFO
3125 "warning: process `%s' used the obsolete bdflush"
3126 " system call\n", current->comm);
3127 printk(KERN_INFO "Fix your initscripts?\n");
3128 }
3129
3130 if (func == 1)
3131 do_exit(0);
3132 return 0;
3133}
3134
3135/*
3136 * Buffer-head allocation
3137 */
e18b890b 3138static struct kmem_cache *bh_cachep;
1da177e4
LT
3139
3140/*
3141 * Once the number of bh's in the machine exceeds this level, we start
3142 * stripping them in writeback.
3143 */
3144static int max_buffer_heads;
3145
3146int buffer_heads_over_limit;
3147
3148struct bh_accounting {
3149 int nr; /* Number of live bh's */
3150 int ratelimit; /* Limit cacheline bouncing */
3151};
3152
3153static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3154
3155static void recalc_bh_state(void)
3156{
3157 int i;
3158 int tot = 0;
3159
3160 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3161 return;
3162 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3163 for_each_online_cpu(i)
1da177e4
LT
3164 tot += per_cpu(bh_accounting, i).nr;
3165 buffer_heads_over_limit = (tot > max_buffer_heads);
3166}
3167
dd0fc66f 3168struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3169{
b98938c3 3170 struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
e12ba74d 3171 set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
1da177e4 3172 if (ret) {
a35afb83 3173 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3174 get_cpu_var(bh_accounting).nr++;
1da177e4 3175 recalc_bh_state();
736c7b80 3176 put_cpu_var(bh_accounting);
1da177e4
LT
3177 }
3178 return ret;
3179}
3180EXPORT_SYMBOL(alloc_buffer_head);
3181
3182void free_buffer_head(struct buffer_head *bh)
3183{
3184 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3185 kmem_cache_free(bh_cachep, bh);
736c7b80 3186 get_cpu_var(bh_accounting).nr--;
1da177e4 3187 recalc_bh_state();
736c7b80 3188 put_cpu_var(bh_accounting);
1da177e4
LT
3189}
3190EXPORT_SYMBOL(free_buffer_head);
3191
1da177e4
LT
3192static void buffer_exit_cpu(int cpu)
3193{
3194 int i;
3195 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3196
3197 for (i = 0; i < BH_LRU_SIZE; i++) {
3198 brelse(b->bhs[i]);
3199 b->bhs[i] = NULL;
3200 }
8a143426
ED
3201 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3202 per_cpu(bh_accounting, cpu).nr = 0;
3203 put_cpu_var(bh_accounting);
1da177e4
LT
3204}
3205
3206static int buffer_cpu_notify(struct notifier_block *self,
3207 unsigned long action, void *hcpu)
3208{
8bb78442 3209 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3210 buffer_exit_cpu((unsigned long)hcpu);
3211 return NOTIFY_OK;
3212}
1da177e4 3213
389d1b08 3214/**
a6b91919 3215 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3216 * @bh: struct buffer_head
3217 *
3218 * Return true if the buffer is up-to-date and false,
3219 * with the buffer locked, if not.
3220 */
3221int bh_uptodate_or_lock(struct buffer_head *bh)
3222{
3223 if (!buffer_uptodate(bh)) {
3224 lock_buffer(bh);
3225 if (!buffer_uptodate(bh))
3226 return 0;
3227 unlock_buffer(bh);
3228 }
3229 return 1;
3230}
3231EXPORT_SYMBOL(bh_uptodate_or_lock);
3232
3233/**
a6b91919 3234 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3235 * @bh: struct buffer_head
3236 *
3237 * Returns zero on success and -EIO on error.
3238 */
3239int bh_submit_read(struct buffer_head *bh)
3240{
3241 BUG_ON(!buffer_locked(bh));
3242
3243 if (buffer_uptodate(bh)) {
3244 unlock_buffer(bh);
3245 return 0;
3246 }
3247
3248 get_bh(bh);
3249 bh->b_end_io = end_buffer_read_sync;
3250 submit_bh(READ, bh);
3251 wait_on_buffer(bh);
3252 if (buffer_uptodate(bh))
3253 return 0;
3254 return -EIO;
3255}
3256EXPORT_SYMBOL(bh_submit_read);
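/*
 * Illustrative sketch (not part of this file): bh_uptodate_or_lock() and
 * bh_submit_read() combine into a read-if-needed helper for metadata
 * buffers.
 */
static int myfs_read_bh_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer not locked */
	return bh_submit_read(bh);	/* consumes the lock we now hold */
}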
3257
b98938c3
CL
3258static void
3259init_buffer_head(struct kmem_cache *cachep, void *data)
3260{
3261 struct buffer_head *bh = data;
3262
3263 memset(bh, 0, sizeof(*bh));
3264 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3265}
3266
1da177e4
LT
3267void __init buffer_init(void)
3268{
3269 int nrpages;
3270
b98938c3
CL
3271 bh_cachep = kmem_cache_create("buffer_head",
3272 sizeof(struct buffer_head), 0,
3273 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3274 SLAB_MEM_SPREAD),
3275 init_buffer_head);
1da177e4
LT
3276
3277 /*
3278 * Limit the bh occupancy to 10% of ZONE_NORMAL
3279 */
3280 nrpages = (nr_free_buffer_pages() * 10) / 100;
3281 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3282 hotcpu_notifier(buffer_cpu_notify, 0);
3283}
3284
3285EXPORT_SYMBOL(__bforget);
3286EXPORT_SYMBOL(__brelse);
3287EXPORT_SYMBOL(__wait_on_buffer);
3288EXPORT_SYMBOL(block_commit_write);
3289EXPORT_SYMBOL(block_prepare_write);
54171690 3290EXPORT_SYMBOL(block_page_mkwrite);
1da177e4
LT
3291EXPORT_SYMBOL(block_read_full_page);
3292EXPORT_SYMBOL(block_sync_page);
3293EXPORT_SYMBOL(block_truncate_page);
3294EXPORT_SYMBOL(block_write_full_page);
89e10787 3295EXPORT_SYMBOL(cont_write_begin);
1da177e4
LT
3296EXPORT_SYMBOL(end_buffer_read_sync);
3297EXPORT_SYMBOL(end_buffer_write_sync);
3298EXPORT_SYMBOL(file_fsync);
3299EXPORT_SYMBOL(fsync_bdev);
3300EXPORT_SYMBOL(generic_block_bmap);
3301EXPORT_SYMBOL(generic_commit_write);
05eb0b51 3302EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3303EXPORT_SYMBOL(init_buffer);
3304EXPORT_SYMBOL(invalidate_bdev);
3305EXPORT_SYMBOL(ll_rw_block);
3306EXPORT_SYMBOL(mark_buffer_dirty);
3307EXPORT_SYMBOL(submit_bh);
3308EXPORT_SYMBOL(sync_dirty_buffer);
3309EXPORT_SYMBOL(unlock_buffer);