1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
 16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
 27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
 37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
 43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
fc9b52cd 70void __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
fc9b52cd 77void unlock_buffer(struct buffer_head *bh)
1da177e4 78{
51b07fc3 79 clear_bit_unlock(BH_Lock, &bh->b_state);
80 smp_mb__after_clear_bit();
81 wake_up_bit(&bh->b_state, BH_Lock);
82}
83
84/*
85 * Block until a buffer comes unlocked. This doesn't stop it
86 * from becoming locked again - you have to lock it yourself
87 * if you want to preserve its state.
88 */
89void __wait_on_buffer(struct buffer_head * bh)
90{
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92}
93
94static void
95__clear_page_buffers(struct page *page)
96{
97 ClearPagePrivate(page);
4c21e2f2 98 set_page_private(page, 0);
99 page_cache_release(page);
100}
101
102
103static int quiet_error(struct buffer_head *bh)
104{
105 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106 return 0;
107 return 1;
108}
109
110
111static void buffer_io_error(struct buffer_head *bh)
112{
113 char b[BDEVNAME_SIZE];
114 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 bdevname(bh->b_bdev, b),
116 (unsigned long long)bh->b_blocknr);
117}
118
119/*
120 * End-of-IO handler helper function which does not touch the bh after
121 * unlocking it.
122 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 123 * a race there is benign: unlock_buffer() only uses the bh's address for
124 * hashing after unlocking the buffer, so it doesn't actually touch the bh
125 * itself.
1da177e4 126 */
68671f35 127static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128{
129 if (uptodate) {
130 set_buffer_uptodate(bh);
131 } else {
132 /* This happens, due to failed READA attempts. */
133 clear_buffer_uptodate(bh);
134 }
135 unlock_buffer(bh);
136}
137
138/*
139 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
140 * unlock the buffer. This is what ll_rw_block uses too.
141 */
142void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143{
144 __end_buffer_read_notouch(bh, uptodate);
145 put_bh(bh);
146}
147
148void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149{
150 char b[BDEVNAME_SIZE];
151
152 if (uptodate) {
153 set_buffer_uptodate(bh);
154 } else {
08bafc03 155 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156 buffer_io_error(bh);
157 printk(KERN_WARNING "lost page write due to "
158 "I/O error on %s\n",
159 bdevname(bh->b_bdev, b));
160 }
161 set_buffer_write_io_error(bh);
162 clear_buffer_uptodate(bh);
163 }
164 unlock_buffer(bh);
165 put_bh(bh);
166}
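
/*
 * Editorial sketch (not part of the original file): how a caller might
 * push one dirty, referenced buffer to disk synchronously, using
 * end_buffer_write_sync() above as the completion handler.  This mirrors,
 * in simplified form, the pattern sync_dirty_buffer() uses elsewhere in
 * this file; the function name here is hypothetical.
 */
static int example_write_buffer_sync(struct buffer_head *bh)
{
	int ret = 0;

	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);			/* reference held across the I/O */
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(WRITE, bh);
		wait_on_buffer(bh);		/* completion handler unlocks bh */
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);		/* it was clean: nothing to do */
	}
	return ret;
}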
167
168/*
169 * Various filesystems appear to want __find_get_block to be non-blocking.
170 * But it's the page lock which protects the buffers. To get around this,
171 * we get exclusion from try_to_free_buffers with the blockdev mapping's
172 * private_lock.
173 *
174 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
175 * may be quite high. This code could TryLock the page, and if that
176 * succeeds, there is no need to take private_lock. (But if
177 * private_lock is contended then so is mapping->tree_lock).
178 */
179static struct buffer_head *
385fd4c5 180__find_get_block_slow(struct block_device *bdev, sector_t block)
181{
182 struct inode *bd_inode = bdev->bd_inode;
183 struct address_space *bd_mapping = bd_inode->i_mapping;
184 struct buffer_head *ret = NULL;
185 pgoff_t index;
186 struct buffer_head *bh;
187 struct buffer_head *head;
188 struct page *page;
189 int all_mapped = 1;
190
191 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
192 page = find_get_page(bd_mapping, index);
193 if (!page)
194 goto out;
195
196 spin_lock(&bd_mapping->private_lock);
197 if (!page_has_buffers(page))
198 goto out_unlock;
199 head = page_buffers(page);
200 bh = head;
201 do {
202 if (bh->b_blocknr == block) {
203 ret = bh;
204 get_bh(bh);
205 goto out_unlock;
206 }
207 if (!buffer_mapped(bh))
208 all_mapped = 0;
209 bh = bh->b_this_page;
210 } while (bh != head);
211
212 /* we might be here because some of the buffers on this page are
213 * not mapped. This is due to various races between
214 * file io on the block device and getblk. It gets dealt with
215 * elsewhere, don't buffer_error if we had some unmapped buffers
216 */
217 if (all_mapped) {
218 printk("__find_get_block_slow() failed. "
219 "block=%llu, b_blocknr=%llu\n",
220 (unsigned long long)block,
221 (unsigned long long)bh->b_blocknr);
222 printk("b_state=0x%08lx, b_size=%zu\n",
223 bh->b_state, bh->b_size);
224 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
225 }
226out_unlock:
227 spin_unlock(&bd_mapping->private_lock);
228 page_cache_release(page);
229out:
230 return ret;
231}
232
233/* If invalidate_buffers() will trash dirty buffers, it means some kind
234 of fs corruption is going on. Trashing dirty data always imply losing
235 information that was supposed to be just stored on the physical layer
236 by the user.
237
 238 Thus invalidate_buffers in general usage is not allowed to trash
239 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
240 be preserved. These buffers are simply skipped.
241
242 We also skip buffers which are still in use. For example this can
243 happen if a userspace program is reading the block device.
244
245 NOTE: In the case where the user removed a removable-media-disk even if
 246 there's still dirty data not synced on disk (due to a bug in the device driver
 247 or due to an error by the user), by not destroying the dirty buffers we could
248 generate corruption also on the next media inserted, thus a parameter is
249 necessary to handle this case in the most safe way possible (trying
250 to not corrupt also the new disk inserted with the data belonging to
251 the old now corrupted disk). Also for the ramdisk the natural thing
252 to do in order to release the ramdisk memory is to destroy dirty buffers.
253
 254 These are two special cases. Normal usage requires the device driver
 255 to issue a sync on the device (without waiting for I/O completion) and
256 then an invalidate_buffers call that doesn't trash dirty buffers.
257
258 For handling cache coherency with the blkdev pagecache the 'update' case
 259 has been introduced. It is needed to re-read from disk any pinned
260 buffer. NOTE: re-reading from disk is destructive so we can do it only
261 when we assume nobody is changing the buffercache under our I/O and when
262 we think the disk contains more recent information than the buffercache.
263 The update == 1 pass marks the buffers we need to update, the update == 2
264 pass does the actual I/O. */
f98393a6 265void invalidate_bdev(struct block_device *bdev)
1da177e4 266{
267 struct address_space *mapping = bdev->bd_inode->i_mapping;
268
269 if (mapping->nrpages == 0)
270 return;
271
1da177e4 272 invalidate_bh_lrus();
fc0ecff6 273 invalidate_mapping_pages(mapping, 0, -1);
274}
275
276/*
277 * Kick pdflush then try to free up some ZONE_NORMAL memory.
278 */
279static void free_more_memory(void)
280{
19770b32 281 struct zone *zone;
0e88460d 282 int nid;
1da177e4 283
687a21ce 284 wakeup_pdflush(1024);
285 yield();
286
0e88460d 287 for_each_online_node(nid) {
288 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
289 gfp_zone(GFP_NOFS), NULL,
290 &zone);
291 if (zone)
292 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
293 GFP_NOFS);
294 }
295}
296
297/*
298 * I/O completion handler for block_read_full_page() - pages
299 * which come unlocked at the end of I/O.
300 */
301static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
302{
1da177e4 303 unsigned long flags;
a3972203 304 struct buffer_head *first;
305 struct buffer_head *tmp;
306 struct page *page;
307 int page_uptodate = 1;
308
309 BUG_ON(!buffer_async_read(bh));
310
311 page = bh->b_page;
312 if (uptodate) {
313 set_buffer_uptodate(bh);
314 } else {
315 clear_buffer_uptodate(bh);
08bafc03 316 if (!quiet_error(bh))
317 buffer_io_error(bh);
318 SetPageError(page);
319 }
320
321 /*
322 * Be _very_ careful from here on. Bad things can happen if
323 * two buffer heads end IO at almost the same time and both
324 * decide that the page is now completely done.
325 */
326 first = page_buffers(page);
327 local_irq_save(flags);
328 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
329 clear_buffer_async_read(bh);
330 unlock_buffer(bh);
331 tmp = bh;
332 do {
333 if (!buffer_uptodate(tmp))
334 page_uptodate = 0;
335 if (buffer_async_read(tmp)) {
336 BUG_ON(!buffer_locked(tmp));
337 goto still_busy;
338 }
339 tmp = tmp->b_this_page;
340 } while (tmp != bh);
341 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
342 local_irq_restore(flags);
343
344 /*
345 * If none of the buffers had errors and they are all
346 * uptodate then we can set the page uptodate.
347 */
348 if (page_uptodate && !PageError(page))
349 SetPageUptodate(page);
350 unlock_page(page);
351 return;
352
353still_busy:
354 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
355 local_irq_restore(flags);
356 return;
357}
358
359/*
360 * Completion handler for block_write_full_page() - pages which are unlocked
361 * during I/O, and which have PageWriteback cleared upon I/O completion.
362 */
b6cd0b77 363static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
364{
365 char b[BDEVNAME_SIZE];
1da177e4 366 unsigned long flags;
a3972203 367 struct buffer_head *first;
368 struct buffer_head *tmp;
369 struct page *page;
370
371 BUG_ON(!buffer_async_write(bh));
372
373 page = bh->b_page;
374 if (uptodate) {
375 set_buffer_uptodate(bh);
376 } else {
08bafc03 377 if (!quiet_error(bh)) {
378 buffer_io_error(bh);
379 printk(KERN_WARNING "lost page write due to "
380 "I/O error on %s\n",
381 bdevname(bh->b_bdev, b));
382 }
383 set_bit(AS_EIO, &page->mapping->flags);
58ff407b 384 set_buffer_write_io_error(bh);
385 clear_buffer_uptodate(bh);
386 SetPageError(page);
387 }
388
389 first = page_buffers(page);
390 local_irq_save(flags);
391 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
392
393 clear_buffer_async_write(bh);
394 unlock_buffer(bh);
395 tmp = bh->b_this_page;
396 while (tmp != bh) {
397 if (buffer_async_write(tmp)) {
398 BUG_ON(!buffer_locked(tmp));
399 goto still_busy;
400 }
401 tmp = tmp->b_this_page;
402 }
403 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
404 local_irq_restore(flags);
405 end_page_writeback(page);
406 return;
407
408still_busy:
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
411 return;
412}
413
414/*
415 * If a page's buffers are under async readin (end_buffer_async_read
416 * completion) then there is a possibility that another thread of
417 * control could lock one of the buffers after it has completed
418 * but while some of the other buffers have not completed. This
419 * locked buffer would confuse end_buffer_async_read() into not unlocking
420 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
421 * that this buffer is not under async I/O.
422 *
423 * The page comes unlocked when it has no locked buffer_async buffers
424 * left.
425 *
426 * PageLocked prevents anyone starting new async I/O reads any of
427 * the buffers.
428 *
429 * PageWriteback is used to prevent simultaneous writeout of the same
430 * page.
431 *
432 * PageLocked prevents anyone from starting writeback of a page which is
433 * under read I/O (PageWriteback is only ever set against a locked page).
434 */
435static void mark_buffer_async_read(struct buffer_head *bh)
436{
437 bh->b_end_io = end_buffer_async_read;
438 set_buffer_async_read(bh);
439}
440
441void mark_buffer_async_write(struct buffer_head *bh)
442{
443 bh->b_end_io = end_buffer_async_write;
444 set_buffer_async_write(bh);
445}
446EXPORT_SYMBOL(mark_buffer_async_write);
447
448
449/*
450 * fs/buffer.c contains helper functions for buffer-backed address space's
451 * fsync functions. A common requirement for buffer-based filesystems is
452 * that certain data from the backing blockdev needs to be written out for
453 * a successful fsync(). For example, ext2 indirect blocks need to be
454 * written back and waited upon before fsync() returns.
455 *
456 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
457 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
458 * management of a list of dependent buffers at ->i_mapping->private_list.
459 *
460 * Locking is a little subtle: try_to_free_buffers() will remove buffers
461 * from their controlling inode's queue when they are being freed. But
462 * try_to_free_buffers() will be operating against the *blockdev* mapping
463 * at the time, not against the S_ISREG file which depends on those buffers.
464 * So the locking for private_list is via the private_lock in the address_space
465 * which backs the buffers. Which is different from the address_space
466 * against which the buffers are listed. So for a particular address_space,
467 * mapping->private_lock does *not* protect mapping->private_list! In fact,
468 * mapping->private_list will always be protected by the backing blockdev's
469 * ->private_lock.
470 *
471 * Which introduces a requirement: all buffers on an address_space's
472 * ->private_list must be from the same address_space: the blockdev's.
473 *
474 * address_spaces which do not place buffers at ->private_list via these
475 * utility functions are free to use private_lock and private_list for
476 * whatever they want. The only requirement is that list_empty(private_list)
477 * be true at clear_inode() time.
478 *
479 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
480 * filesystems should do that. invalidate_inode_buffers() should just go
481 * BUG_ON(!list_empty).
482 *
483 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
484 * take an address_space, not an inode. And it should be called
485 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
486 * queued up.
487 *
488 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
489 * list if it is already on a list. Because if the buffer is on a list,
490 * it *must* already be on the right one. If not, the filesystem is being
491 * silly. This will save a ton of locking. But first we have to ensure
492 * that buffers are taken *off* the old inode's list when they are freed
493 * (presumably in truncate). That requires careful auditing of all
494 * filesystems (do it inside bforget()). It could also be done by bringing
495 * b_inode back.
496 */
497
498/*
499 * The buffer's backing address_space's private_lock must be held
500 */
dbacefc9 501static void __remove_assoc_queue(struct buffer_head *bh)
502{
503 list_del_init(&bh->b_assoc_buffers);
504 WARN_ON(!bh->b_assoc_map);
505 if (buffer_write_io_error(bh))
506 set_bit(AS_EIO, &bh->b_assoc_map->flags);
507 bh->b_assoc_map = NULL;
508}
509
510int inode_has_buffers(struct inode *inode)
511{
512 return !list_empty(&inode->i_data.private_list);
513}
514
515/*
516 * osync is designed to support O_SYNC io. It waits synchronously for
517 * all already-submitted IO to complete, but does not queue any new
518 * writes to the disk.
519 *
520 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
521 * you dirty the buffers, and then use osync_inode_buffers to wait for
522 * completion. Any other dirty buffers which are not yet queued for
523 * write will not be flushed to disk by the osync.
524 */
525static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
526{
527 struct buffer_head *bh;
528 struct list_head *p;
529 int err = 0;
530
531 spin_lock(lock);
532repeat:
533 list_for_each_prev(p, list) {
534 bh = BH_ENTRY(p);
535 if (buffer_locked(bh)) {
536 get_bh(bh);
537 spin_unlock(lock);
538 wait_on_buffer(bh);
539 if (!buffer_uptodate(bh))
540 err = -EIO;
541 brelse(bh);
542 spin_lock(lock);
543 goto repeat;
544 }
545 }
546 spin_unlock(lock);
547 return err;
548}
549
550/**
78a4a50a 551 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
67be2dd1 552 * @mapping: the mapping which wants those buffers written
553 *
554 * Starts I/O against the buffers at mapping->private_list, and waits upon
555 * that I/O.
556 *
557 * Basically, this is a convenience function for fsync().
558 * @mapping is a file or directory which needs those buffers to be written for
559 * a successful fsync().
560 */
561int sync_mapping_buffers(struct address_space *mapping)
562{
563 struct address_space *buffer_mapping = mapping->assoc_mapping;
564
565 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
566 return 0;
567
568 return fsync_buffers_list(&buffer_mapping->private_lock,
569 &mapping->private_list);
570}
571EXPORT_SYMBOL(sync_mapping_buffers);
572
573/*
574 * Called when we've recently written block `bblock', and it is known that
575 * `bblock' was for a buffer_boundary() buffer. This means that the block at
576 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
577 * dirty, schedule it for IO. So that indirects merge nicely with their data.
578 */
579void write_boundary_block(struct block_device *bdev,
580 sector_t bblock, unsigned blocksize)
581{
582 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
583 if (bh) {
584 if (buffer_dirty(bh))
585 ll_rw_block(WRITE, 1, &bh);
586 put_bh(bh);
587 }
588}
589
590void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
591{
592 struct address_space *mapping = inode->i_mapping;
593 struct address_space *buffer_mapping = bh->b_page->mapping;
594
595 mark_buffer_dirty(bh);
596 if (!mapping->assoc_mapping) {
597 mapping->assoc_mapping = buffer_mapping;
598 } else {
e827f923 599 BUG_ON(mapping->assoc_mapping != buffer_mapping);
1da177e4 600 }
535ee2fb 601 if (!bh->b_assoc_map) {
602 spin_lock(&buffer_mapping->private_lock);
603 list_move_tail(&bh->b_assoc_buffers,
604 &mapping->private_list);
58ff407b 605 bh->b_assoc_map = mapping;
606 spin_unlock(&buffer_mapping->private_lock);
607 }
608}
609EXPORT_SYMBOL(mark_buffer_dirty_inode);
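
/*
 * Editorial sketch (hypothetical "myfs" names): the intended pairing of
 * mark_buffer_dirty_inode() and sync_mapping_buffers().  Metadata that
 * lives in blockdev buffers (e.g. indirect blocks) is dirtied against
 * the file's inode so it lands on ->private_list, and the file's fsync
 * then writes and waits on exactly that list.  The fsync() prototype
 * shown matches this kernel's file_operations.
 */
static void myfs_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	/* queue bh on inode->i_mapping->private_list for later fsync */
	mark_buffer_dirty_inode(bh, inode);
}

static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* write out and wait upon the "associated" blockdev buffers */
	return sync_mapping_buffers(inode->i_mapping);
}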
610
611/*
612 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
613 * dirty.
614 *
615 * If warn is true, then emit a warning if the page is not uptodate and has
616 * not been truncated.
617 */
a8e7d49a 618static void __set_page_dirty(struct page *page,
619 struct address_space *mapping, int warn)
620{
19fd6231 621 spin_lock_irq(&mapping->tree_lock);
622 if (page->mapping) { /* Race with truncate? */
623 WARN_ON_ONCE(warn && !PageUptodate(page));
624
625 if (mapping_cap_account_dirty(mapping)) {
626 __inc_zone_page_state(page, NR_FILE_DIRTY);
627 __inc_bdi_stat(mapping->backing_dev_info,
628 BDI_RECLAIMABLE);
1cf6e7d8 629 task_dirty_inc(current);
630 task_io_account_write(PAGE_CACHE_SIZE);
631 }
632 radix_tree_tag_set(&mapping->page_tree,
633 page_index(page), PAGECACHE_TAG_DIRTY);
634 }
19fd6231 635 spin_unlock_irq(&mapping->tree_lock);
787d2214 636 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
637}
638
639/*
640 * Add a page to the dirty page list.
641 *
642 * It is a sad fact of life that this function is called from several places
643 * deeply under spinlocking. It may not sleep.
644 *
645 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 646 * dirty-state coherency between the page and the buffers. If the page does
647 * not have buffers then when they are later attached they will all be set
648 * dirty.
649 *
650 * The buffers are dirtied before the page is dirtied. There's a small race
651 * window in which a writepage caller may see the page cleanness but not the
652 * buffer dirtiness. That's fine. If this code were to set the page dirty
653 * before the buffers, a concurrent writepage caller could clear the page dirty
654 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
655 * page on the dirty page list.
656 *
657 * We use private_lock to lock against try_to_free_buffers while using the
658 * page's buffer list. Also use this to protect against clean buffers being
659 * added to the page after it was set dirty.
660 *
661 * FIXME: may need to call ->reservepage here as well. That's rather up to the
662 * address_space though.
663 */
664int __set_page_dirty_buffers(struct page *page)
665{
a8e7d49a 666 int newly_dirty;
787d2214 667 struct address_space *mapping = page_mapping(page);
668
669 if (unlikely(!mapping))
670 return !TestSetPageDirty(page);
671
672 spin_lock(&mapping->private_lock);
673 if (page_has_buffers(page)) {
674 struct buffer_head *head = page_buffers(page);
675 struct buffer_head *bh = head;
676
677 do {
678 set_buffer_dirty(bh);
679 bh = bh->b_this_page;
680 } while (bh != head);
681 }
a8e7d49a 682 newly_dirty = !TestSetPageDirty(page);
683 spin_unlock(&mapping->private_lock);
684
685 if (newly_dirty)
686 __set_page_dirty(page, mapping, 1);
687 return newly_dirty;
688}
689EXPORT_SYMBOL(__set_page_dirty_buffers);
690
691/*
692 * Write out and wait upon a list of buffers.
693 *
694 * We have conflicting pressures: we want to make sure that all
695 * initially dirty buffers get waited on, but that any subsequently
696 * dirtied buffers don't. After all, we don't want fsync to last
697 * forever if somebody is actively writing to the file.
698 *
699 * Do this in two main stages: first we copy dirty buffers to a
700 * temporary inode list, queueing the writes as we go. Then we clean
701 * up, waiting for those writes to complete.
702 *
703 * During this second stage, any subsequent updates to the file may end
704 * up refiling the buffer on the original inode's dirty list again, so
705 * there is a chance we will end up with a buffer queued for write but
706 * not yet completed on that list. So, as a final cleanup we go through
707 * the osync code to catch these locked, dirty buffers without requeuing
708 * any newly dirty buffers for write.
709 */
710static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
711{
712 struct buffer_head *bh;
713 struct list_head tmp;
535ee2fb 714 struct address_space *mapping;
715 int err = 0, err2;
716
717 INIT_LIST_HEAD(&tmp);
718
719 spin_lock(lock);
720 while (!list_empty(list)) {
721 bh = BH_ENTRY(list->next);
535ee2fb 722 mapping = bh->b_assoc_map;
58ff407b 723 __remove_assoc_queue(bh);
724 /* Avoid race with mark_buffer_dirty_inode() which does
725 * a lockless check and we rely on seeing the dirty bit */
726 smp_mb();
727 if (buffer_dirty(bh) || buffer_locked(bh)) {
728 list_add(&bh->b_assoc_buffers, &tmp);
535ee2fb 729 bh->b_assoc_map = mapping;
730 if (buffer_dirty(bh)) {
731 get_bh(bh);
732 spin_unlock(lock);
733 /*
734 * Ensure any pending I/O completes so that
735 * ll_rw_block() actually writes the current
736 * contents - it is a noop if I/O is still in
737 * flight on potentially older contents.
738 */
18ce3751 739 ll_rw_block(SWRITE_SYNC, 1, &bh);
740 brelse(bh);
741 spin_lock(lock);
742 }
743 }
744 }
745
746 while (!list_empty(&tmp)) {
747 bh = BH_ENTRY(tmp.prev);
1da177e4 748 get_bh(bh);
749 mapping = bh->b_assoc_map;
750 __remove_assoc_queue(bh);
751 /* Avoid race with mark_buffer_dirty_inode() which does
752 * a lockless check and we rely on seeing the dirty bit */
753 smp_mb();
754 if (buffer_dirty(bh)) {
755 list_add(&bh->b_assoc_buffers,
e3892296 756 &mapping->private_list);
757 bh->b_assoc_map = mapping;
758 }
759 spin_unlock(lock);
760 wait_on_buffer(bh);
761 if (!buffer_uptodate(bh))
762 err = -EIO;
763 brelse(bh);
764 spin_lock(lock);
765 }
766
767 spin_unlock(lock);
768 err2 = osync_buffers_list(lock, list);
769 if (err)
770 return err;
771 else
772 return err2;
773}
774
775/*
776 * Invalidate any and all dirty buffers on a given inode. We are
777 * probably unmounting the fs, but that doesn't mean we have already
778 * done a sync(). Just drop the buffers from the inode list.
779 *
780 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
781 * assumes that all the buffers are against the blockdev. Not true
782 * for reiserfs.
783 */
784void invalidate_inode_buffers(struct inode *inode)
785{
786 if (inode_has_buffers(inode)) {
787 struct address_space *mapping = &inode->i_data;
788 struct list_head *list = &mapping->private_list;
789 struct address_space *buffer_mapping = mapping->assoc_mapping;
790
791 spin_lock(&buffer_mapping->private_lock);
792 while (!list_empty(list))
793 __remove_assoc_queue(BH_ENTRY(list->next));
794 spin_unlock(&buffer_mapping->private_lock);
795 }
796}
52b19ac9 797EXPORT_SYMBOL(invalidate_inode_buffers);
798
799/*
800 * Remove any clean buffers from the inode's buffer list. This is called
801 * when we're trying to free the inode itself. Those buffers can pin it.
802 *
803 * Returns true if all buffers were removed.
804 */
805int remove_inode_buffers(struct inode *inode)
806{
807 int ret = 1;
808
809 if (inode_has_buffers(inode)) {
810 struct address_space *mapping = &inode->i_data;
811 struct list_head *list = &mapping->private_list;
812 struct address_space *buffer_mapping = mapping->assoc_mapping;
813
814 spin_lock(&buffer_mapping->private_lock);
815 while (!list_empty(list)) {
816 struct buffer_head *bh = BH_ENTRY(list->next);
817 if (buffer_dirty(bh)) {
818 ret = 0;
819 break;
820 }
821 __remove_assoc_queue(bh);
822 }
823 spin_unlock(&buffer_mapping->private_lock);
824 }
825 return ret;
826}
827
828/*
829 * Create the appropriate buffers when given a page for data area and
830 * the size of each buffer.. Use the bh->b_this_page linked list to
831 * follow the buffers created. Return NULL if unable to create more
832 * buffers.
833 *
834 * The retry flag is used to differentiate async IO (paging, swapping)
835 * which may not fail from ordinary buffer allocations.
836 */
837struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
838 int retry)
839{
840 struct buffer_head *bh, *head;
841 long offset;
842
843try_again:
844 head = NULL;
845 offset = PAGE_SIZE;
846 while ((offset -= size) >= 0) {
847 bh = alloc_buffer_head(GFP_NOFS);
848 if (!bh)
849 goto no_grow;
850
851 bh->b_bdev = NULL;
852 bh->b_this_page = head;
853 bh->b_blocknr = -1;
854 head = bh;
855
856 bh->b_state = 0;
857 atomic_set(&bh->b_count, 0);
fc5cd582 858 bh->b_private = NULL;
859 bh->b_size = size;
860
861 /* Link the buffer to its page */
862 set_bh_page(bh, page, offset);
863
01ffe339 864 init_buffer(bh, NULL, NULL);
865 }
866 return head;
867/*
868 * In case anything failed, we just free everything we got.
869 */
870no_grow:
871 if (head) {
872 do {
873 bh = head;
874 head = head->b_this_page;
875 free_buffer_head(bh);
876 } while (head);
877 }
878
879 /*
880 * Return failure for non-async IO requests. Async IO requests
881 * are not allowed to fail, so we have to wait until buffer heads
882 * become available. But we don't want tasks sleeping with
883 * partially complete buffers, so all were released above.
884 */
885 if (!retry)
886 return NULL;
887
888 /* We're _really_ low on memory. Now we just
889 * wait for old buffer heads to become free due to
890 * finishing IO. Since this is an async request and
891 * the reserve list is empty, we're sure there are
892 * async buffer heads in use.
893 */
894 free_more_memory();
895 goto try_again;
896}
897EXPORT_SYMBOL_GPL(alloc_page_buffers);
898
899static inline void
900link_dev_buffers(struct page *page, struct buffer_head *head)
901{
902 struct buffer_head *bh, *tail;
903
904 bh = head;
905 do {
906 tail = bh;
907 bh = bh->b_this_page;
908 } while (bh);
909 tail->b_this_page = head;
910 attach_page_buffers(page, head);
911}
912
913/*
914 * Initialise the state of a blockdev page's buffers.
915 */
916static void
917init_page_buffers(struct page *page, struct block_device *bdev,
918 sector_t block, int size)
919{
920 struct buffer_head *head = page_buffers(page);
921 struct buffer_head *bh = head;
922 int uptodate = PageUptodate(page);
923
924 do {
925 if (!buffer_mapped(bh)) {
926 init_buffer(bh, NULL, NULL);
927 bh->b_bdev = bdev;
928 bh->b_blocknr = block;
929 if (uptodate)
930 set_buffer_uptodate(bh);
931 set_buffer_mapped(bh);
932 }
933 block++;
934 bh = bh->b_this_page;
935 } while (bh != head);
936}
937
938/*
939 * Create the page-cache page that contains the requested block.
940 *
 941 * This is used purely for blockdev mappings.
942 */
943static struct page *
944grow_dev_page(struct block_device *bdev, sector_t block,
945 pgoff_t index, int size)
946{
947 struct inode *inode = bdev->bd_inode;
948 struct page *page;
949 struct buffer_head *bh;
950
ea125892 951 page = find_or_create_page(inode->i_mapping, index,
769848c0 952 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
953 if (!page)
954 return NULL;
955
e827f923 956 BUG_ON(!PageLocked(page));
957
958 if (page_has_buffers(page)) {
959 bh = page_buffers(page);
960 if (bh->b_size == size) {
961 init_page_buffers(page, bdev, block, size);
962 return page;
963 }
964 if (!try_to_free_buffers(page))
965 goto failed;
966 }
967
968 /*
969 * Allocate some buffers for this page
970 */
971 bh = alloc_page_buffers(page, size, 0);
972 if (!bh)
973 goto failed;
974
975 /*
976 * Link the page to the buffers and initialise them. Take the
977 * lock to be atomic wrt __find_get_block(), which does not
978 * run under the page lock.
979 */
980 spin_lock(&inode->i_mapping->private_lock);
981 link_dev_buffers(page, bh);
982 init_page_buffers(page, bdev, block, size);
983 spin_unlock(&inode->i_mapping->private_lock);
984 return page;
985
986failed:
987 BUG();
988 unlock_page(page);
989 page_cache_release(page);
990 return NULL;
991}
992
993/*
994 * Create buffers for the specified block device block's page. If
995 * that page was dirty, the buffers are set dirty also.
1da177e4 996 */
858119e1 997static int
998grow_buffers(struct block_device *bdev, sector_t block, int size)
999{
1000 struct page *page;
1001 pgoff_t index;
1002 int sizebits;
1003
1004 sizebits = -1;
1005 do {
1006 sizebits++;
1007 } while ((size << sizebits) < PAGE_SIZE);
1008
1009 index = block >> sizebits;
1da177e4 1010
1011 /*
1012 * Check for a block which wants to lie outside our maximum possible
1013 * pagecache index. (this comparison is done using sector_t types).
1014 */
1015 if (unlikely(index != block >> sizebits)) {
1016 char b[BDEVNAME_SIZE];
1017
1018 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1019 "device %s\n",
8e24eea7 1020 __func__, (unsigned long long)block,
1021 bdevname(bdev, b));
1022 return -EIO;
1023 }
1024 block = index << sizebits;
1025 /* Create a page with the proper size buffers.. */
1026 page = grow_dev_page(bdev, block, index, size);
1027 if (!page)
1028 return 0;
1029 unlock_page(page);
1030 page_cache_release(page);
1031 return 1;
1032}
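
/*
 * Editorial, user-space illustration (not kernel code) of the sizebits
 * and index arithmetic in grow_buffers() above: several blocks share one
 * page, and the page index is the block number shifted down by
 * log2(PAGE_SIZE / size).  The 4096-byte page size is assumed purely for
 * the example.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long size = 1024;		/* block size in bytes */
	unsigned long long block = 12345;
	int sizebits = -1;

	do {
		sizebits++;
	} while ((size << sizebits) < page_size);	/* same loop as grow_buffers() */

	printf("sizebits=%d, page index=%llu, first block on that page=%llu\n",
	       sizebits, block >> sizebits, (block >> sizebits) << sizebits);
	return 0;
}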
1033
75c96f85 1034static struct buffer_head *
1035__getblk_slow(struct block_device *bdev, sector_t block, int size)
1036{
1037 /* Size must be multiple of hard sectorsize */
1038 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1039 (size < 512 || size > PAGE_SIZE))) {
1040 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1041 size);
1042 printk(KERN_ERR "hardsect size: %d\n",
1043 bdev_hardsect_size(bdev));
1044
1045 dump_stack();
1046 return NULL;
1047 }
1048
1049 for (;;) {
1050 struct buffer_head * bh;
e5657933 1051 int ret;
1052
1053 bh = __find_get_block(bdev, block, size);
1054 if (bh)
1055 return bh;
1056
1057 ret = grow_buffers(bdev, block, size);
1058 if (ret < 0)
1059 return NULL;
1060 if (ret == 0)
1061 free_more_memory();
1062 }
1063}
1064
1065/*
1066 * The relationship between dirty buffers and dirty pages:
1067 *
1068 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1069 * the page is tagged dirty in its radix tree.
1070 *
1071 * At all times, the dirtiness of the buffers represents the dirtiness of
1072 * subsections of the page. If the page has buffers, the page dirty bit is
1073 * merely a hint about the true dirty state.
1074 *
1075 * When a page is set dirty in its entirety, all its buffers are marked dirty
1076 * (if the page has buffers).
1077 *
1078 * When a buffer is marked dirty, its page is dirtied, but the page's other
1079 * buffers are not.
1080 *
1081 * Also. When blockdev buffers are explicitly read with bread(), they
1082 * individually become uptodate. But their backing page remains not
1083 * uptodate - even if all of its buffers are uptodate. A subsequent
1084 * block_read_full_page() against that page will discover all the uptodate
1085 * buffers, will set the page uptodate and will perform no I/O.
1086 */
1087
1088/**
1089 * mark_buffer_dirty - mark a buffer_head as needing writeout
67be2dd1 1090 * @bh: the buffer_head to mark dirty
1091 *
1092 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1093 * backing page dirty, then tag the page as dirty in its address_space's radix
1094 * tree and then attach the address_space's inode to its superblock's dirty
1095 * inode list.
1096 *
1097 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1098 * mapping->tree_lock and the global inode_lock.
1099 */
fc9b52cd 1100void mark_buffer_dirty(struct buffer_head *bh)
1da177e4 1101{
787d2214 1102 WARN_ON_ONCE(!buffer_uptodate(bh));
1103
1104 /*
1105 * Very *carefully* optimize the it-is-already-dirty case.
1106 *
1107 * Don't let the final "is it dirty" escape to before we
1108 * perhaps modified the buffer.
1109 */
1110 if (buffer_dirty(bh)) {
1111 smp_mb();
1112 if (buffer_dirty(bh))
1113 return;
1114 }
1115
1116 if (!test_set_buffer_dirty(bh)) {
1117 struct page *page = bh->b_page;
1118 if (!TestSetPageDirty(page))
1119 __set_page_dirty(page, page_mapping(page), 0);
1120 }
1121}
1122
1123/*
1124 * Decrement a buffer_head's reference count. If all buffers against a page
1125 * have zero reference count, are clean and unlocked, and if the page is clean
1126 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1127 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1128 * a page but it ends up not being freed, and buffers may later be reattached).
1129 */
1130void __brelse(struct buffer_head * buf)
1131{
1132 if (atomic_read(&buf->b_count)) {
1133 put_bh(buf);
1134 return;
1135 }
5c752ad9 1136 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1137}
1138
1139/*
1140 * bforget() is like brelse(), except it discards any
1141 * potentially dirty data.
1142 */
1143void __bforget(struct buffer_head *bh)
1144{
1145 clear_buffer_dirty(bh);
535ee2fb 1146 if (bh->b_assoc_map) {
1147 struct address_space *buffer_mapping = bh->b_page->mapping;
1148
1149 spin_lock(&buffer_mapping->private_lock);
1150 list_del_init(&bh->b_assoc_buffers);
58ff407b 1151 bh->b_assoc_map = NULL;
1152 spin_unlock(&buffer_mapping->private_lock);
1153 }
1154 __brelse(bh);
1155}
1156
1157static struct buffer_head *__bread_slow(struct buffer_head *bh)
1158{
1159 lock_buffer(bh);
1160 if (buffer_uptodate(bh)) {
1161 unlock_buffer(bh);
1162 return bh;
1163 } else {
1164 get_bh(bh);
1165 bh->b_end_io = end_buffer_read_sync;
1166 submit_bh(READ, bh);
1167 wait_on_buffer(bh);
1168 if (buffer_uptodate(bh))
1169 return bh;
1170 }
1171 brelse(bh);
1172 return NULL;
1173}
1174
1175/*
1176 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1177 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1178 * refcount elevated by one when they're in an LRU. A buffer can only appear
1179 * once in a particular CPU's LRU. A single buffer can be present in multiple
1180 * CPU's LRUs at the same time.
1181 *
1182 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1183 * sb_find_get_block().
1184 *
1185 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1186 * a local interrupt disable for that.
1187 */
1188
1189#define BH_LRU_SIZE 8
1190
1191struct bh_lru {
1192 struct buffer_head *bhs[BH_LRU_SIZE];
1193};
1194
1195static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1196
1197#ifdef CONFIG_SMP
1198#define bh_lru_lock() local_irq_disable()
1199#define bh_lru_unlock() local_irq_enable()
1200#else
1201#define bh_lru_lock() preempt_disable()
1202#define bh_lru_unlock() preempt_enable()
1203#endif
1204
1205static inline void check_irqs_on(void)
1206{
1207#ifdef irqs_disabled
1208 BUG_ON(irqs_disabled());
1209#endif
1210}
1211
1212/*
1213 * The LRU management algorithm is dopey-but-simple. Sorry.
1214 */
1215static void bh_lru_install(struct buffer_head *bh)
1216{
1217 struct buffer_head *evictee = NULL;
1218 struct bh_lru *lru;
1219
1220 check_irqs_on();
1221 bh_lru_lock();
1222 lru = &__get_cpu_var(bh_lrus);
1223 if (lru->bhs[0] != bh) {
1224 struct buffer_head *bhs[BH_LRU_SIZE];
1225 int in;
1226 int out = 0;
1227
1228 get_bh(bh);
1229 bhs[out++] = bh;
1230 for (in = 0; in < BH_LRU_SIZE; in++) {
1231 struct buffer_head *bh2 = lru->bhs[in];
1232
1233 if (bh2 == bh) {
1234 __brelse(bh2);
1235 } else {
1236 if (out >= BH_LRU_SIZE) {
1237 BUG_ON(evictee != NULL);
1238 evictee = bh2;
1239 } else {
1240 bhs[out++] = bh2;
1241 }
1242 }
1243 }
1244 while (out < BH_LRU_SIZE)
1245 bhs[out++] = NULL;
1246 memcpy(lru->bhs, bhs, sizeof(bhs));
1247 }
1248 bh_lru_unlock();
1249
1250 if (evictee)
1251 __brelse(evictee);
1252}
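
/*
 * Editorial, user-space model (not kernel code) of the install logic
 * above: an 8-slot array with the newest entry at index 0.  Installing
 * an entry that is already present just moves it to the front; otherwise
 * the oldest entry falls off the end.  Plain ints stand in for
 * buffer_head pointers, and refcounting is omitted.
 */
#include <stdio.h>
#include <string.h>

#define LRU_SIZE 8

static int lru[LRU_SIZE];

static int lru_install(int val)
{
	int tmp[LRU_SIZE];
	int in, out = 0, evicted = 0;

	if (lru[0] == val)
		return 0;			/* already at the head */

	tmp[out++] = val;
	for (in = 0; in < LRU_SIZE; in++) {
		if (lru[in] == val)
			continue;		/* drop the duplicate */
		if (out >= LRU_SIZE)
			evicted = lru[in];	/* oldest entry falls off */
		else
			tmp[out++] = lru[in];
	}
	while (out < LRU_SIZE)
		tmp[out++] = 0;
	memcpy(lru, tmp, sizeof(lru));
	return evicted;
}

int main(void)
{
	int i;

	for (i = 1; i <= 10; i++)
		lru_install(i);
	lru_install(7);				/* move 7 back to the front */
	for (i = 0; i < LRU_SIZE; i++)
		printf("%d ", lru[i]);
	printf("\n");				/* expect: 7 10 9 8 6 5 4 3 */
	return 0;
}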
1253
1254/*
1255 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1256 */
858119e1 1257static struct buffer_head *
3991d3bd 1258lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1259{
1260 struct buffer_head *ret = NULL;
1261 struct bh_lru *lru;
3991d3bd 1262 unsigned int i;
1263
1264 check_irqs_on();
1265 bh_lru_lock();
1266 lru = &__get_cpu_var(bh_lrus);
1267 for (i = 0; i < BH_LRU_SIZE; i++) {
1268 struct buffer_head *bh = lru->bhs[i];
1269
1270 if (bh && bh->b_bdev == bdev &&
1271 bh->b_blocknr == block && bh->b_size == size) {
1272 if (i) {
1273 while (i) {
1274 lru->bhs[i] = lru->bhs[i - 1];
1275 i--;
1276 }
1277 lru->bhs[0] = bh;
1278 }
1279 get_bh(bh);
1280 ret = bh;
1281 break;
1282 }
1283 }
1284 bh_lru_unlock();
1285 return ret;
1286}
1287
1288/*
1289 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1290 * it in the LRU and mark it as accessed. If it is not present then return
1291 * NULL
1292 */
1293struct buffer_head *
3991d3bd 1294__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1295{
1296 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1297
1298 if (bh == NULL) {
385fd4c5 1299 bh = __find_get_block_slow(bdev, block);
1300 if (bh)
1301 bh_lru_install(bh);
1302 }
1303 if (bh)
1304 touch_buffer(bh);
1305 return bh;
1306}
1307EXPORT_SYMBOL(__find_get_block);
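
/*
 * Editorial usage sketch (hypothetical "myfs" name): peek at the cache
 * without creating anything.  __find_get_block() returns NULL when the
 * block has no buffer_head in the pagecache, whereas __getblk() below
 * would go on to create one.
 */
static int myfs_block_is_cached(struct block_device *bdev,
				sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);
	int cached = 0;

	if (bh) {
		cached = buffer_uptodate(bh);
		brelse(bh);		/* drop the reference the lookup took */
	}
	return cached;
}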
1308
1309/*
1310 * __getblk will locate (and, if necessary, create) the buffer_head
1311 * which corresponds to the passed block_device, block and size. The
1312 * returned buffer has its reference count incremented.
1313 *
1314 * __getblk() cannot fail - it just keeps trying. If you pass it an
1315 * illegal block number, __getblk() will happily return a buffer_head
1316 * which represents the non-existent block. Very weird.
1317 *
1318 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1319 * attempt is failing. FIXME, perhaps?
1320 */
1321struct buffer_head *
3991d3bd 1322__getblk(struct block_device *bdev, sector_t block, unsigned size)
1323{
1324 struct buffer_head *bh = __find_get_block(bdev, block, size);
1325
1326 might_sleep();
1327 if (bh == NULL)
1328 bh = __getblk_slow(bdev, block, size);
1329 return bh;
1330}
1331EXPORT_SYMBOL(__getblk);
1332
1333/*
1334 * Do async read-ahead on a buffer..
1335 */
3991d3bd 1336void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1337{
1338 struct buffer_head *bh = __getblk(bdev, block, size);
1339 if (likely(bh)) {
1340 ll_rw_block(READA, 1, &bh);
1341 brelse(bh);
1342 }
1343}
1344EXPORT_SYMBOL(__breadahead);
1345
1346/**
1347 * __bread() - reads a specified block and returns the bh
67be2dd1 1348 * @bdev: the block_device to read from
1349 * @block: number of block
1350 * @size: size (in bytes) to read
1351 *
1352 * Reads a specified block, and returns buffer head that contains it.
1353 * It returns NULL if the block was unreadable.
1354 */
1355struct buffer_head *
3991d3bd 1356__bread(struct block_device *bdev, sector_t block, unsigned size)
1357{
1358 struct buffer_head *bh = __getblk(bdev, block, size);
1359
a3e713b5 1360 if (likely(bh) && !buffer_uptodate(bh))
1361 bh = __bread_slow(bh);
1362 return bh;
1363}
1364EXPORT_SYMBOL(__bread);
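
/*
 * Editorial usage sketch (hypothetical "myfs" name and layout): the
 * classic read-modify-write cycle through the buffer cache.  __bread()
 * returns an uptodate buffer or NULL, mark_buffer_dirty() schedules the
 * modified block for writeback, and brelse() drops our reference.
 */
static int myfs_bump_byte(struct block_device *bdev, sector_t block,
			  unsigned size, unsigned offset)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;			/* the block was unreadable */

	((unsigned char *)bh->b_data)[offset]++;	/* update the cached copy */
	mark_buffer_dirty(bh);
	brelse(bh);
	return 0;
}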
1365
1366/*
1367 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1368 * This doesn't race because it runs in each cpu either in irq
1369 * or with preempt disabled.
1370 */
1371static void invalidate_bh_lru(void *arg)
1372{
1373 struct bh_lru *b = &get_cpu_var(bh_lrus);
1374 int i;
1375
1376 for (i = 0; i < BH_LRU_SIZE; i++) {
1377 brelse(b->bhs[i]);
1378 b->bhs[i] = NULL;
1379 }
1380 put_cpu_var(bh_lrus);
1381}
1382
f9a14399 1383void invalidate_bh_lrus(void)
1da177e4 1384{
15c8b6c1 1385 on_each_cpu(invalidate_bh_lru, NULL, 1);
1da177e4 1386}
9db5579b 1387EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1388
1389void set_bh_page(struct buffer_head *bh,
1390 struct page *page, unsigned long offset)
1391{
1392 bh->b_page = page;
e827f923 1393 BUG_ON(offset >= PAGE_SIZE);
1394 if (PageHighMem(page))
1395 /*
1396 * This catches illegal uses and preserves the offset:
1397 */
1398 bh->b_data = (char *)(0 + offset);
1399 else
1400 bh->b_data = page_address(page) + offset;
1401}
1402EXPORT_SYMBOL(set_bh_page);
1403
1404/*
1405 * Called when truncating a buffer on a page completely.
1406 */
858119e1 1407static void discard_buffer(struct buffer_head * bh)
1408{
1409 lock_buffer(bh);
1410 clear_buffer_dirty(bh);
1411 bh->b_bdev = NULL;
1412 clear_buffer_mapped(bh);
1413 clear_buffer_req(bh);
1414 clear_buffer_new(bh);
1415 clear_buffer_delay(bh);
33a266dd 1416 clear_buffer_unwritten(bh);
1417 unlock_buffer(bh);
1418}
1419
1420/**
 1421 * block_invalidatepage - invalidate part or all of a buffer-backed page
1422 *
1423 * @page: the page which is affected
1424 * @offset: the index of the truncation point
1425 *
1426 * block_invalidatepage() is called when all or part of the page has become
 1427 * invalidated by a truncate operation.
1428 *
1429 * block_invalidatepage() does not have to release all buffers, but it must
1430 * ensure that no dirty buffer is left outside @offset and that no I/O
1431 * is underway against any of the blocks which are outside the truncation
1432 * point. Because the caller is about to free (and possibly reuse) those
1433 * blocks on-disk.
1434 */
2ff28e22 1435void block_invalidatepage(struct page *page, unsigned long offset)
1436{
1437 struct buffer_head *head, *bh, *next;
1438 unsigned int curr_off = 0;
1439
1440 BUG_ON(!PageLocked(page));
1441 if (!page_has_buffers(page))
1442 goto out;
1443
1444 head = page_buffers(page);
1445 bh = head;
1446 do {
1447 unsigned int next_off = curr_off + bh->b_size;
1448 next = bh->b_this_page;
1449
1450 /*
1451 * is this block fully invalidated?
1452 */
1453 if (offset <= curr_off)
1454 discard_buffer(bh);
1455 curr_off = next_off;
1456 bh = next;
1457 } while (bh != head);
1458
1459 /*
1460 * We release buffers only if the entire page is being invalidated.
1461 * The get_block cached value has been unconditionally invalidated,
1462 * so real IO is not possible anymore.
1463 */
1464 if (offset == 0)
2ff28e22 1465 try_to_release_page(page, 0);
1da177e4 1466out:
2ff28e22 1467 return;
1468}
1469EXPORT_SYMBOL(block_invalidatepage);
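
/*
 * Editorial sketch (hypothetical "myfs" names): a simple buffer-backed
 * filesystem usually points its address_space_operations straight at
 * the generic helpers this file provides; block_read_full_page() and
 * block_write_full_page() appear further down in the file.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	/* a real filesystem would map iblock to an on-disk block here */
	return -EIO;
}

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.set_page_dirty	= __set_page_dirty_buffers,
	.invalidatepage	= block_invalidatepage,
};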
1470
1471/*
1472 * We attach and possibly dirty the buffers atomically wrt
1473 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1474 * is already excluded via the page lock.
1475 */
1476void create_empty_buffers(struct page *page,
1477 unsigned long blocksize, unsigned long b_state)
1478{
1479 struct buffer_head *bh, *head, *tail;
1480
1481 head = alloc_page_buffers(page, blocksize, 1);
1482 bh = head;
1483 do {
1484 bh->b_state |= b_state;
1485 tail = bh;
1486 bh = bh->b_this_page;
1487 } while (bh);
1488 tail->b_this_page = head;
1489
1490 spin_lock(&page->mapping->private_lock);
1491 if (PageUptodate(page) || PageDirty(page)) {
1492 bh = head;
1493 do {
1494 if (PageDirty(page))
1495 set_buffer_dirty(bh);
1496 if (PageUptodate(page))
1497 set_buffer_uptodate(bh);
1498 bh = bh->b_this_page;
1499 } while (bh != head);
1500 }
1501 attach_page_buffers(page, head);
1502 spin_unlock(&page->mapping->private_lock);
1503}
1504EXPORT_SYMBOL(create_empty_buffers);
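
/*
 * Editorial sketch (hypothetical "myfs" name): the usual caller pattern
 * around create_empty_buffers() - make sure the page carries a buffer
 * ring of the right block size, then walk that ring.
 */
static void myfs_for_each_buffer(struct inode *inode, struct page *page)
{
	struct buffer_head *bh, *head;
	unsigned blocksize = 1 << inode->i_blkbits;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	head = page_buffers(page);
	bh = head;
	do {
		/* per-buffer work (mapping, dirtying, ...) goes here */
		bh = bh->b_this_page;
	} while (bh != head);
}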
1505
1506/*
1507 * We are taking a block for data and we don't want any output from any
1508 * buffer-cache aliases starting from return from that function and
1509 * until the moment when something will explicitly mark the buffer
1510 * dirty (hopefully that will not happen until we will free that block ;-)
1511 * We don't even need to mark it not-uptodate - nobody can expect
 1512 * anything from a newly allocated buffer anyway. We used to use
1513 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1514 * don't want to mark the alias unmapped, for example - it would confuse
1515 * anyone who might pick it with bread() afterwards...
1516 *
1517 * Also.. Note that bforget() doesn't lock the buffer. So there can
1518 * be writeout I/O going on against recently-freed buffers. We don't
1519 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1520 * only if we really need to. That happens here.
1521 */
1522void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1523{
1524 struct buffer_head *old_bh;
1525
1526 might_sleep();
1527
385fd4c5 1528 old_bh = __find_get_block_slow(bdev, block);
1529 if (old_bh) {
1530 clear_buffer_dirty(old_bh);
1531 wait_on_buffer(old_bh);
1532 clear_buffer_req(old_bh);
1533 __brelse(old_bh);
1534 }
1535}
1536EXPORT_SYMBOL(unmap_underlying_metadata);
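
/*
 * Editorial sketch (hypothetical "myfs" names, toy 1:1 disk layout): the
 * producer side of the note above.  A get_block callback that has just
 * allocated an on-disk block maps it and sets BH_New; the generic write
 * paths in this file then call unmap_underlying_metadata() on it so any
 * stale alias in the blockdev's buffer cache is killed before the block
 * is reused for data.
 */
static int myfs_get_block_alloc(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	/* toy layout purely for illustration; a real fs allocates here */
	sector_t phys = iblock + 1;

	if (!create)
		return 0;		/* sketch shows only the allocating path */

	map_bh(bh_result, inode->i_sb, phys);
	set_buffer_new(bh_result);	/* tells generic code to unmap the alias */
	return 0;
}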
1537
1538/*
1539 * NOTE! All mapped/uptodate combinations are valid:
1540 *
1541 * Mapped Uptodate Meaning
1542 *
1543 * No No "unknown" - must do get_block()
1544 * No Yes "hole" - zero-filled
1545 * Yes No "allocated" - allocated on disk, not read in
1546 * Yes Yes "valid" - allocated and up-to-date in memory.
1547 *
1548 * "Dirty" is valid only with the last case (mapped+uptodate).
1549 */
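
/*
 * Editorial illustration of the state table above; buffer_mapped() and
 * buffer_uptodate() are the real predicates, the string labels exist
 * only for this example.
 */
static const char *example_bh_state(struct buffer_head *bh)
{
	if (!buffer_mapped(bh))
		return buffer_uptodate(bh) ? "hole (zero-filled)"
					   : "unknown - must do get_block()";
	return buffer_uptodate(bh) ? "valid (allocated and uptodate)"
				   : "allocated on disk, not read in";
}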
1550
1551/*
1552 * While block_write_full_page is writing back the dirty buffers under
1553 * the page lock, whoever dirtied the buffers may decide to clean them
1554 * again at any time. We handle that by only looking at the buffer
1555 * state inside lock_buffer().
1556 *
1557 * If block_write_full_page() is called for regular writeback
1558 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1559 * locked buffer. This only can happen if someone has written the buffer
1560 * directly, with submit_bh(). At the address_space level PageWriteback
1561 * prevents this contention from occurring.
1562 */
1563static int __block_write_full_page(struct inode *inode, struct page *page,
1564 get_block_t *get_block, struct writeback_control *wbc)
1565{
1566 int err;
1567 sector_t block;
1568 sector_t last_block;
f0fbd5fc 1569 struct buffer_head *bh, *head;
b0cf2321 1570 const unsigned blocksize = 1 << inode->i_blkbits;
1571 int nr_underway = 0;
1572
1573 BUG_ON(!PageLocked(page));
1574
1575 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1576
1577 if (!page_has_buffers(page)) {
b0cf2321 1578 create_empty_buffers(page, blocksize,
1579 (1 << BH_Dirty)|(1 << BH_Uptodate));
1580 }
1581
1582 /*
1583 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1584 * here, and the (potentially unmapped) buffers may become dirty at
1585 * any time. If a buffer becomes dirty here after we've inspected it
1586 * then we just miss that fact, and the page stays dirty.
1587 *
1588 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1589 * handle that here by just cleaning them.
1590 */
1591
54b21a79 1592 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1593 head = page_buffers(page);
1594 bh = head;
1595
1596 /*
1597 * Get all the dirty buffers mapped to disk addresses and
1598 * handle any aliases from the underlying blockdev's mapping.
1599 */
1600 do {
1601 if (block > last_block) {
1602 /*
1603 * mapped buffers outside i_size will occur, because
1604 * this page can be outside i_size when there is a
1605 * truncate in progress.
1606 */
1607 /*
1608 * The buffer was zeroed by block_write_full_page()
1609 */
1610 clear_buffer_dirty(bh);
1611 set_buffer_uptodate(bh);
1612 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1613 buffer_dirty(bh)) {
b0cf2321 1614 WARN_ON(bh->b_size != blocksize);
1615 err = get_block(inode, block, bh, 1);
1616 if (err)
1617 goto recover;
29a814d2 1618 clear_buffer_delay(bh);
1619 if (buffer_new(bh)) {
1620 /* blockdev mappings never come here */
1621 clear_buffer_new(bh);
1622 unmap_underlying_metadata(bh->b_bdev,
1623 bh->b_blocknr);
1624 }
1625 }
1626 bh = bh->b_this_page;
1627 block++;
1628 } while (bh != head);
1629
1630 do {
1631 if (!buffer_mapped(bh))
1632 continue;
1633 /*
1634 * If it's a fully non-blocking write attempt and we cannot
1635 * lock the buffer then redirty the page. Note that this can
1636 * potentially cause a busy-wait loop from pdflush and kswapd
1637 * activity, but those code paths have their own higher-level
1638 * throttling.
1639 */
1640 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1641 lock_buffer(bh);
ca5de404 1642 } else if (!trylock_buffer(bh)) {
1643 redirty_page_for_writepage(wbc, page);
1644 continue;
1645 }
1646 if (test_clear_buffer_dirty(bh)) {
1647 mark_buffer_async_write(bh);
1648 } else {
1649 unlock_buffer(bh);
1650 }
1651 } while ((bh = bh->b_this_page) != head);
1652
1653 /*
1654 * The page and its buffers are protected by PageWriteback(), so we can
1655 * drop the bh refcounts early.
1656 */
1657 BUG_ON(PageWriteback(page));
1658 set_page_writeback(page);
1659
1660 do {
1661 struct buffer_head *next = bh->b_this_page;
1662 if (buffer_async_write(bh)) {
1663 submit_bh(WRITE, bh);
1664 nr_underway++;
1665 }
1666 bh = next;
1667 } while (bh != head);
05937baa 1668 unlock_page(page);
1669
1670 err = 0;
1671done:
1672 if (nr_underway == 0) {
1673 /*
1674 * The page was marked dirty, but the buffers were
1675 * clean. Someone wrote them back by hand with
1676 * ll_rw_block/submit_bh. A rare case.
1677 */
1da177e4 1678 end_page_writeback(page);
3d67f2d7 1679
1680 /*
1681 * The page and buffer_heads can be released at any time from
1682 * here on.
1683 */
1684 }
1685 return err;
1686
1687recover:
1688 /*
1689 * ENOSPC, or some other error. We may already have added some
1690 * blocks to the file, so we need to write these out to avoid
1691 * exposing stale data.
1692 * The page is currently locked and not marked for writeback
1693 */
1694 bh = head;
1695 /* Recovery: lock and submit the mapped buffers */
1696 do {
1697 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1698 !buffer_delay(bh)) {
1699 lock_buffer(bh);
1700 mark_buffer_async_write(bh);
1701 } else {
1702 /*
1703 * The buffer may have been set dirty during
1704 * attachment to a dirty page.
1705 */
1706 clear_buffer_dirty(bh);
1707 }
1708 } while ((bh = bh->b_this_page) != head);
1709 SetPageError(page);
1710 BUG_ON(PageWriteback(page));
7e4c3690 1711 mapping_set_error(page->mapping, err);
1da177e4 1712 set_page_writeback(page);
1713 do {
1714 struct buffer_head *next = bh->b_this_page;
1715 if (buffer_async_write(bh)) {
1716 clear_buffer_dirty(bh);
1717 submit_bh(WRITE, bh);
1718 nr_underway++;
1719 }
1720 bh = next;
1721 } while (bh != head);
ffda9d30 1722 unlock_page(page);
1723 goto done;
1724}
1725
1726/*
1727 * If a page has any new buffers, zero them out here, and mark them uptodate
1728 * and dirty so they'll be written out (in order to prevent uninitialised
1729 * block data from leaking). And clear the new bit.
1730 */
1731void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1732{
1733 unsigned int block_start, block_end;
1734 struct buffer_head *head, *bh;
1735
1736 BUG_ON(!PageLocked(page));
1737 if (!page_has_buffers(page))
1738 return;
1739
1740 bh = head = page_buffers(page);
1741 block_start = 0;
1742 do {
1743 block_end = block_start + bh->b_size;
1744
1745 if (buffer_new(bh)) {
1746 if (block_end > from && block_start < to) {
1747 if (!PageUptodate(page)) {
1748 unsigned start, size;
1749
1750 start = max(from, block_start);
1751 size = min(to, block_end) - start;
1752
eebd2aa3 1753 zero_user(page, start, size);
1754 set_buffer_uptodate(bh);
1755 }
1756
1757 clear_buffer_new(bh);
1758 mark_buffer_dirty(bh);
1759 }
1760 }
1761
1762 block_start = block_end;
1763 bh = bh->b_this_page;
1764 } while (bh != head);
1765}
1766EXPORT_SYMBOL(page_zero_new_buffers);
1767
1768static int __block_prepare_write(struct inode *inode, struct page *page,
1769 unsigned from, unsigned to, get_block_t *get_block)
1770{
1771 unsigned block_start, block_end;
1772 sector_t block;
1773 int err = 0;
1774 unsigned blocksize, bbits;
1775 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1776
1777 BUG_ON(!PageLocked(page));
1778 BUG_ON(from > PAGE_CACHE_SIZE);
1779 BUG_ON(to > PAGE_CACHE_SIZE);
1780 BUG_ON(from > to);
1781
1782 blocksize = 1 << inode->i_blkbits;
1783 if (!page_has_buffers(page))
1784 create_empty_buffers(page, blocksize, 0);
1785 head = page_buffers(page);
1786
1787 bbits = inode->i_blkbits;
1788 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1789
1790 for(bh = head, block_start = 0; bh != head || !block_start;
1791 block++, block_start=block_end, bh = bh->b_this_page) {
1792 block_end = block_start + blocksize;
1793 if (block_end <= from || block_start >= to) {
1794 if (PageUptodate(page)) {
1795 if (!buffer_uptodate(bh))
1796 set_buffer_uptodate(bh);
1797 }
1798 continue;
1799 }
1800 if (buffer_new(bh))
1801 clear_buffer_new(bh);
1802 if (!buffer_mapped(bh)) {
b0cf2321 1803 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1804 err = get_block(inode, block, bh, 1);
1805 if (err)
f3ddbdc6 1806 break;
1da177e4 1807 if (buffer_new(bh)) {
1da177e4
LT
1808 unmap_underlying_metadata(bh->b_bdev,
1809 bh->b_blocknr);
1810 if (PageUptodate(page)) {
637aff46 1811 clear_buffer_new(bh);
1da177e4 1812 set_buffer_uptodate(bh);
637aff46 1813 mark_buffer_dirty(bh);
1da177e4
LT
1814 continue;
1815 }
eebd2aa3
CL
1816 if (block_end > to || block_start < from)
1817 zero_user_segments(page,
1818 to, block_end,
1819 block_start, from);
1da177e4
LT
1820 continue;
1821 }
1822 }
1823 if (PageUptodate(page)) {
1824 if (!buffer_uptodate(bh))
1825 set_buffer_uptodate(bh);
1826 continue;
1827 }
1828 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1829 !buffer_unwritten(bh) &&
1da177e4
LT
1830 (block_start < from || block_end > to)) {
1831 ll_rw_block(READ, 1, &bh);
1832 *wait_bh++=bh;
1833 }
1834 }
1835 /*
1836 * If we issued read requests - let them complete.
1837 */
1838 while(wait_bh > wait) {
1839 wait_on_buffer(*--wait_bh);
1840 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1841 err = -EIO;
1da177e4 1842 }
afddba49
NP
1843 if (unlikely(err))
1844 page_zero_new_buffers(page, from, to);
1da177e4
LT
1845 return err;
1846}
1847
1848static int __block_commit_write(struct inode *inode, struct page *page,
1849 unsigned from, unsigned to)
1850{
1851 unsigned block_start, block_end;
1852 int partial = 0;
1853 unsigned blocksize;
1854 struct buffer_head *bh, *head;
1855
1856 blocksize = 1 << inode->i_blkbits;
1857
1858 for(bh = head = page_buffers(page), block_start = 0;
1859 bh != head || !block_start;
1860 block_start=block_end, bh = bh->b_this_page) {
1861 block_end = block_start + blocksize;
1862 if (block_end <= from || block_start >= to) {
1863 if (!buffer_uptodate(bh))
1864 partial = 1;
1865 } else {
1866 set_buffer_uptodate(bh);
1867 mark_buffer_dirty(bh);
1868 }
afddba49 1869 clear_buffer_new(bh);
1da177e4
LT
1870 }
1871
1872 /*
1873 * If this is a partial write which happened to make all buffers
1874 * uptodate then we can optimize away a bogus readpage() for
1875 * the next read(). Here we 'discover' whether the page went
1876 * uptodate as a result of this (potentially partial) write.
1877 */
1878 if (!partial)
1879 SetPageUptodate(page);
1880 return 0;
1881}
1882
afddba49
NP
1883/*
1884 * block_write_begin takes care of the basic task of block allocation and
1885 * bringing partial write blocks uptodate first.
1886 *
1887 * If *pagep is not NULL, then block_write_begin uses the locked page
1888 * at *pagep rather than allocating its own. In this case, the page will
1889 * not be unlocked or deallocated on failure.
1890 */
1891int block_write_begin(struct file *file, struct address_space *mapping,
1892 loff_t pos, unsigned len, unsigned flags,
1893 struct page **pagep, void **fsdata,
1894 get_block_t *get_block)
1895{
1896 struct inode *inode = mapping->host;
1897 int status = 0;
1898 struct page *page;
1899 pgoff_t index;
1900 unsigned start, end;
1901 int ownpage = 0;
1902
1903 index = pos >> PAGE_CACHE_SHIFT;
1904 start = pos & (PAGE_CACHE_SIZE - 1);
1905 end = start + len;
1906
1907 page = *pagep;
1908 if (page == NULL) {
1909 ownpage = 1;
54566b2c 1910 page = grab_cache_page_write_begin(mapping, index, flags);
afddba49
NP
1911 if (!page) {
1912 status = -ENOMEM;
1913 goto out;
1914 }
1915 *pagep = page;
1916 } else
1917 BUG_ON(!PageLocked(page));
1918
1919 status = __block_prepare_write(inode, page, start, end, get_block);
1920 if (unlikely(status)) {
1921 ClearPageUptodate(page);
1922
1923 if (ownpage) {
1924 unlock_page(page);
1925 page_cache_release(page);
1926 *pagep = NULL;
1927
1928 /*
1929 * prepare_write() may have instantiated a few blocks
1930 * outside i_size. Trim these off again. Don't need
1931 * i_size_read because we hold i_mutex.
1932 */
1933 if (pos + len > inode->i_size)
1934 vmtruncate(inode, inode->i_size);
1935 }
afddba49
NP
1936 }
1937
1938out:
1939 return status;
1940}
1941EXPORT_SYMBOL(block_write_begin);
1942
1943int block_write_end(struct file *file, struct address_space *mapping,
1944 loff_t pos, unsigned len, unsigned copied,
1945 struct page *page, void *fsdata)
1946{
1947 struct inode *inode = mapping->host;
1948 unsigned start;
1949
1950 start = pos & (PAGE_CACHE_SIZE - 1);
1951
1952 if (unlikely(copied < len)) {
1953 /*
1954 * The buffers that were written will now be uptodate, so we
1955 * don't have to worry about a readpage reading them and
1956 * overwriting a partial write. However if we have encountered
1957 * a short write and only partially written into a buffer, it
1958 * will not be marked uptodate, so a readpage might come in and
1959 * destroy our partial write.
1960 *
1961 * Do the simplest thing, and just treat any short write to a
1962 * non uptodate page as a zero-length write, and force the
1963 * caller to redo the whole thing.
1964 */
1965 if (!PageUptodate(page))
1966 copied = 0;
1967
1968 page_zero_new_buffers(page, start+copied, start+len);
1969 }
1970 flush_dcache_page(page);
1971
1972 /* This could be a short (even 0-length) commit */
1973 __block_commit_write(inode, page, start, start+copied);
1974
1975 return copied;
1976}
1977EXPORT_SYMBOL(block_write_end);
1978
1979int generic_write_end(struct file *file, struct address_space *mapping,
1980 loff_t pos, unsigned len, unsigned copied,
1981 struct page *page, void *fsdata)
1982{
1983 struct inode *inode = mapping->host;
c7d206b3 1984 int i_size_changed = 0;
afddba49
NP
1985
1986 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1987
1988 /*
1989 * No need to use i_size_read() here, the i_size
1990 * cannot change under us because we hold i_mutex.
1991 *
1992 * But it's important to update i_size while still holding page lock:
1993 * page writeout could otherwise come in and zero beyond i_size.
1994 */
1995 if (pos+copied > inode->i_size) {
1996 i_size_write(inode, pos+copied);
c7d206b3 1997 i_size_changed = 1;
afddba49
NP
1998 }
1999
2000 unlock_page(page);
2001 page_cache_release(page);
2002
c7d206b3
JK
2003 /*
2004 * Don't mark the inode dirty under page lock. First, it unnecessarily
2005 * makes the holding time of page lock longer. Second, it forces lock
2006 * ordering of page lock and transaction start for journaling
2007 * filesystems.
2008 */
2009 if (i_size_changed)
2010 mark_inode_dirty(inode);
2011
afddba49
NP
2012 return copied;
2013}
2014EXPORT_SYMBOL(generic_write_end);
2015
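/*
 * Illustrative sketch (not part of this file): the usual way a simple
 * get_block-style filesystem wires block_write_begin()/generic_write_end()
 * into its address_space_operations. All myfs_* names are hypothetical,
 * and the toy get_block below just maps file blocks 1:1 onto disk blocks.
 */
#if 0
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	/* Toy mapping: pretend the file is stored contiguously from block 0 */
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin() allocate and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};
#endif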
8ab22b9a
HH
2016/*
2017 * block_is_partially_uptodate checks whether buffers within a page are
2018 * uptodate or not.
2019 *
2020 * Returns true if all buffers which correspond to a file portion
2021 * we want to read are uptodate.
2022 */
2023int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2024 unsigned long from)
2025{
2026 struct inode *inode = page->mapping->host;
2027 unsigned block_start, block_end, blocksize;
2028 unsigned to;
2029 struct buffer_head *bh, *head;
2030 int ret = 1;
2031
2032 if (!page_has_buffers(page))
2033 return 0;
2034
2035 blocksize = 1 << inode->i_blkbits;
2036 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2037 to = from + to;
2038 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2039 return 0;
2040
2041 head = page_buffers(page);
2042 bh = head;
2043 block_start = 0;
2044 do {
2045 block_end = block_start + blocksize;
2046 if (block_end > from && block_start < to) {
2047 if (!buffer_uptodate(bh)) {
2048 ret = 0;
2049 break;
2050 }
2051 if (block_end >= to)
2052 break;
2053 }
2054 block_start = block_end;
2055 bh = bh->b_this_page;
2056 } while (bh != head);
2057
2058 return ret;
2059}
2060EXPORT_SYMBOL(block_is_partially_uptodate);
2061
1da177e4
LT
2062/*
2063 * Generic "read page" function for block devices that have the normal
2064 * get_block functionality. This is most of the block device filesystems.
2065 * Reads the page asynchronously --- the unlock_buffer() and
2066 * set/clear_buffer_uptodate() functions propagate buffer state into the
2067 * page struct once IO has completed.
2068 */
2069int block_read_full_page(struct page *page, get_block_t *get_block)
2070{
2071 struct inode *inode = page->mapping->host;
2072 sector_t iblock, lblock;
2073 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2074 unsigned int blocksize;
2075 int nr, i;
2076 int fully_mapped = 1;
2077
cd7619d6 2078 BUG_ON(!PageLocked(page));
1da177e4
LT
2079 blocksize = 1 << inode->i_blkbits;
2080 if (!page_has_buffers(page))
2081 create_empty_buffers(page, blocksize, 0);
2082 head = page_buffers(page);
2083
2084 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2085 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2086 bh = head;
2087 nr = 0;
2088 i = 0;
2089
2090 do {
2091 if (buffer_uptodate(bh))
2092 continue;
2093
2094 if (!buffer_mapped(bh)) {
c64610ba
AM
2095 int err = 0;
2096
1da177e4
LT
2097 fully_mapped = 0;
2098 if (iblock < lblock) {
b0cf2321 2099 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2100 err = get_block(inode, iblock, bh, 0);
2101 if (err)
1da177e4
LT
2102 SetPageError(page);
2103 }
2104 if (!buffer_mapped(bh)) {
eebd2aa3 2105 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2106 if (!err)
2107 set_buffer_uptodate(bh);
1da177e4
LT
2108 continue;
2109 }
2110 /*
2111 * get_block() might have updated the buffer
2112 * synchronously
2113 */
2114 if (buffer_uptodate(bh))
2115 continue;
2116 }
2117 arr[nr++] = bh;
2118 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2119
2120 if (fully_mapped)
2121 SetPageMappedToDisk(page);
2122
2123 if (!nr) {
2124 /*
2125 * All buffers are uptodate - we can set the page uptodate
2126 * as well. But not if get_block() returned an error.
2127 */
2128 if (!PageError(page))
2129 SetPageUptodate(page);
2130 unlock_page(page);
2131 return 0;
2132 }
2133
2134 /* Stage two: lock the buffers */
2135 for (i = 0; i < nr; i++) {
2136 bh = arr[i];
2137 lock_buffer(bh);
2138 mark_buffer_async_read(bh);
2139 }
2140
2141 /*
2142 * Stage 3: start the IO. Check for uptodateness
2143 * inside the buffer lock in case another process reading
2144 * the underlying blockdev brought it uptodate (the sct fix).
2145 */
2146 for (i = 0; i < nr; i++) {
2147 bh = arr[i];
2148 if (buffer_uptodate(bh))
2149 end_buffer_async_read(bh, 1);
2150 else
2151 submit_bh(READ, bh);
2152 }
2153 return 0;
2154}
2155
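/*
 * Illustrative sketch (hypothetical): a filesystem's ->readpage method for
 * the common get_block case simply delegates to block_read_full_page().
 * myfs_get_block stands in for the filesystem's own block-mapping callback.
 */
#if 0
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif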
2156/* utility function for filesystems that need to do work on expanding
89e10787 2157 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2158 * deal with the hole.
2159 */
89e10787 2160int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2161{
2162 struct address_space *mapping = inode->i_mapping;
2163 struct page *page;
89e10787 2164 void *fsdata;
05eb0b51 2165 unsigned long limit;
1da177e4
LT
2166 int err;
2167
2168 err = -EFBIG;
2169 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2170 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2171 send_sig(SIGXFSZ, current, 0);
2172 goto out;
2173 }
2174 if (size > inode->i_sb->s_maxbytes)
2175 goto out;
2176
89e10787
NP
2177 err = pagecache_write_begin(NULL, mapping, size, 0,
2178 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2179 &page, &fsdata);
2180 if (err)
05eb0b51 2181 goto out;
05eb0b51 2182
89e10787
NP
2183 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2184 BUG_ON(err > 0);
05eb0b51 2185
1da177e4
LT
2186out:
2187 return err;
2188}
2189
f1e3af72
AB
2190static int cont_expand_zero(struct file *file, struct address_space *mapping,
2191 loff_t pos, loff_t *bytes)
1da177e4 2192{
1da177e4 2193 struct inode *inode = mapping->host;
1da177e4 2194 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2195 struct page *page;
2196 void *fsdata;
2197 pgoff_t index, curidx;
2198 loff_t curpos;
2199 unsigned zerofrom, offset, len;
2200 int err = 0;
1da177e4 2201
89e10787
NP
2202 index = pos >> PAGE_CACHE_SHIFT;
2203 offset = pos & ~PAGE_CACHE_MASK;
2204
2205 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2206 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2207 if (zerofrom & (blocksize-1)) {
2208 *bytes |= (blocksize-1);
2209 (*bytes)++;
2210 }
89e10787 2211 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2212
89e10787
NP
2213 err = pagecache_write_begin(file, mapping, curpos, len,
2214 AOP_FLAG_UNINTERRUPTIBLE,
2215 &page, &fsdata);
2216 if (err)
2217 goto out;
eebd2aa3 2218 zero_user(page, zerofrom, len);
89e10787
NP
2219 err = pagecache_write_end(file, mapping, curpos, len, len,
2220 page, fsdata);
2221 if (err < 0)
2222 goto out;
2223 BUG_ON(err != len);
2224 err = 0;
061e9746
OH
2225
2226 balance_dirty_pages_ratelimited(mapping);
89e10787 2227 }
1da177e4 2228
89e10787
NP
2229 /* page covers the boundary, find the boundary offset */
2230 if (index == curidx) {
2231 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2232		/* if we are expanding the file, the last block will be filled */
89e10787
NP
2233 if (offset <= zerofrom) {
2234 goto out;
2235 }
2236 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2237 *bytes |= (blocksize-1);
2238 (*bytes)++;
2239 }
89e10787 2240 len = offset - zerofrom;
1da177e4 2241
89e10787
NP
2242 err = pagecache_write_begin(file, mapping, curpos, len,
2243 AOP_FLAG_UNINTERRUPTIBLE,
2244 &page, &fsdata);
2245 if (err)
2246 goto out;
eebd2aa3 2247 zero_user(page, zerofrom, len);
89e10787
NP
2248 err = pagecache_write_end(file, mapping, curpos, len, len,
2249 page, fsdata);
2250 if (err < 0)
2251 goto out;
2252 BUG_ON(err != len);
2253 err = 0;
1da177e4 2254 }
89e10787
NP
2255out:
2256 return err;
2257}
2258
2259/*
2260 * For moronic filesystems that do not allow holes in files.
2261 * We may have to extend the file.
2262 */
2263int cont_write_begin(struct file *file, struct address_space *mapping,
2264 loff_t pos, unsigned len, unsigned flags,
2265 struct page **pagep, void **fsdata,
2266 get_block_t *get_block, loff_t *bytes)
2267{
2268 struct inode *inode = mapping->host;
2269 unsigned blocksize = 1 << inode->i_blkbits;
2270 unsigned zerofrom;
2271 int err;
2272
2273 err = cont_expand_zero(file, mapping, pos, bytes);
2274 if (err)
2275 goto out;
2276
2277 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2278 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2279 *bytes |= (blocksize-1);
2280 (*bytes)++;
1da177e4 2281 }
1da177e4 2282
89e10787
NP
2283 *pagep = NULL;
2284 err = block_write_begin(file, mapping, pos, len,
2285 flags, pagep, fsdata, get_block);
1da177e4 2286out:
89e10787 2287 return err;
1da177e4
LT
2288}
2289
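/*
 * Illustrative sketch (hypothetical): a ->write_begin for a filesystem that
 * cannot represent holes. The per-inode field "zeroed_size" is an assumed
 * stand-in for the byte count that cont_write_begin() keeps zero-filled up
 * to (FAT uses mmu_private in this role); myfs_i() and myfs_get_block are
 * likewise hypothetical.
 */
#if 0
static int myfs_cont_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;

	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block, &myfs_i(inode)->zeroed_size);
}
#endif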
2290int block_prepare_write(struct page *page, unsigned from, unsigned to,
2291 get_block_t *get_block)
2292{
2293 struct inode *inode = page->mapping->host;
2294 int err = __block_prepare_write(inode, page, from, to, get_block);
2295 if (err)
2296 ClearPageUptodate(page);
2297 return err;
2298}
2299
2300int block_commit_write(struct page *page, unsigned from, unsigned to)
2301{
2302 struct inode *inode = page->mapping->host;
2303 __block_commit_write(inode,page,from,to);
2304 return 0;
2305}
2306
54171690
DC
2307/*
2308 * block_page_mkwrite() is not allowed to change the file size as it gets
2309 * called from a page fault handler when a page is first dirtied. Hence we must
2310 * be careful to check for EOF conditions here. We set the page up correctly
2311 * for a written page which means we get ENOSPC checking when writing into
2312 * holes and correct delalloc and unwritten extent mapping on filesystems that
2313 * support these features.
2314 *
2315 * We are not allowed to take the i_mutex here so we have to play games to
2316 * protect against truncate races as the page could now be beyond EOF. Because
2317 * vmtruncate() writes the inode size before removing pages, once we have the
2318 * page lock we can determine safely if the page is beyond EOF. If it is not
2319 * beyond EOF, then the page is guaranteed safe against truncation until we
2320 * unlock the page.
2321 */
2322int
2323block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2324 get_block_t get_block)
2325{
2326 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2327 unsigned long end;
2328 loff_t size;
2329 int ret = -EINVAL;
2330
2331 lock_page(page);
2332 size = i_size_read(inode);
2333 if ((page->mapping != inode->i_mapping) ||
18336338 2334 (page_offset(page) > size)) {
54171690
DC
2335 /* page got truncated out from underneath us */
2336 goto out_unlock;
2337 }
2338
2339 /* page is wholly or partially inside EOF */
2340 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2341 end = size & ~PAGE_CACHE_MASK;
2342 else
2343 end = PAGE_CACHE_SIZE;
2344
2345 ret = block_prepare_write(page, 0, end, get_block);
2346 if (!ret)
2347 ret = block_commit_write(page, 0, end);
2348
2349out_unlock:
2350 unlock_page(page);
2351 return ret;
2352}
1da177e4
LT
2353
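/*
 * Illustrative sketch (hypothetical): wiring block_page_mkwrite() into a
 * vm_operations_struct, assuming the (vma, page) ->page_mkwrite prototype
 * of this kernel generation. myfs_get_block is the assumed mapping callback.
 */
#if 0
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif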
2354/*
03158cd7 2355 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2356 * immediately, while under the page lock. So it needs a special end_io
2357 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2358 */
2359static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2360{
68671f35 2361 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2362}
2363
03158cd7
NP
2364/*
2365 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2366 * the page (converting it to a circular linked list and taking care of page
2367 * dirty races).
2368 */
2369static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2370{
2371 struct buffer_head *bh;
2372
2373 BUG_ON(!PageLocked(page));
2374
2375 spin_lock(&page->mapping->private_lock);
2376 bh = head;
2377 do {
2378 if (PageDirty(page))
2379 set_buffer_dirty(bh);
2380 if (!bh->b_this_page)
2381 bh->b_this_page = head;
2382 bh = bh->b_this_page;
2383 } while (bh != head);
2384 attach_page_buffers(page, head);
2385 spin_unlock(&page->mapping->private_lock);
2386}
2387
1da177e4
LT
2388/*
2389 * On entry, the page is fully not uptodate.
2390 * On exit the page is fully uptodate in the areas outside (from,to)
2391 */
03158cd7
NP
2392int nobh_write_begin(struct file *file, struct address_space *mapping,
2393 loff_t pos, unsigned len, unsigned flags,
2394 struct page **pagep, void **fsdata,
1da177e4
LT
2395 get_block_t *get_block)
2396{
03158cd7 2397 struct inode *inode = mapping->host;
1da177e4
LT
2398 const unsigned blkbits = inode->i_blkbits;
2399 const unsigned blocksize = 1 << blkbits;
a4b0672d 2400 struct buffer_head *head, *bh;
03158cd7
NP
2401 struct page *page;
2402 pgoff_t index;
2403 unsigned from, to;
1da177e4 2404 unsigned block_in_page;
a4b0672d 2405 unsigned block_start, block_end;
1da177e4 2406 sector_t block_in_file;
1da177e4 2407 int nr_reads = 0;
1da177e4
LT
2408 int ret = 0;
2409 int is_mapped_to_disk = 1;
1da177e4 2410
03158cd7
NP
2411 index = pos >> PAGE_CACHE_SHIFT;
2412 from = pos & (PAGE_CACHE_SIZE - 1);
2413 to = from + len;
2414
54566b2c 2415 page = grab_cache_page_write_begin(mapping, index, flags);
03158cd7
NP
2416 if (!page)
2417 return -ENOMEM;
2418 *pagep = page;
2419 *fsdata = NULL;
2420
2421 if (page_has_buffers(page)) {
2422 unlock_page(page);
2423 page_cache_release(page);
2424 *pagep = NULL;
2425 return block_write_begin(file, mapping, pos, len, flags, pagep,
2426 fsdata, get_block);
2427 }
a4b0672d 2428
1da177e4
LT
2429 if (PageMappedToDisk(page))
2430 return 0;
2431
a4b0672d
NP
2432 /*
2433 * Allocate buffers so that we can keep track of state, and potentially
2434 * attach them to the page if an error occurs. In the common case of
2435 * no error, they will just be freed again without ever being attached
2436 * to the page (which is all OK, because we're under the page lock).
2437 *
2438 * Be careful: the buffer linked list is a NULL terminated one, rather
2439 * than the circular one we're used to.
2440 */
2441 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2442 if (!head) {
2443 ret = -ENOMEM;
2444 goto out_release;
2445 }
a4b0672d 2446
1da177e4 2447 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2448
2449 /*
2450 * We loop across all blocks in the page, whether or not they are
2451 * part of the affected region. This is so we can discover if the
2452 * page is fully mapped-to-disk.
2453 */
a4b0672d 2454 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2455 block_start < PAGE_CACHE_SIZE;
a4b0672d 2456 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2457 int create;
2458
a4b0672d
NP
2459 block_end = block_start + blocksize;
2460 bh->b_state = 0;
1da177e4
LT
2461 create = 1;
2462 if (block_start >= to)
2463 create = 0;
2464 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2465 bh, create);
1da177e4
LT
2466 if (ret)
2467 goto failed;
a4b0672d 2468 if (!buffer_mapped(bh))
1da177e4 2469 is_mapped_to_disk = 0;
a4b0672d
NP
2470 if (buffer_new(bh))
2471 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2472 if (PageUptodate(page)) {
2473 set_buffer_uptodate(bh);
1da177e4 2474 continue;
a4b0672d
NP
2475 }
2476 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2477 zero_user_segments(page, block_start, from,
2478 to, block_end);
1da177e4
LT
2479 continue;
2480 }
a4b0672d 2481 if (buffer_uptodate(bh))
1da177e4
LT
2482 continue; /* reiserfs does this */
2483 if (block_start < from || block_end > to) {
a4b0672d
NP
2484 lock_buffer(bh);
2485 bh->b_end_io = end_buffer_read_nobh;
2486 submit_bh(READ, bh);
2487 nr_reads++;
1da177e4
LT
2488 }
2489 }
2490
2491 if (nr_reads) {
1da177e4
LT
2492 /*
2493 * The page is locked, so these buffers are protected from
2494 * any VM or truncate activity. Hence we don't need to care
2495 * for the buffer_head refcounts.
2496 */
a4b0672d 2497 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2498 wait_on_buffer(bh);
2499 if (!buffer_uptodate(bh))
2500 ret = -EIO;
1da177e4
LT
2501 }
2502 if (ret)
2503 goto failed;
2504 }
2505
2506 if (is_mapped_to_disk)
2507 SetPageMappedToDisk(page);
1da177e4 2508
03158cd7 2509 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2510
1da177e4
LT
2511 return 0;
2512
2513failed:
03158cd7 2514 BUG_ON(!ret);
1da177e4 2515 /*
a4b0672d
NP
2516 * Error recovery is a bit difficult. We need to zero out blocks that
2517 * were newly allocated, and dirty them to ensure they get written out.
2518 * Buffers need to be attached to the page at this point, otherwise
2519 * the handling of potential IO errors during writeout would be hard
2520 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2521 */
03158cd7
NP
2522 attach_nobh_buffers(page, head);
2523 page_zero_new_buffers(page, from, to);
a4b0672d 2524
03158cd7
NP
2525out_release:
2526 unlock_page(page);
2527 page_cache_release(page);
2528 *pagep = NULL;
a4b0672d 2529
03158cd7
NP
2530 if (pos + len > inode->i_size)
2531 vmtruncate(inode, inode->i_size);
a4b0672d 2532
1da177e4
LT
2533 return ret;
2534}
03158cd7 2535EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2536
03158cd7
NP
2537int nobh_write_end(struct file *file, struct address_space *mapping,
2538 loff_t pos, unsigned len, unsigned copied,
2539 struct page *page, void *fsdata)
1da177e4
LT
2540{
2541 struct inode *inode = page->mapping->host;
efdc3131 2542 struct buffer_head *head = fsdata;
03158cd7 2543 struct buffer_head *bh;
5b41e74a 2544 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2545
d4cf109f 2546 if (unlikely(copied < len) && head)
5b41e74a
DM
2547 attach_nobh_buffers(page, head);
2548 if (page_has_buffers(page))
2549 return generic_write_end(file, mapping, pos, len,
2550 copied, page, fsdata);
a4b0672d 2551
22c8ca78 2552 SetPageUptodate(page);
1da177e4 2553 set_page_dirty(page);
03158cd7
NP
2554 if (pos+copied > inode->i_size) {
2555 i_size_write(inode, pos+copied);
1da177e4
LT
2556 mark_inode_dirty(inode);
2557 }
03158cd7
NP
2558
2559 unlock_page(page);
2560 page_cache_release(page);
2561
03158cd7
NP
2562 while (head) {
2563 bh = head;
2564 head = head->b_this_page;
2565 free_buffer_head(bh);
2566 }
2567
2568 return copied;
1da177e4 2569}
03158cd7 2570EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2571
2572/*
2573 * nobh_writepage() - based on block_write_full_page() except
2574 * that it tries to operate without attaching bufferheads to
2575 * the page.
2576 */
2577int nobh_writepage(struct page *page, get_block_t *get_block,
2578 struct writeback_control *wbc)
2579{
2580 struct inode * const inode = page->mapping->host;
2581 loff_t i_size = i_size_read(inode);
2582 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2583 unsigned offset;
1da177e4
LT
2584 int ret;
2585
2586 /* Is the page fully inside i_size? */
2587 if (page->index < end_index)
2588 goto out;
2589
2590 /* Is the page fully outside i_size? (truncate in progress) */
2591 offset = i_size & (PAGE_CACHE_SIZE-1);
2592 if (page->index >= end_index+1 || !offset) {
2593 /*
2594 * The page may have dirty, unmapped buffers. For example,
2595 * they may have been added in ext3_writepage(). Make them
2596 * freeable here, so the page does not leak.
2597 */
2598#if 0
2599 /* Not really sure about this - do we need this ? */
2600 if (page->mapping->a_ops->invalidatepage)
2601 page->mapping->a_ops->invalidatepage(page, offset);
2602#endif
2603 unlock_page(page);
2604 return 0; /* don't care */
2605 }
2606
2607 /*
2608 * The page straddles i_size. It must be zeroed out on each and every
2609 * writepage invocation because it may be mmapped. "A file is mapped
2610 * in multiples of the page size. For a file that is not a multiple of
2611 * the page size, the remaining memory is zeroed when mapped, and
2612 * writes to that region are not written out to the file."
2613 */
eebd2aa3 2614 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2615out:
2616 ret = mpage_writepage(page, get_block, wbc);
2617 if (ret == -EAGAIN)
2618 ret = __block_write_full_page(inode, page, get_block, wbc);
2619 return ret;
2620}
2621EXPORT_SYMBOL(nobh_writepage);
2622
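/*
 * Illustrative sketch (hypothetical): the "nobh" flavour of address_space
 * operations, roughly how a filesystem ties nobh_write_begin(),
 * nobh_write_end() and nobh_writepage() together (ext2's nobh mount option
 * is the in-tree user). All myfs_* names are assumptions.
 */
#if 0
static int myfs_nobh_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static int myfs_nobh_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};
#endif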
03158cd7
NP
2623int nobh_truncate_page(struct address_space *mapping,
2624 loff_t from, get_block_t *get_block)
1da177e4 2625{
1da177e4
LT
2626 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2627 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2628 unsigned blocksize;
2629 sector_t iblock;
2630 unsigned length, pos;
2631 struct inode *inode = mapping->host;
1da177e4 2632 struct page *page;
03158cd7
NP
2633 struct buffer_head map_bh;
2634 int err;
1da177e4 2635
03158cd7
NP
2636 blocksize = 1 << inode->i_blkbits;
2637 length = offset & (blocksize - 1);
2638
2639 /* Block boundary? Nothing to do */
2640 if (!length)
2641 return 0;
2642
2643 length = blocksize - length;
2644 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2645
1da177e4 2646 page = grab_cache_page(mapping, index);
03158cd7 2647 err = -ENOMEM;
1da177e4
LT
2648 if (!page)
2649 goto out;
2650
03158cd7
NP
2651 if (page_has_buffers(page)) {
2652has_buffers:
2653 unlock_page(page);
2654 page_cache_release(page);
2655 return block_truncate_page(mapping, from, get_block);
2656 }
2657
2658 /* Find the buffer that contains "offset" */
2659 pos = blocksize;
2660 while (offset >= pos) {
2661 iblock++;
2662 pos += blocksize;
2663 }
2664
2665 err = get_block(inode, iblock, &map_bh, 0);
2666 if (err)
2667 goto unlock;
2668 /* unmapped? It's a hole - nothing to do */
2669 if (!buffer_mapped(&map_bh))
2670 goto unlock;
2671
2672 /* Ok, it's mapped. Make sure it's up-to-date */
2673 if (!PageUptodate(page)) {
2674 err = mapping->a_ops->readpage(NULL, page);
2675 if (err) {
2676 page_cache_release(page);
2677 goto out;
2678 }
2679 lock_page(page);
2680 if (!PageUptodate(page)) {
2681 err = -EIO;
2682 goto unlock;
2683 }
2684 if (page_has_buffers(page))
2685 goto has_buffers;
1da177e4 2686 }
eebd2aa3 2687 zero_user(page, offset, length);
03158cd7
NP
2688 set_page_dirty(page);
2689 err = 0;
2690
2691unlock:
1da177e4
LT
2692 unlock_page(page);
2693 page_cache_release(page);
2694out:
03158cd7 2695 return err;
1da177e4
LT
2696}
2697EXPORT_SYMBOL(nobh_truncate_page);
2698
2699int block_truncate_page(struct address_space *mapping,
2700 loff_t from, get_block_t *get_block)
2701{
2702 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2703 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2704 unsigned blocksize;
54b21a79 2705 sector_t iblock;
1da177e4
LT
2706 unsigned length, pos;
2707 struct inode *inode = mapping->host;
2708 struct page *page;
2709 struct buffer_head *bh;
1da177e4
LT
2710 int err;
2711
2712 blocksize = 1 << inode->i_blkbits;
2713 length = offset & (blocksize - 1);
2714
2715 /* Block boundary? Nothing to do */
2716 if (!length)
2717 return 0;
2718
2719 length = blocksize - length;
54b21a79 2720 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2721
2722 page = grab_cache_page(mapping, index);
2723 err = -ENOMEM;
2724 if (!page)
2725 goto out;
2726
2727 if (!page_has_buffers(page))
2728 create_empty_buffers(page, blocksize, 0);
2729
2730 /* Find the buffer that contains "offset" */
2731 bh = page_buffers(page);
2732 pos = blocksize;
2733 while (offset >= pos) {
2734 bh = bh->b_this_page;
2735 iblock++;
2736 pos += blocksize;
2737 }
2738
2739 err = 0;
2740 if (!buffer_mapped(bh)) {
b0cf2321 2741 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2742 err = get_block(inode, iblock, bh, 0);
2743 if (err)
2744 goto unlock;
2745 /* unmapped? It's a hole - nothing to do */
2746 if (!buffer_mapped(bh))
2747 goto unlock;
2748 }
2749
2750 /* Ok, it's mapped. Make sure it's up-to-date */
2751 if (PageUptodate(page))
2752 set_buffer_uptodate(bh);
2753
33a266dd 2754 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2755 err = -EIO;
2756 ll_rw_block(READ, 1, &bh);
2757 wait_on_buffer(bh);
2758 /* Uhhuh. Read error. Complain and punt. */
2759 if (!buffer_uptodate(bh))
2760 goto unlock;
2761 }
2762
eebd2aa3 2763 zero_user(page, offset, length);
1da177e4
LT
2764 mark_buffer_dirty(bh);
2765 err = 0;
2766
2767unlock:
2768 unlock_page(page);
2769 page_cache_release(page);
2770out:
2771 return err;
2772}
2773
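/*
 * Illustrative sketch (hypothetical): zeroing the tail of the last block when
 * a file is truncated to a size that is not block-aligned, before the
 * filesystem releases the blocks beyond the new i_size.
 */
#if 0
static int myfs_zero_truncate_tail(struct inode *inode)
{
	return block_truncate_page(inode->i_mapping, inode->i_size,
				   myfs_get_block);
}
#endif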
2774/*
2775 * The generic ->writepage function for buffer-backed address_spaces
2776 */
2777int block_write_full_page(struct page *page, get_block_t *get_block,
2778 struct writeback_control *wbc)
2779{
2780 struct inode * const inode = page->mapping->host;
2781 loff_t i_size = i_size_read(inode);
2782 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2783 unsigned offset;
1da177e4
LT
2784
2785 /* Is the page fully inside i_size? */
2786 if (page->index < end_index)
2787 return __block_write_full_page(inode, page, get_block, wbc);
2788
2789 /* Is the page fully outside i_size? (truncate in progress) */
2790 offset = i_size & (PAGE_CACHE_SIZE-1);
2791 if (page->index >= end_index+1 || !offset) {
2792 /*
2793 * The page may have dirty, unmapped buffers. For example,
2794 * they may have been added in ext3_writepage(). Make them
2795 * freeable here, so the page does not leak.
2796 */
aaa4059b 2797 do_invalidatepage(page, 0);
1da177e4
LT
2798 unlock_page(page);
2799 return 0; /* don't care */
2800 }
2801
2802 /*
2803 * The page straddles i_size. It must be zeroed out on each and every
2804 * writepage invocation because it may be mmapped. "A file is mapped
2805 * in multiples of the page size. For a file that is not a multiple of
2806 * the page size, the remaining memory is zeroed when mapped, and
2807 * writes to that region are not written out to the file."
2808 */
eebd2aa3 2809 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2810 return __block_write_full_page(inode, page, get_block, wbc);
2811}
2812
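/*
 * Illustrative sketch (hypothetical): the matching ->writepage for a
 * get_block-style filesystem is a one-line wrapper around
 * block_write_full_page(). myfs_get_block is the assumed mapping callback.
 */
#if 0
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
#endif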
2813sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2814 get_block_t *get_block)
2815{
2816 struct buffer_head tmp;
2817 struct inode *inode = mapping->host;
2818 tmp.b_state = 0;
2819 tmp.b_blocknr = 0;
b0cf2321 2820 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2821 get_block(inode, block, &tmp, 0);
2822 return tmp.b_blocknr;
2823}
2824
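/*
 * Illustrative sketch (hypothetical): ->bmap (used by the FIBMAP ioctl and
 * by swap-file activation) can likewise be delegated to generic_block_bmap().
 */
#if 0
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif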
6712ecf8 2825static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2826{
2827 struct buffer_head *bh = bio->bi_private;
2828
1da177e4
LT
2829 if (err == -EOPNOTSUPP) {
2830 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2831 set_bit(BH_Eopnotsupp, &bh->b_state);
2832 }
2833
08bafc03
KM
2834 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2835 set_bit(BH_Quiet, &bh->b_state);
2836
1da177e4
LT
2837 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2838 bio_put(bio);
1da177e4
LT
2839}
2840
2841int submit_bh(int rw, struct buffer_head * bh)
2842{
2843 struct bio *bio;
2844 int ret = 0;
2845
2846 BUG_ON(!buffer_locked(bh));
2847 BUG_ON(!buffer_mapped(bh));
2848 BUG_ON(!bh->b_end_io);
2849
48fd4f93
JA
2850 /*
2851 * Mask in barrier bit for a write (could be either a WRITE or a
2852 * WRITE_SYNC
2853 */
2854 if (buffer_ordered(bh) && (rw & WRITE))
2855 rw |= WRITE_BARRIER;
1da177e4
LT
2856
2857 /*
48fd4f93 2858 * Only clear out a write error when rewriting
1da177e4 2859 */
48fd4f93 2860 if (test_set_buffer_req(bh) && (rw & WRITE))
1da177e4
LT
2861 clear_buffer_write_io_error(bh);
2862
2863 /*
2864 * from here on down, it's all bio -- do the initial mapping,
2865 * submit_bio -> generic_make_request may further map this bio around
2866 */
2867 bio = bio_alloc(GFP_NOIO, 1);
2868
2869 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2870 bio->bi_bdev = bh->b_bdev;
2871 bio->bi_io_vec[0].bv_page = bh->b_page;
2872 bio->bi_io_vec[0].bv_len = bh->b_size;
2873 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2874
2875 bio->bi_vcnt = 1;
2876 bio->bi_idx = 0;
2877 bio->bi_size = bh->b_size;
2878
2879 bio->bi_end_io = end_bio_bh_io_sync;
2880 bio->bi_private = bh;
2881
2882 bio_get(bio);
2883 submit_bio(rw, bio);
2884
2885 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2886 ret = -EOPNOTSUPP;
2887
2888 bio_put(bio);
2889 return ret;
2890}
2891
2892/**
2893 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2894 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2895 * @nr: number of &struct buffer_heads in the array
2896 * @bhs: array of pointers to &struct buffer_head
2897 *
a7662236
JK
2898 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2899 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2900 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2901 * are sent to disk. The fourth %READA option is described in the documentation
2902 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
2903 *
2904 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2905 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2906 * clean when doing a write request, and any buffer that appears to be
2907 * up-to-date when doing a read request. Further it marks as clean buffers that
2908 * are processed for writing (the buffer cache won't assume that they are
2909 * actually clean until the buffer gets unlocked).
1da177e4
LT
2910 *
2911 * ll_rw_block sets b_end_io to a simple completion handler that marks
2912 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2913 * any waiters.
2914 *
2915 * All of the buffers must be for the same device, and must also be a
2916 * multiple of the current approved size for the device.
2917 */
2918void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2919{
2920 int i;
2921
2922 for (i = 0; i < nr; i++) {
2923 struct buffer_head *bh = bhs[i];
2924
18ce3751 2925 if (rw == SWRITE || rw == SWRITE_SYNC)
a7662236 2926 lock_buffer(bh);
ca5de404 2927 else if (!trylock_buffer(bh))
1da177e4
LT
2928 continue;
2929
18ce3751 2930 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
1da177e4 2931 if (test_clear_buffer_dirty(bh)) {
76c3073a 2932 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2933 get_bh(bh);
18ce3751
JA
2934 if (rw == SWRITE_SYNC)
2935 submit_bh(WRITE_SYNC, bh);
2936 else
2937 submit_bh(WRITE, bh);
1da177e4
LT
2938 continue;
2939 }
2940 } else {
1da177e4 2941 if (!buffer_uptodate(bh)) {
76c3073a 2942 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2943 get_bh(bh);
1da177e4
LT
2944 submit_bh(rw, bh);
2945 continue;
2946 }
2947 }
2948 unlock_buffer(bh);
1da177e4
LT
2949 }
2950}
2951
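/*
 * Illustrative sketch (hypothetical): the classic ll_rw_block() pattern -
 * kick off reads on a batch of buffer_heads and then wait for each one.
 * The caller is assumed to hold a reference on every bh in the array.
 */
#if 0
static int myfs_read_bh_batch(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif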
2952/*
2953 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2954 * and then start new I/O and then wait upon it. The caller must have a ref on
2955 * the buffer_head.
2956 */
2957int sync_dirty_buffer(struct buffer_head *bh)
2958{
2959 int ret = 0;
2960
2961 WARN_ON(atomic_read(&bh->b_count) < 1);
2962 lock_buffer(bh);
2963 if (test_clear_buffer_dirty(bh)) {
2964 get_bh(bh);
2965 bh->b_end_io = end_buffer_write_sync;
78f707bf 2966 ret = submit_bh(WRITE, bh);
1da177e4
LT
2967 wait_on_buffer(bh);
2968 if (buffer_eopnotsupp(bh)) {
2969 clear_buffer_eopnotsupp(bh);
2970 ret = -EOPNOTSUPP;
2971 }
2972 if (!ret && !buffer_uptodate(bh))
2973 ret = -EIO;
2974 } else {
2975 unlock_buffer(bh);
2976 }
2977 return ret;
2978}
2979
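/*
 * Illustrative sketch (hypothetical): updating one on-disk metadata block and
 * forcing it out with sync_dirty_buffer(). sb_bread() supplies the reference
 * that sync_dirty_buffer() requires its caller to hold.
 */
#if 0
static int myfs_update_block_sync(struct super_block *sb, sector_t blocknr,
				  const void *data, size_t len)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);
	int err;

	if (!bh)
		return -EIO;
	memcpy(bh->b_data, data, len);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	brelse(bh);
	return err;
}
#endif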
2980/*
2981 * try_to_free_buffers() checks if all the buffers on this particular page
2982 * are unused, and releases them if so.
2983 *
2984 * Exclusion against try_to_free_buffers may be obtained by either
2985 * locking the page or by holding its mapping's private_lock.
2986 *
2987 * If the page is dirty but all the buffers are clean then we need to
2988 * be sure to mark the page clean as well. This is because the page
2989 * may be against a block device, and a later reattachment of buffers
2990 * to a dirty page will set *all* buffers dirty. Which would corrupt
2991 * filesystem data on the same device.
2992 *
2993 * The same applies to regular filesystem pages: if all the buffers are
2994 * clean then we set the page clean and proceed. To do that, we require
2995 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2996 * private_lock.
2997 *
2998 * try_to_free_buffers() is non-blocking.
2999 */
3000static inline int buffer_busy(struct buffer_head *bh)
3001{
3002 return atomic_read(&bh->b_count) |
3003 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3004}
3005
3006static int
3007drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3008{
3009 struct buffer_head *head = page_buffers(page);
3010 struct buffer_head *bh;
3011
3012 bh = head;
3013 do {
de7d5a3b 3014 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3015 set_bit(AS_EIO, &page->mapping->flags);
3016 if (buffer_busy(bh))
3017 goto failed;
3018 bh = bh->b_this_page;
3019 } while (bh != head);
3020
3021 do {
3022 struct buffer_head *next = bh->b_this_page;
3023
535ee2fb 3024 if (bh->b_assoc_map)
1da177e4
LT
3025 __remove_assoc_queue(bh);
3026 bh = next;
3027 } while (bh != head);
3028 *buffers_to_free = head;
3029 __clear_page_buffers(page);
3030 return 1;
3031failed:
3032 return 0;
3033}
3034
3035int try_to_free_buffers(struct page *page)
3036{
3037 struct address_space * const mapping = page->mapping;
3038 struct buffer_head *buffers_to_free = NULL;
3039 int ret = 0;
3040
3041 BUG_ON(!PageLocked(page));
ecdfc978 3042 if (PageWriteback(page))
1da177e4
LT
3043 return 0;
3044
3045 if (mapping == NULL) { /* can this still happen? */
3046 ret = drop_buffers(page, &buffers_to_free);
3047 goto out;
3048 }
3049
3050 spin_lock(&mapping->private_lock);
3051 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3052
3053 /*
3054 * If the filesystem writes its buffers by hand (eg ext3)
3055 * then we can have clean buffers against a dirty page. We
3056 * clean the page here; otherwise the VM will never notice
3057 * that the filesystem did any IO at all.
3058 *
3059 * Also, during truncate, discard_buffer will have marked all
3060 * the page's buffers clean. We discover that here and clean
3061 * the page also.
87df7241
NP
3062 *
3063 * private_lock must be held over this entire operation in order
3064 * to synchronise against __set_page_dirty_buffers and prevent the
3065 * dirty bit from being lost.
ecdfc978
LT
3066 */
3067 if (ret)
3068 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3069 spin_unlock(&mapping->private_lock);
1da177e4
LT
3070out:
3071 if (buffers_to_free) {
3072 struct buffer_head *bh = buffers_to_free;
3073
3074 do {
3075 struct buffer_head *next = bh->b_this_page;
3076 free_buffer_head(bh);
3077 bh = next;
3078 } while (bh != buffers_to_free);
3079 }
3080 return ret;
3081}
3082EXPORT_SYMBOL(try_to_free_buffers);
3083
3978d717 3084void block_sync_page(struct page *page)
1da177e4
LT
3085{
3086 struct address_space *mapping;
3087
3088 smp_mb();
3089 mapping = page_mapping(page);
3090 if (mapping)
3091 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
3092}
3093
3094/*
3095 * There are no bdflush tunables left. But distributions are
3096 * still running obsolete flush daemons, so we terminate them here.
3097 *
3098 * Use of bdflush() is deprecated and will be removed in a future kernel.
3099 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3100 */
bdc480e3 3101SYSCALL_DEFINE2(bdflush, int, func, long, data)
1da177e4
LT
3102{
3103 static int msg_count;
3104
3105 if (!capable(CAP_SYS_ADMIN))
3106 return -EPERM;
3107
3108 if (msg_count < 5) {
3109 msg_count++;
3110 printk(KERN_INFO
3111 "warning: process `%s' used the obsolete bdflush"
3112 " system call\n", current->comm);
3113 printk(KERN_INFO "Fix your initscripts?\n");
3114 }
3115
3116 if (func == 1)
3117 do_exit(0);
3118 return 0;
3119}
3120
3121/*
3122 * Buffer-head allocation
3123 */
e18b890b 3124static struct kmem_cache *bh_cachep;
1da177e4
LT
3125
3126/*
3127 * Once the number of bh's in the machine exceeds this level, we start
3128 * stripping them in writeback.
3129 */
3130static int max_buffer_heads;
3131
3132int buffer_heads_over_limit;
3133
3134struct bh_accounting {
3135 int nr; /* Number of live bh's */
3136 int ratelimit; /* Limit cacheline bouncing */
3137};
3138
3139static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3140
3141static void recalc_bh_state(void)
3142{
3143 int i;
3144 int tot = 0;
3145
3146 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3147 return;
3148 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3149 for_each_online_cpu(i)
1da177e4
LT
3150 tot += per_cpu(bh_accounting, i).nr;
3151 buffer_heads_over_limit = (tot > max_buffer_heads);
3152}
3153
dd0fc66f 3154struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3155{
488514d1 3156 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
1da177e4 3157 if (ret) {
a35afb83 3158 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3159 get_cpu_var(bh_accounting).nr++;
1da177e4 3160 recalc_bh_state();
736c7b80 3161 put_cpu_var(bh_accounting);
1da177e4
LT
3162 }
3163 return ret;
3164}
3165EXPORT_SYMBOL(alloc_buffer_head);
3166
3167void free_buffer_head(struct buffer_head *bh)
3168{
3169 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3170 kmem_cache_free(bh_cachep, bh);
736c7b80 3171 get_cpu_var(bh_accounting).nr--;
1da177e4 3172 recalc_bh_state();
736c7b80 3173 put_cpu_var(bh_accounting);
1da177e4
LT
3174}
3175EXPORT_SYMBOL(free_buffer_head);
3176
1da177e4
LT
3177static void buffer_exit_cpu(int cpu)
3178{
3179 int i;
3180 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3181
3182 for (i = 0; i < BH_LRU_SIZE; i++) {
3183 brelse(b->bhs[i]);
3184 b->bhs[i] = NULL;
3185 }
8a143426
ED
3186 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3187 per_cpu(bh_accounting, cpu).nr = 0;
3188 put_cpu_var(bh_accounting);
1da177e4
LT
3189}
3190
3191static int buffer_cpu_notify(struct notifier_block *self,
3192 unsigned long action, void *hcpu)
3193{
8bb78442 3194 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3195 buffer_exit_cpu((unsigned long)hcpu);
3196 return NOTIFY_OK;
3197}
1da177e4 3198
389d1b08 3199/**
a6b91919 3200 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3201 * @bh: struct buffer_head
3202 *
3203 * Returns 1 if the buffer is up-to-date. Otherwise returns 0
3204 * with the buffer locked.
3205 */
3206int bh_uptodate_or_lock(struct buffer_head *bh)
3207{
3208 if (!buffer_uptodate(bh)) {
3209 lock_buffer(bh);
3210 if (!buffer_uptodate(bh))
3211 return 0;
3212 unlock_buffer(bh);
3213 }
3214 return 1;
3215}
3216EXPORT_SYMBOL(bh_uptodate_or_lock);
3217
3218/**
a6b91919 3219 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3220 * @bh: struct buffer_head
3221 *
3222 * Returns zero on success and -EIO on error.
3223 */
3224int bh_submit_read(struct buffer_head *bh)
3225{
3226 BUG_ON(!buffer_locked(bh));
3227
3228 if (buffer_uptodate(bh)) {
3229 unlock_buffer(bh);
3230 return 0;
3231 }
3232
3233 get_bh(bh);
3234 bh->b_end_io = end_buffer_read_sync;
3235 submit_bh(READ, bh);
3236 wait_on_buffer(bh);
3237 if (buffer_uptodate(bh))
3238 return 0;
3239 return -EIO;
3240}
3241EXPORT_SYMBOL(bh_submit_read);
3242
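/*
 * Illustrative sketch (hypothetical): the intended pairing of
 * bh_uptodate_or_lock() and bh_submit_read() - only issue the read (and
 * wait for it) when the buffer is not already up to date.
 */
#if 0
static int myfs_read_bh_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer not locked */
	return bh_submit_read(bh);	/* we hold the lock; submits and waits */
}
#endif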
b98938c3 3243static void
51cc5068 3244init_buffer_head(void *data)
b98938c3
CL
3245{
3246 struct buffer_head *bh = data;
3247
3248 memset(bh, 0, sizeof(*bh));
3249 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3250}
3251
1da177e4
LT
3252void __init buffer_init(void)
3253{
3254 int nrpages;
3255
b98938c3
CL
3256 bh_cachep = kmem_cache_create("buffer_head",
3257 sizeof(struct buffer_head), 0,
3258 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3259 SLAB_MEM_SPREAD),
3260 init_buffer_head);
1da177e4
LT
3261
3262 /*
3263 * Limit the bh occupancy to 10% of ZONE_NORMAL
3264 */
3265 nrpages = (nr_free_buffer_pages() * 10) / 100;
3266 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3267 hotcpu_notifier(buffer_cpu_notify, 0);
3268}
3269
3270EXPORT_SYMBOL(__bforget);
3271EXPORT_SYMBOL(__brelse);
3272EXPORT_SYMBOL(__wait_on_buffer);
3273EXPORT_SYMBOL(block_commit_write);
3274EXPORT_SYMBOL(block_prepare_write);
54171690 3275EXPORT_SYMBOL(block_page_mkwrite);
1da177e4
LT
3276EXPORT_SYMBOL(block_read_full_page);
3277EXPORT_SYMBOL(block_sync_page);
3278EXPORT_SYMBOL(block_truncate_page);
3279EXPORT_SYMBOL(block_write_full_page);
89e10787 3280EXPORT_SYMBOL(cont_write_begin);
1da177e4
LT
3281EXPORT_SYMBOL(end_buffer_read_sync);
3282EXPORT_SYMBOL(end_buffer_write_sync);
3283EXPORT_SYMBOL(file_fsync);
3284EXPORT_SYMBOL(fsync_bdev);
3285EXPORT_SYMBOL(generic_block_bmap);
05eb0b51 3286EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3287EXPORT_SYMBOL(init_buffer);
3288EXPORT_SYMBOL(invalidate_bdev);
3289EXPORT_SYMBOL(ll_rw_block);
3290EXPORT_SYMBOL(mark_buffer_dirty);
3291EXPORT_SYMBOL(submit_bh);
3292EXPORT_SYMBOL(sync_dirty_buffer);
3293EXPORT_SYMBOL(unlock_buffer);