1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
70void __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
77void unlock_buffer(struct buffer_head *bh)
78{
79 clear_bit_unlock(BH_Lock, &bh->b_state);
80 smp_mb__after_clear_bit();
81 wake_up_bit(&bh->b_state, BH_Lock);
82}
83
84/*
85 * Block until a buffer comes unlocked. This doesn't stop it
86 * from becoming locked again - you have to lock it yourself
87 * if you want to preserve its state.
88 */
89void __wait_on_buffer(struct buffer_head * bh)
90{
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92}
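/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * serialising a buffer-content update against in-flight I/O uses the locking
 * primitives above.  The "examplefs_" name is hypothetical.
 */
#if 0
static void examplefs_update_block(struct buffer_head *bh,
				   const void *data, size_t len)
{
	lock_buffer(bh);		/* waits for and excludes in-flight I/O */
	memcpy(bh->b_data, data, len);
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);		/* defined later in this file */
	unlock_buffer(bh);		/* wakes waiters via wake_up_bit() */
}
#endif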
93
94static void
95__clear_page_buffers(struct page *page)
96{
97 ClearPagePrivate(page);
98 set_page_private(page, 0);
99 page_cache_release(page);
100}
101
102
103static int quiet_error(struct buffer_head *bh)
104{
105 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106 return 0;
107 return 1;
108}
109
110
111static void buffer_io_error(struct buffer_head *bh)
112{
113 char b[BDEVNAME_SIZE];
114 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 bdevname(bh->b_bdev, b),
116 (unsigned long long)bh->b_blocknr);
117}
118
119/*
120 * End-of-IO handler helper function which does not touch the bh after
121 * unlocking it.
122 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123 * a race there is benign: unlock_buffer() only use the bh's address for
124 * hashing after unlocking the buffer, so it doesn't actually touch the bh
125 * itself.
126 */
127static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128{
129 if (uptodate) {
130 set_buffer_uptodate(bh);
131 } else {
132 /* This happens, due to failed READA attempts. */
133 clear_buffer_uptodate(bh);
134 }
135 unlock_buffer(bh);
136}
137
138/*
139 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
140 * unlock the buffer. This is what ll_rw_block uses too.
141 */
142void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143{
144 __end_buffer_read_notouch(bh, uptodate);
145 put_bh(bh);
146}
147
148void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149{
150 char b[BDEVNAME_SIZE];
151
152 if (uptodate) {
153 set_buffer_uptodate(bh);
154 } else {
155 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156 buffer_io_error(bh);
157 printk(KERN_WARNING "lost page write due to "
158 "I/O error on %s\n",
159 bdevname(bh->b_bdev, b));
160 }
161 set_buffer_write_io_error(bh);
162 clear_buffer_uptodate(bh);
163 }
164 unlock_buffer(bh);
165 put_bh(bh);
166}
167
168/*
169 * Various filesystems appear to want __find_get_block to be non-blocking.
170 * But it's the page lock which protects the buffers. To get around this,
171 * we get exclusion from try_to_free_buffers with the blockdev mapping's
172 * private_lock.
173 *
174 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
175 * may be quite high. This code could TryLock the page, and if that
176 * succeeds, there is no need to take private_lock. (But if
177 * private_lock is contended then so is mapping->tree_lock).
178 */
179static struct buffer_head *
180__find_get_block_slow(struct block_device *bdev, sector_t block)
181{
182 struct inode *bd_inode = bdev->bd_inode;
183 struct address_space *bd_mapping = bd_inode->i_mapping;
184 struct buffer_head *ret = NULL;
185 pgoff_t index;
186 struct buffer_head *bh;
187 struct buffer_head *head;
188 struct page *page;
189 int all_mapped = 1;
190
191 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
192 page = find_get_page(bd_mapping, index);
193 if (!page)
194 goto out;
195
196 spin_lock(&bd_mapping->private_lock);
197 if (!page_has_buffers(page))
198 goto out_unlock;
199 head = page_buffers(page);
200 bh = head;
201 do {
202 if (!buffer_mapped(bh))
203 all_mapped = 0;
204 else if (bh->b_blocknr == block) {
205 ret = bh;
206 get_bh(bh);
207 goto out_unlock;
208 }
209 bh = bh->b_this_page;
210 } while (bh != head);
211
212 /* we might be here because some of the buffers on this page are
213 * not mapped. This is due to various races between
214 * file io on the block device and getblk. It gets dealt with
215 * elsewhere, don't buffer_error if we had some unmapped buffers
216 */
217 if (all_mapped) {
218 printk("__find_get_block_slow() failed. "
219 "block=%llu, b_blocknr=%llu\n",
220 (unsigned long long)block,
221 (unsigned long long)bh->b_blocknr);
222 printk("b_state=0x%08lx, b_size=%zu\n",
223 bh->b_state, bh->b_size);
224 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
225 }
226out_unlock:
227 spin_unlock(&bd_mapping->private_lock);
228 page_cache_release(page);
229out:
230 return ret;
231}
232
233/* If invalidate_buffers() will trash dirty buffers, it means some kind
234 of fs corruption is going on. Trashing dirty data always implies losing
235 information that was supposed to be just stored on the physical layer
236 by the user.
237
238 Thus invalidate_buffers in general usage is not allowed to trash
239 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
240 be preserved. These buffers are simply skipped.
241
242 We also skip buffers which are still in use. For example this can
243 happen if a userspace program is reading the block device.
244
245 NOTE: In the case where the user removed a removable-media-disk even if
246 there's still dirty data not synced on disk (due to a bug in the device driver
247 or due to an error of the user), by not destroying the dirty buffers we could
248 generate corruption also on the next media inserted, thus a parameter is
249 necessary to handle this case in the most safe way possible (trying
250 to not corrupt also the new disk inserted with the data belonging to
251 the old now corrupted disk). Also for the ramdisk the natural thing
252 to do in order to release the ramdisk memory is to destroy dirty buffers.
253
254 These are two special cases. Normal usage implies that the device driver
255 issues a sync on the device (without waiting for I/O completion) and
256 then an invalidate_buffers call that doesn't trash dirty buffers.
257
258 For handling cache coherency with the blkdev pagecache the 'update' case
259 has been introduced. It is needed to re-read from disk any pinned
260 buffer. NOTE: re-reading from disk is destructive so we can do it only
261 when we assume nobody is changing the buffercache under our I/O and when
262 we think the disk contains more recent information than the buffercache.
263 The update == 1 pass marks the buffers we need to update, the update == 2
264 pass does the actual I/O. */
265void invalidate_bdev(struct block_device *bdev)
266{
267 struct address_space *mapping = bdev->bd_inode->i_mapping;
268
269 if (mapping->nrpages == 0)
270 return;
271
272 invalidate_bh_lrus();
273 invalidate_mapping_pages(mapping, 0, -1);
274}
275
276/*
277 * Kick pdflush then try to free up some ZONE_NORMAL memory.
278 */
279static void free_more_memory(void)
280{
281 struct zone *zone;
282 int nid;
283
284 wakeup_pdflush(1024);
285 yield();
286
287 for_each_online_node(nid) {
288 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
289 gfp_zone(GFP_NOFS), NULL,
290 &zone);
291 if (zone)
292 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
293 GFP_NOFS, NULL);
294 }
295}
296
297/*
298 * I/O completion handler for block_read_full_page() - pages
299 * which come unlocked at the end of I/O.
300 */
301static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
302{
303 unsigned long flags;
304 struct buffer_head *first;
305 struct buffer_head *tmp;
306 struct page *page;
307 int page_uptodate = 1;
308
309 BUG_ON(!buffer_async_read(bh));
310
311 page = bh->b_page;
312 if (uptodate) {
313 set_buffer_uptodate(bh);
314 } else {
315 clear_buffer_uptodate(bh);
316 if (!quiet_error(bh))
317 buffer_io_error(bh);
318 SetPageError(page);
319 }
320
321 /*
322 * Be _very_ careful from here on. Bad things can happen if
323 * two buffer heads end IO at almost the same time and both
324 * decide that the page is now completely done.
325 */
326 first = page_buffers(page);
327 local_irq_save(flags);
328 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
329 clear_buffer_async_read(bh);
330 unlock_buffer(bh);
331 tmp = bh;
332 do {
333 if (!buffer_uptodate(tmp))
334 page_uptodate = 0;
335 if (buffer_async_read(tmp)) {
336 BUG_ON(!buffer_locked(tmp));
337 goto still_busy;
338 }
339 tmp = tmp->b_this_page;
340 } while (tmp != bh);
341 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
342 local_irq_restore(flags);
343
344 /*
345 * If none of the buffers had errors and they are all
346 * uptodate then we can set the page uptodate.
347 */
348 if (page_uptodate && !PageError(page))
349 SetPageUptodate(page);
350 unlock_page(page);
351 return;
352
353still_busy:
354 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
355 local_irq_restore(flags);
356 return;
357}
358
359/*
360 * Completion handler for block_write_full_page() - pages which are unlocked
361 * during I/O, and which have PageWriteback cleared upon I/O completion.
362 */
363static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
364{
365 char b[BDEVNAME_SIZE];
366 unsigned long flags;
367 struct buffer_head *first;
368 struct buffer_head *tmp;
369 struct page *page;
370
371 BUG_ON(!buffer_async_write(bh));
372
373 page = bh->b_page;
374 if (uptodate) {
375 set_buffer_uptodate(bh);
376 } else {
377 if (!quiet_error(bh)) {
378 buffer_io_error(bh);
379 printk(KERN_WARNING "lost page write due to "
380 "I/O error on %s\n",
381 bdevname(bh->b_bdev, b));
382 }
383 set_bit(AS_EIO, &page->mapping->flags);
384 set_buffer_write_io_error(bh);
385 clear_buffer_uptodate(bh);
386 SetPageError(page);
387 }
388
389 first = page_buffers(page);
390 local_irq_save(flags);
391 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
392
393 clear_buffer_async_write(bh);
394 unlock_buffer(bh);
395 tmp = bh->b_this_page;
396 while (tmp != bh) {
397 if (buffer_async_write(tmp)) {
398 BUG_ON(!buffer_locked(tmp));
399 goto still_busy;
400 }
401 tmp = tmp->b_this_page;
402 }
403 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
404 local_irq_restore(flags);
405 end_page_writeback(page);
406 return;
407
408still_busy:
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
411 return;
412}
413
414/*
415 * If a page's buffers are under async read-in (end_buffer_async_read
416 * completion) then there is a possibility that another thread of
417 * control could lock one of the buffers after it has completed
418 * but while some of the other buffers have not completed. This
419 * locked buffer would confuse end_buffer_async_read() into not unlocking
420 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
421 * that this buffer is not under async I/O.
422 *
423 * The page comes unlocked when it has no locked buffer_async buffers
424 * left.
425 *
426 * PageLocked prevents anyone starting new async I/O reads any of
427 * the buffers.
428 *
429 * PageWriteback is used to prevent simultaneous writeout of the same
430 * page.
431 *
432 * PageLocked prevents anyone from starting writeback of a page which is
433 * under read I/O (PageWriteback is only ever set against a locked page).
434 */
435static void mark_buffer_async_read(struct buffer_head *bh)
436{
437 bh->b_end_io = end_buffer_async_read;
438 set_buffer_async_read(bh);
439}
440
441void mark_buffer_async_write(struct buffer_head *bh)
442{
443 bh->b_end_io = end_buffer_async_write;
444 set_buffer_async_write(bh);
445}
446EXPORT_SYMBOL(mark_buffer_async_write);
447
448
449/*
450 * fs/buffer.c contains helper functions for buffer-backed address space's
451 * fsync functions. A common requirement for buffer-based filesystems is
452 * that certain data from the backing blockdev needs to be written out for
453 * a successful fsync(). For example, ext2 indirect blocks need to be
454 * written back and waited upon before fsync() returns.
455 *
456 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
457 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
458 * management of a list of dependent buffers at ->i_mapping->private_list.
459 *
460 * Locking is a little subtle: try_to_free_buffers() will remove buffers
461 * from their controlling inode's queue when they are being freed. But
462 * try_to_free_buffers() will be operating against the *blockdev* mapping
463 * at the time, not against the S_ISREG file which depends on those buffers.
464 * So the locking for private_list is via the private_lock in the address_space
465 * which backs the buffers. Which is different from the address_space
466 * against which the buffers are listed. So for a particular address_space,
467 * mapping->private_lock does *not* protect mapping->private_list! In fact,
468 * mapping->private_list will always be protected by the backing blockdev's
469 * ->private_lock.
470 *
471 * Which introduces a requirement: all buffers on an address_space's
472 * ->private_list must be from the same address_space: the blockdev's.
473 *
474 * address_spaces which do not place buffers at ->private_list via these
475 * utility functions are free to use private_lock and private_list for
476 * whatever they want. The only requirement is that list_empty(private_list)
477 * be true at clear_inode() time.
478 *
479 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
480 * filesystems should do that. invalidate_inode_buffers() should just go
481 * BUG_ON(!list_empty).
482 *
483 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
484 * take an address_space, not an inode. And it should be called
485 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
486 * queued up.
487 *
488 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
489 * list if it is already on a list. Because if the buffer is on a list,
490 * it *must* already be on the right one. If not, the filesystem is being
491 * silly. This will save a ton of locking. But first we have to ensure
492 * that buffers are taken *off* the old inode's list when they are freed
493 * (presumably in truncate). That requires careful auditing of all
494 * filesystems (do it inside bforget()). It could also be done by bringing
495 * b_inode back.
496 */
497
498/*
499 * The buffer's backing address_space's private_lock must be held
500 */
501static void __remove_assoc_queue(struct buffer_head *bh)
502{
503 list_del_init(&bh->b_assoc_buffers);
504 WARN_ON(!bh->b_assoc_map);
505 if (buffer_write_io_error(bh))
506 set_bit(AS_EIO, &bh->b_assoc_map->flags);
507 bh->b_assoc_map = NULL;
508}
509
510int inode_has_buffers(struct inode *inode)
511{
512 return !list_empty(&inode->i_data.private_list);
513}
514
515/*
516 * osync is designed to support O_SYNC io. It waits synchronously for
517 * all already-submitted IO to complete, but does not queue any new
518 * writes to the disk.
519 *
520 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
521 * you dirty the buffers, and then use osync_inode_buffers to wait for
522 * completion. Any other dirty buffers which are not yet queued for
523 * write will not be flushed to disk by the osync.
524 */
525static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
526{
527 struct buffer_head *bh;
528 struct list_head *p;
529 int err = 0;
530
531 spin_lock(lock);
532repeat:
533 list_for_each_prev(p, list) {
534 bh = BH_ENTRY(p);
535 if (buffer_locked(bh)) {
536 get_bh(bh);
537 spin_unlock(lock);
538 wait_on_buffer(bh);
539 if (!buffer_uptodate(bh))
540 err = -EIO;
541 brelse(bh);
542 spin_lock(lock);
543 goto repeat;
544 }
545 }
546 spin_unlock(lock);
547 return err;
548}
549
550void do_thaw_all(struct work_struct *work)
551{
552 struct super_block *sb;
553 char b[BDEVNAME_SIZE];
554
555 spin_lock(&sb_lock);
556restart:
557 list_for_each_entry(sb, &super_blocks, s_list) {
558 sb->s_count++;
559 spin_unlock(&sb_lock);
560 down_read(&sb->s_umount);
561 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
562 printk(KERN_WARNING "Emergency Thaw on %s\n",
563 bdevname(sb->s_bdev, b));
564 up_read(&sb->s_umount);
565 spin_lock(&sb_lock);
566 if (__put_super_and_need_restart(sb))
567 goto restart;
568 }
569 spin_unlock(&sb_lock);
570 kfree(work);
571 printk(KERN_WARNING "Emergency Thaw complete\n");
572}
573
574/**
575 * emergency_thaw_all -- forcibly thaw every frozen filesystem
576 *
577 * Used for emergency unfreeze of all filesystems via SysRq
578 */
579void emergency_thaw_all(void)
580{
581 struct work_struct *work;
582
583 work = kmalloc(sizeof(*work), GFP_ATOMIC);
584 if (work) {
585 INIT_WORK(work, do_thaw_all);
586 schedule_work(work);
587 }
588}
589
590/**
591 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
592 * @mapping: the mapping which wants those buffers written
593 *
594 * Starts I/O against the buffers at mapping->private_list, and waits upon
595 * that I/O.
596 *
597 * Basically, this is a convenience function for fsync().
598 * @mapping is a file or directory which needs those buffers to be written for
599 * a successful fsync().
600 */
601int sync_mapping_buffers(struct address_space *mapping)
602{
603 struct address_space *buffer_mapping = mapping->assoc_mapping;
604
605 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
606 return 0;
607
608 return fsync_buffers_list(&buffer_mapping->private_lock,
609 &mapping->private_list);
610}
611EXPORT_SYMBOL(sync_mapping_buffers);
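/*
 * Illustrative sketch, not part of the original file: a simple block-based
 * filesystem's ->fsync typically flushes the metadata buffers it queued with
 * mark_buffer_dirty_inode() by calling sync_mapping_buffers(), then writes
 * the inode itself.  The "examplefs_" names are hypothetical.
 */
#if 0
static int examplefs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int err;

	/* write out and wait upon inode->i_mapping->private_list */
	err = sync_mapping_buffers(inode->i_mapping);
	if (!(inode->i_state & I_DIRTY))
		return err;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return err;
	err |= examplefs_write_inode(inode, 1);
	return err ? -EIO : 0;
}
#endif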
612
613/*
614 * Called when we've recently written block `bblock', and it is known that
615 * `bblock' was for a buffer_boundary() buffer. This means that the block at
616 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
617 * dirty, schedule it for IO. So that indirects merge nicely with their data.
618 */
619void write_boundary_block(struct block_device *bdev,
620 sector_t bblock, unsigned blocksize)
621{
622 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
623 if (bh) {
624 if (buffer_dirty(bh))
625 ll_rw_block(WRITE, 1, &bh);
626 put_bh(bh);
627 }
628}
629
630void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
631{
632 struct address_space *mapping = inode->i_mapping;
633 struct address_space *buffer_mapping = bh->b_page->mapping;
634
635 mark_buffer_dirty(bh);
636 if (!mapping->assoc_mapping) {
637 mapping->assoc_mapping = buffer_mapping;
638 } else {
639 BUG_ON(mapping->assoc_mapping != buffer_mapping);
640 }
641 if (!bh->b_assoc_map) {
642 spin_lock(&buffer_mapping->private_lock);
643 list_move_tail(&bh->b_assoc_buffers,
644 &mapping->private_list);
645 bh->b_assoc_map = mapping;
646 spin_unlock(&buffer_mapping->private_lock);
647 }
648}
649EXPORT_SYMBOL(mark_buffer_dirty_inode);
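/*
 * Illustrative sketch, not part of the original file: a filesystem that has
 * just modified an indirect (metadata) block belonging to an inode would
 * dirty the buffer and queue it on the inode's ->private_list like this, so
 * that a later fsync() picks it up via sync_mapping_buffers().  The
 * "examplefs_" names are hypothetical.
 */
#if 0
static void examplefs_set_indirect_entry(struct inode *inode,
					 struct buffer_head *bh,
					 int index, __le32 new_block)
{
	__le32 *entries = (__le32 *)bh->b_data;

	entries[index] = new_block;		/* modify the metadata */
	mark_buffer_dirty_inode(bh, inode);	/* dirty + attach to the inode */
}
#endif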
650
651/*
652 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
653 * dirty.
654 *
655 * If warn is true, then emit a warning if the page is not uptodate and has
656 * not been truncated.
657 */
658static void __set_page_dirty(struct page *page,
659 struct address_space *mapping, int warn)
660{
661 spin_lock_irq(&mapping->tree_lock);
662 if (page->mapping) { /* Race with truncate? */
663 WARN_ON_ONCE(warn && !PageUptodate(page));
664 account_page_dirtied(page, mapping);
665 radix_tree_tag_set(&mapping->page_tree,
666 page_index(page), PAGECACHE_TAG_DIRTY);
667 }
668 spin_unlock_irq(&mapping->tree_lock);
669 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
670}
671
672/*
673 * Add a page to the dirty page list.
674 *
675 * It is a sad fact of life that this function is called from several places
676 * deeply under spinlocking. It may not sleep.
677 *
678 * If the page has buffers, the uptodate buffers are set dirty, to preserve
679 * dirty-state coherency between the page and the buffers. If the page does
680 * not have buffers then when they are later attached they will all be set
681 * dirty.
682 *
683 * The buffers are dirtied before the page is dirtied. There's a small race
684 * window in which a writepage caller may see the page cleanness but not the
685 * buffer dirtiness. That's fine. If this code were to set the page dirty
686 * before the buffers, a concurrent writepage caller could clear the page dirty
687 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
688 * page on the dirty page list.
689 *
690 * We use private_lock to lock against try_to_free_buffers while using the
691 * page's buffer list. Also use this to protect against clean buffers being
692 * added to the page after it was set dirty.
693 *
694 * FIXME: may need to call ->reservepage here as well. That's rather up to the
695 * address_space though.
696 */
697int __set_page_dirty_buffers(struct page *page)
698{
699 int newly_dirty;
700 struct address_space *mapping = page_mapping(page);
701
702 if (unlikely(!mapping))
703 return !TestSetPageDirty(page);
704
705 spin_lock(&mapping->private_lock);
706 if (page_has_buffers(page)) {
707 struct buffer_head *head = page_buffers(page);
708 struct buffer_head *bh = head;
709
710 do {
711 set_buffer_dirty(bh);
712 bh = bh->b_this_page;
713 } while (bh != head);
714 }
715 newly_dirty = !TestSetPageDirty(page);
716 spin_unlock(&mapping->private_lock);
717
718 if (newly_dirty)
719 __set_page_dirty(page, mapping, 1);
720 return newly_dirty;
721}
722EXPORT_SYMBOL(__set_page_dirty_buffers);
723
724/*
725 * Write out and wait upon a list of buffers.
726 *
727 * We have conflicting pressures: we want to make sure that all
728 * initially dirty buffers get waited on, but that any subsequently
729 * dirtied buffers don't. After all, we don't want fsync to last
730 * forever if somebody is actively writing to the file.
731 *
732 * Do this in two main stages: first we copy dirty buffers to a
733 * temporary inode list, queueing the writes as we go. Then we clean
734 * up, waiting for those writes to complete.
735 *
736 * During this second stage, any subsequent updates to the file may end
737 * up refiling the buffer on the original inode's dirty list again, so
738 * there is a chance we will end up with a buffer queued for write but
739 * not yet completed on that list. So, as a final cleanup we go through
740 * the osync code to catch these locked, dirty buffers without requeuing
741 * any newly dirty buffers for write.
742 */
743static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
744{
745 struct buffer_head *bh;
746 struct list_head tmp;
747 struct address_space *mapping, *prev_mapping = NULL;
748 int err = 0, err2;
749
750 INIT_LIST_HEAD(&tmp);
751
752 spin_lock(lock);
753 while (!list_empty(list)) {
754 bh = BH_ENTRY(list->next);
755 mapping = bh->b_assoc_map;
756 __remove_assoc_queue(bh);
757 /* Avoid race with mark_buffer_dirty_inode() which does
758 * a lockless check and we rely on seeing the dirty bit */
759 smp_mb();
760 if (buffer_dirty(bh) || buffer_locked(bh)) {
761 list_add(&bh->b_assoc_buffers, &tmp);
762 bh->b_assoc_map = mapping;
763 if (buffer_dirty(bh)) {
764 get_bh(bh);
765 spin_unlock(lock);
766 /*
767 * Ensure any pending I/O completes so that
768 * ll_rw_block() actually writes the current
769 * contents - it is a noop if I/O is still in
770 * flight on potentially older contents.
771 */
772 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
773
774 /*
775 * Kick off IO for the previous mapping. Note
776 * that we will not run the very last mapping,
777 * wait_on_buffer() will do that for us
778 * through sync_buffer().
779 */
780 if (prev_mapping && prev_mapping != mapping)
781 blk_run_address_space(prev_mapping);
782 prev_mapping = mapping;
783
784 brelse(bh);
785 spin_lock(lock);
786 }
787 }
788 }
789
790 while (!list_empty(&tmp)) {
791 bh = BH_ENTRY(tmp.prev);
792 get_bh(bh);
793 mapping = bh->b_assoc_map;
794 __remove_assoc_queue(bh);
795 /* Avoid race with mark_buffer_dirty_inode() which does
796 * a lockless check and we rely on seeing the dirty bit */
797 smp_mb();
798 if (buffer_dirty(bh)) {
799 list_add(&bh->b_assoc_buffers,
800 &mapping->private_list);
801 bh->b_assoc_map = mapping;
802 }
803 spin_unlock(lock);
804 wait_on_buffer(bh);
805 if (!buffer_uptodate(bh))
806 err = -EIO;
807 brelse(bh);
808 spin_lock(lock);
809 }
810
811 spin_unlock(lock);
812 err2 = osync_buffers_list(lock, list);
813 if (err)
814 return err;
815 else
816 return err2;
817}
818
819/*
820 * Invalidate any and all dirty buffers on a given inode. We are
821 * probably unmounting the fs, but that doesn't mean we have already
822 * done a sync(). Just drop the buffers from the inode list.
823 *
824 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
825 * assumes that all the buffers are against the blockdev. Not true
826 * for reiserfs.
827 */
828void invalidate_inode_buffers(struct inode *inode)
829{
830 if (inode_has_buffers(inode)) {
831 struct address_space *mapping = &inode->i_data;
832 struct list_head *list = &mapping->private_list;
833 struct address_space *buffer_mapping = mapping->assoc_mapping;
834
835 spin_lock(&buffer_mapping->private_lock);
836 while (!list_empty(list))
837 __remove_assoc_queue(BH_ENTRY(list->next));
838 spin_unlock(&buffer_mapping->private_lock);
839 }
840}
841EXPORT_SYMBOL(invalidate_inode_buffers);
842
843/*
844 * Remove any clean buffers from the inode's buffer list. This is called
845 * when we're trying to free the inode itself. Those buffers can pin it.
846 *
847 * Returns true if all buffers were removed.
848 */
849int remove_inode_buffers(struct inode *inode)
850{
851 int ret = 1;
852
853 if (inode_has_buffers(inode)) {
854 struct address_space *mapping = &inode->i_data;
855 struct list_head *list = &mapping->private_list;
856 struct address_space *buffer_mapping = mapping->assoc_mapping;
857
858 spin_lock(&buffer_mapping->private_lock);
859 while (!list_empty(list)) {
860 struct buffer_head *bh = BH_ENTRY(list->next);
861 if (buffer_dirty(bh)) {
862 ret = 0;
863 break;
864 }
865 __remove_assoc_queue(bh);
866 }
867 spin_unlock(&buffer_mapping->private_lock);
868 }
869 return ret;
870}
871
872/*
873 * Create the appropriate buffers when given a page for data area and
874 * the size of each buffer.. Use the bh->b_this_page linked list to
875 * follow the buffers created. Return NULL if unable to create more
876 * buffers.
877 *
878 * The retry flag is used to differentiate async IO (paging, swapping)
879 * which may not fail from ordinary buffer allocations.
880 */
881struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
882 int retry)
883{
884 struct buffer_head *bh, *head;
885 long offset;
886
887try_again:
888 head = NULL;
889 offset = PAGE_SIZE;
890 while ((offset -= size) >= 0) {
891 bh = alloc_buffer_head(GFP_NOFS);
892 if (!bh)
893 goto no_grow;
894
895 bh->b_bdev = NULL;
896 bh->b_this_page = head;
897 bh->b_blocknr = -1;
898 head = bh;
899
900 bh->b_state = 0;
901 atomic_set(&bh->b_count, 0);
902 bh->b_private = NULL;
903 bh->b_size = size;
904
905 /* Link the buffer to its page */
906 set_bh_page(bh, page, offset);
907
908 init_buffer(bh, NULL, NULL);
909 }
910 return head;
911/*
912 * In case anything failed, we just free everything we got.
913 */
914no_grow:
915 if (head) {
916 do {
917 bh = head;
918 head = head->b_this_page;
919 free_buffer_head(bh);
920 } while (head);
921 }
922
923 /*
924 * Return failure for non-async IO requests. Async IO requests
925 * are not allowed to fail, so we have to wait until buffer heads
926 * become available. But we don't want tasks sleeping with
927 * partially complete buffers, so all were released above.
928 */
929 if (!retry)
930 return NULL;
931
932 /* We're _really_ low on memory. Now we just
933 * wait for old buffer heads to become free due to
934 * finishing IO. Since this is an async request and
935 * the reserve list is empty, we're sure there are
936 * async buffer heads in use.
937 */
938 free_more_memory();
939 goto try_again;
940}
941EXPORT_SYMBOL_GPL(alloc_page_buffers);
942
943static inline void
944link_dev_buffers(struct page *page, struct buffer_head *head)
945{
946 struct buffer_head *bh, *tail;
947
948 bh = head;
949 do {
950 tail = bh;
951 bh = bh->b_this_page;
952 } while (bh);
953 tail->b_this_page = head;
954 attach_page_buffers(page, head);
955}
956
957/*
958 * Initialise the state of a blockdev page's buffers.
959 */
960static void
961init_page_buffers(struct page *page, struct block_device *bdev,
962 sector_t block, int size)
963{
964 struct buffer_head *head = page_buffers(page);
965 struct buffer_head *bh = head;
966 int uptodate = PageUptodate(page);
967
968 do {
969 if (!buffer_mapped(bh)) {
970 init_buffer(bh, NULL, NULL);
971 bh->b_bdev = bdev;
972 bh->b_blocknr = block;
973 if (uptodate)
974 set_buffer_uptodate(bh);
975 set_buffer_mapped(bh);
976 }
977 block++;
978 bh = bh->b_this_page;
979 } while (bh != head);
980}
981
982/*
983 * Create the page-cache page that contains the requested block.
984 *
985 * This is used purely for blockdev mappings.
986 */
987static struct page *
988grow_dev_page(struct block_device *bdev, sector_t block,
989 pgoff_t index, int size)
990{
991 struct inode *inode = bdev->bd_inode;
992 struct page *page;
993 struct buffer_head *bh;
994
995 page = find_or_create_page(inode->i_mapping, index,
996 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
997 if (!page)
998 return NULL;
999
1000 BUG_ON(!PageLocked(page));
1001
1002 if (page_has_buffers(page)) {
1003 bh = page_buffers(page);
1004 if (bh->b_size == size) {
1005 init_page_buffers(page, bdev, block, size);
1006 return page;
1007 }
1008 if (!try_to_free_buffers(page))
1009 goto failed;
1010 }
1011
1012 /*
1013 * Allocate some buffers for this page
1014 */
1015 bh = alloc_page_buffers(page, size, 0);
1016 if (!bh)
1017 goto failed;
1018
1019 /*
1020 * Link the page to the buffers and initialise them. Take the
1021 * lock to be atomic wrt __find_get_block(), which does not
1022 * run under the page lock.
1023 */
1024 spin_lock(&inode->i_mapping->private_lock);
1025 link_dev_buffers(page, bh);
1026 init_page_buffers(page, bdev, block, size);
1027 spin_unlock(&inode->i_mapping->private_lock);
1028 return page;
1029
1030failed:
1031 BUG();
1032 unlock_page(page);
1033 page_cache_release(page);
1034 return NULL;
1035}
1036
1037/*
1038 * Create buffers for the specified block device block's page. If
1039 * that page was dirty, the buffers are set dirty also.
1040 */
1041static int
1042grow_buffers(struct block_device *bdev, sector_t block, int size)
1043{
1044 struct page *page;
1045 pgoff_t index;
1046 int sizebits;
1047
1048 sizebits = -1;
1049 do {
1050 sizebits++;
1051 } while ((size << sizebits) < PAGE_SIZE);
1052
1053 index = block >> sizebits;
1054
1055 /*
1056 * Check for a block which wants to lie outside our maximum possible
1057 * pagecache index. (this comparison is done using sector_t types).
1058 */
1059 if (unlikely(index != block >> sizebits)) {
1060 char b[BDEVNAME_SIZE];
1061
1062 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1063 "device %s\n",
1064 __func__, (unsigned long long)block,
1065 bdevname(bdev, b));
1066 return -EIO;
1067 }
1068 block = index << sizebits;
1069 /* Create a page with the proper size buffers.. */
1070 page = grow_dev_page(bdev, block, index, size);
1071 if (!page)
1072 return 0;
1073 unlock_page(page);
1074 page_cache_release(page);
1075 return 1;
1076}
1077
1078static struct buffer_head *
1079__getblk_slow(struct block_device *bdev, sector_t block, int size)
1080{
1081 /* Size must be multiple of hard sectorsize */
1082 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1083 (size < 512 || size > PAGE_SIZE))) {
1084 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1085 size);
1086 printk(KERN_ERR "hardsect size: %d\n",
1087 bdev_hardsect_size(bdev));
1088
1089 dump_stack();
1090 return NULL;
1091 }
1092
1093 for (;;) {
1094 struct buffer_head * bh;
1095 int ret;
1096
1097 bh = __find_get_block(bdev, block, size);
1098 if (bh)
1099 return bh;
1100
1101 ret = grow_buffers(bdev, block, size);
1102 if (ret < 0)
1103 return NULL;
1104 if (ret == 0)
1105 free_more_memory();
1106 }
1107}
1108
1109/*
1110 * The relationship between dirty buffers and dirty pages:
1111 *
1112 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1113 * the page is tagged dirty in its radix tree.
1114 *
1115 * At all times, the dirtiness of the buffers represents the dirtiness of
1116 * subsections of the page. If the page has buffers, the page dirty bit is
1117 * merely a hint about the true dirty state.
1118 *
1119 * When a page is set dirty in its entirety, all its buffers are marked dirty
1120 * (if the page has buffers).
1121 *
1122 * When a buffer is marked dirty, its page is dirtied, but the page's other
1123 * buffers are not.
1124 *
1125 * Also. When blockdev buffers are explicitly read with bread(), they
1126 * individually become uptodate. But their backing page remains not
1127 * uptodate - even if all of its buffers are uptodate. A subsequent
1128 * block_read_full_page() against that page will discover all the uptodate
1129 * buffers, will set the page uptodate and will perform no I/O.
1130 */
1131
1132/**
1133 * mark_buffer_dirty - mark a buffer_head as needing writeout
1134 * @bh: the buffer_head to mark dirty
1135 *
1136 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1137 * backing page dirty, then tag the page as dirty in its address_space's radix
1138 * tree and then attach the address_space's inode to its superblock's dirty
1139 * inode list.
1140 *
1141 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1142 * mapping->tree_lock and the global inode_lock.
1143 */
1144void mark_buffer_dirty(struct buffer_head *bh)
1145{
1146 WARN_ON_ONCE(!buffer_uptodate(bh));
1147
1148 /*
1149 * Very *carefully* optimize the it-is-already-dirty case.
1150 *
1151 * Don't let the final "is it dirty" escape to before we
1152 * perhaps modified the buffer.
1153 */
1154 if (buffer_dirty(bh)) {
1155 smp_mb();
1156 if (buffer_dirty(bh))
1157 return;
1158 }
1159
1160 if (!test_set_buffer_dirty(bh)) {
1161 struct page *page = bh->b_page;
1162 if (!TestSetPageDirty(page))
1163 __set_page_dirty(page, page_mapping(page), 0);
1164 }
1165}
1166
1167/*
1168 * Decrement a buffer_head's reference count. If all buffers against a page
1169 * have zero reference count, are clean and unlocked, and if the page is clean
1170 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1171 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1172 * a page but it ends up not being freed, and buffers may later be reattached).
1173 */
1174void __brelse(struct buffer_head * buf)
1175{
1176 if (atomic_read(&buf->b_count)) {
1177 put_bh(buf);
1178 return;
1179 }
1180 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1181}
1182
1183/*
1184 * bforget() is like brelse(), except it discards any
1185 * potentially dirty data.
1186 */
1187void __bforget(struct buffer_head *bh)
1188{
1189 clear_buffer_dirty(bh);
1190 if (bh->b_assoc_map) {
1191 struct address_space *buffer_mapping = bh->b_page->mapping;
1192
1193 spin_lock(&buffer_mapping->private_lock);
1194 list_del_init(&bh->b_assoc_buffers);
1195 bh->b_assoc_map = NULL;
1196 spin_unlock(&buffer_mapping->private_lock);
1197 }
1198 __brelse(bh);
1199}
1200
1201static struct buffer_head *__bread_slow(struct buffer_head *bh)
1202{
1203 lock_buffer(bh);
1204 if (buffer_uptodate(bh)) {
1205 unlock_buffer(bh);
1206 return bh;
1207 } else {
1208 get_bh(bh);
1209 bh->b_end_io = end_buffer_read_sync;
1210 submit_bh(READ, bh);
1211 wait_on_buffer(bh);
1212 if (buffer_uptodate(bh))
1213 return bh;
1214 }
1215 brelse(bh);
1216 return NULL;
1217}
1218
1219/*
1220 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1221 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1222 * refcount elevated by one when they're in an LRU. A buffer can only appear
1223 * once in a particular CPU's LRU. A single buffer can be present in multiple
1224 * CPU's LRUs at the same time.
1225 *
1226 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1227 * sb_find_get_block().
1228 *
1229 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1230 * a local interrupt disable for that.
1231 */
1232
1233#define BH_LRU_SIZE 8
1234
1235struct bh_lru {
1236 struct buffer_head *bhs[BH_LRU_SIZE];
1237};
1238
1239static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1240
1241#ifdef CONFIG_SMP
1242#define bh_lru_lock() local_irq_disable()
1243#define bh_lru_unlock() local_irq_enable()
1244#else
1245#define bh_lru_lock() preempt_disable()
1246#define bh_lru_unlock() preempt_enable()
1247#endif
1248
1249static inline void check_irqs_on(void)
1250{
1251#ifdef irqs_disabled
1252 BUG_ON(irqs_disabled());
1253#endif
1254}
1255
1256/*
1257 * The LRU management algorithm is dopey-but-simple. Sorry.
1258 */
1259static void bh_lru_install(struct buffer_head *bh)
1260{
1261 struct buffer_head *evictee = NULL;
1262 struct bh_lru *lru;
1263
1264 check_irqs_on();
1265 bh_lru_lock();
1266 lru = &__get_cpu_var(bh_lrus);
1267 if (lru->bhs[0] != bh) {
1268 struct buffer_head *bhs[BH_LRU_SIZE];
1269 int in;
1270 int out = 0;
1271
1272 get_bh(bh);
1273 bhs[out++] = bh;
1274 for (in = 0; in < BH_LRU_SIZE; in++) {
1275 struct buffer_head *bh2 = lru->bhs[in];
1276
1277 if (bh2 == bh) {
1278 __brelse(bh2);
1279 } else {
1280 if (out >= BH_LRU_SIZE) {
1281 BUG_ON(evictee != NULL);
1282 evictee = bh2;
1283 } else {
1284 bhs[out++] = bh2;
1285 }
1286 }
1287 }
1288 while (out < BH_LRU_SIZE)
1289 bhs[out++] = NULL;
1290 memcpy(lru->bhs, bhs, sizeof(bhs));
1291 }
1292 bh_lru_unlock();
1293
1294 if (evictee)
1295 __brelse(evictee);
1296}
1297
1298/*
1299 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1300 */
1301static struct buffer_head *
1302lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1303{
1304 struct buffer_head *ret = NULL;
1305 struct bh_lru *lru;
1306 unsigned int i;
1307
1308 check_irqs_on();
1309 bh_lru_lock();
1310 lru = &__get_cpu_var(bh_lrus);
1311 for (i = 0; i < BH_LRU_SIZE; i++) {
1312 struct buffer_head *bh = lru->bhs[i];
1313
1314 if (bh && bh->b_bdev == bdev &&
1315 bh->b_blocknr == block && bh->b_size == size) {
1316 if (i) {
1317 while (i) {
1318 lru->bhs[i] = lru->bhs[i - 1];
1319 i--;
1320 }
1321 lru->bhs[0] = bh;
1322 }
1323 get_bh(bh);
1324 ret = bh;
1325 break;
1326 }
1327 }
1328 bh_lru_unlock();
1329 return ret;
1330}
1331
1332/*
1333 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1334 * it in the LRU and mark it as accessed. If it is not present then return
1335 * NULL
1336 */
1337struct buffer_head *
1338__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1339{
1340 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1341
1342 if (bh == NULL) {
1343 bh = __find_get_block_slow(bdev, block);
1344 if (bh)
1345 bh_lru_install(bh);
1346 }
1347 if (bh)
1348 touch_buffer(bh);
1349 return bh;
1350}
1351EXPORT_SYMBOL(__find_get_block);
1352
1353/*
1354 * __getblk will locate (and, if necessary, create) the buffer_head
1355 * which corresponds to the passed block_device, block and size. The
1356 * returned buffer has its reference count incremented.
1357 *
1358 * __getblk() cannot fail - it just keeps trying. If you pass it an
1359 * illegal block number, __getblk() will happily return a buffer_head
1360 * which represents the non-existent block. Very weird.
1361 *
1362 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1363 * attempt is failing. FIXME, perhaps?
1364 */
1365struct buffer_head *
1366__getblk(struct block_device *bdev, sector_t block, unsigned size)
1367{
1368 struct buffer_head *bh = __find_get_block(bdev, block, size);
1369
1370 might_sleep();
1371 if (bh == NULL)
1372 bh = __getblk_slow(bdev, block, size);
1373 return bh;
1374}
1375EXPORT_SYMBOL(__getblk);
1376
1377/*
1378 * Do async read-ahead on a buffer..
1379 */
1380void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1381{
1382 struct buffer_head *bh = __getblk(bdev, block, size);
1383 if (likely(bh)) {
1384 ll_rw_block(READA, 1, &bh);
1385 brelse(bh);
1386 }
1387}
1388EXPORT_SYMBOL(__breadahead);
1389
1390/**
1391 * __bread() - reads a specified block and returns the bh
1392 * @bdev: the block_device to read from
1393 * @block: number of block
1394 * @size: size (in bytes) to read
1395 *
1396 * Reads a specified block, and returns buffer head that contains it.
1397 * It returns NULL if the block was unreadable.
1398 */
1399struct buffer_head *
1400__bread(struct block_device *bdev, sector_t block, unsigned size)
1401{
1402 struct buffer_head *bh = __getblk(bdev, block, size);
1403
1404 if (likely(bh) && !buffer_uptodate(bh))
1405 bh = __bread_slow(bh);
1406 return bh;
1407}
1408EXPORT_SYMBOL(__bread);
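/*
 * Illustrative sketch, not part of the original file: typical use of the
 * __bread()/__getblk() interface from a filesystem, here via the sb_bread()
 * wrapper, which supplies the superblock's bdev and blocksize.  The
 * "examplefs_" name and the block number are hypothetical.
 */
#if 0
static int examplefs_read_group_descriptor(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh;

	bh = sb_bread(sb, blocknr);	/* __bread(sb->s_bdev, blocknr, sb->s_blocksize) */
	if (!bh)
		return -EIO;		/* block was unreadable */

	/* ... parse bh->b_data ... */

	brelse(bh);			/* drop the reference taken by __bread() */
	return 0;
}
#endif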
1409
1410/*
1411 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1412 * This doesn't race because it runs in each cpu either in irq
1413 * or with preempt disabled.
1414 */
1415static void invalidate_bh_lru(void *arg)
1416{
1417 struct bh_lru *b = &get_cpu_var(bh_lrus);
1418 int i;
1419
1420 for (i = 0; i < BH_LRU_SIZE; i++) {
1421 brelse(b->bhs[i]);
1422 b->bhs[i] = NULL;
1423 }
1424 put_cpu_var(bh_lrus);
1425}
1426
1427void invalidate_bh_lrus(void)
1428{
1429 on_each_cpu(invalidate_bh_lru, NULL, 1);
1430}
1431EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1432
1433void set_bh_page(struct buffer_head *bh,
1434 struct page *page, unsigned long offset)
1435{
1436 bh->b_page = page;
1437 BUG_ON(offset >= PAGE_SIZE);
1438 if (PageHighMem(page))
1439 /*
1440 * This catches illegal uses and preserves the offset:
1441 */
1442 bh->b_data = (char *)(0 + offset);
1443 else
1444 bh->b_data = page_address(page) + offset;
1445}
1446EXPORT_SYMBOL(set_bh_page);
1447
1448/*
1449 * Called when truncating a buffer on a page completely.
1450 */
1451static void discard_buffer(struct buffer_head * bh)
1452{
1453 lock_buffer(bh);
1454 clear_buffer_dirty(bh);
1455 bh->b_bdev = NULL;
1456 clear_buffer_mapped(bh);
1457 clear_buffer_req(bh);
1458 clear_buffer_new(bh);
1459 clear_buffer_delay(bh);
1460 clear_buffer_unwritten(bh);
1461 unlock_buffer(bh);
1462}
1463
1464/**
1465 * block_invalidatepage - invalidate part or all of a buffer-backed page
1466 *
1467 * @page: the page which is affected
1468 * @offset: the index of the truncation point
1469 *
1470 * block_invalidatepage() is called when all or part of the page has become
1471 * invalidated by a truncate operation.
1472 *
1473 * block_invalidatepage() does not have to release all buffers, but it must
1474 * ensure that no dirty buffer is left outside @offset and that no I/O
1475 * is underway against any of the blocks which are outside the truncation
1476 * point. Because the caller is about to free (and possibly reuse) those
1477 * blocks on-disk.
1478 */
1479void block_invalidatepage(struct page *page, unsigned long offset)
1480{
1481 struct buffer_head *head, *bh, *next;
1482 unsigned int curr_off = 0;
1483
1484 BUG_ON(!PageLocked(page));
1485 if (!page_has_buffers(page))
1486 goto out;
1487
1488 head = page_buffers(page);
1489 bh = head;
1490 do {
1491 unsigned int next_off = curr_off + bh->b_size;
1492 next = bh->b_this_page;
1493
1494 /*
1495 * is this block fully invalidated?
1496 */
1497 if (offset <= curr_off)
1498 discard_buffer(bh);
1499 curr_off = next_off;
1500 bh = next;
1501 } while (bh != head);
1502
1503 /*
1504 * We release buffers only if the entire page is being invalidated.
1505 * The get_block cached value has been unconditionally invalidated,
1506 * so real IO is not possible anymore.
1507 */
1508 if (offset == 0)
1509 try_to_release_page(page, 0);
1510out:
1511 return;
1512}
1513EXPORT_SYMBOL(block_invalidatepage);
1514
1515/*
1516 * We attach and possibly dirty the buffers atomically wrt
1517 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1518 * is already excluded via the page lock.
1519 */
1520void create_empty_buffers(struct page *page,
1521 unsigned long blocksize, unsigned long b_state)
1522{
1523 struct buffer_head *bh, *head, *tail;
1524
1525 head = alloc_page_buffers(page, blocksize, 1);
1526 bh = head;
1527 do {
1528 bh->b_state |= b_state;
1529 tail = bh;
1530 bh = bh->b_this_page;
1531 } while (bh);
1532 tail->b_this_page = head;
1533
1534 spin_lock(&page->mapping->private_lock);
1535 if (PageUptodate(page) || PageDirty(page)) {
1536 bh = head;
1537 do {
1538 if (PageDirty(page))
1539 set_buffer_dirty(bh);
1540 if (PageUptodate(page))
1541 set_buffer_uptodate(bh);
1542 bh = bh->b_this_page;
1543 } while (bh != head);
1544 }
1545 attach_page_buffers(page, head);
1546 spin_unlock(&page->mapping->private_lock);
1547}
1548EXPORT_SYMBOL(create_empty_buffers);
1549
1550/*
1551 * We are taking a block for data and we don't want any output from any
1552 * buffer-cache aliases starting from return from that function and
1553 * until the moment when something will explicitly mark the buffer
1554 * dirty (hopefully that will not happen until we will free that block ;-)
1555 * We don't even need to mark it not-uptodate - nobody can expect
1556 * anything from a newly allocated buffer anyway. We used to use
1557 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1558 * don't want to mark the alias unmapped, for example - it would confuse
1559 * anyone who might pick it with bread() afterwards...
1560 *
1561 * Also.. Note that bforget() doesn't lock the buffer. So there can
1562 * be writeout I/O going on against recently-freed buffers. We don't
1563 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1564 * only if we really need to. That happens here.
1565 */
1566void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1567{
1568 struct buffer_head *old_bh;
1569
1570 might_sleep();
1571
1572 old_bh = __find_get_block_slow(bdev, block);
1573 if (old_bh) {
1574 clear_buffer_dirty(old_bh);
1575 wait_on_buffer(old_bh);
1576 clear_buffer_req(old_bh);
1577 __brelse(old_bh);
1578 }
1579}
1580EXPORT_SYMBOL(unmap_underlying_metadata);
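/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * get_block() normally just maps the buffer and marks a fresh allocation
 * with set_buffer_new(); the generic helpers in this file (such as
 * __block_prepare_write() and __block_write_full_page()) then call
 * unmap_underlying_metadata() on it to kill any stale blockdev alias.
 * The "examplefs_" name is hypothetical.
 */
#if 0
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	sector_t phys = 0;
	int new = 0;

	/* ... look up, or (if 'create') allocate, the on-disk block 'phys' ... */

	map_bh(bh_result, inode->i_sb, phys);
	if (new)
		set_buffer_new(bh_result);	/* caller will unmap any alias */
	return 0;
}
#endif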
1581
1582/*
1583 * NOTE! All mapped/uptodate combinations are valid:
1584 *
1585 * Mapped Uptodate Meaning
1586 *
1587 * No No "unknown" - must do get_block()
1588 * No Yes "hole" - zero-filled
1589 * Yes No "allocated" - allocated on disk, not read in
1590 * Yes Yes "valid" - allocated and up-to-date in memory.
1591 *
1592 * "Dirty" is valid only with the last case (mapped+uptodate).
1593 */
1594
1595/*
1596 * While block_write_full_page is writing back the dirty buffers under
1597 * the page lock, whoever dirtied the buffers may decide to clean them
1598 * again at any time. We handle that by only looking at the buffer
1599 * state inside lock_buffer().
1600 *
1601 * If block_write_full_page() is called for regular writeback
1602 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1603 * locked buffer. This only can happen if someone has written the buffer
1604 * directly, with submit_bh(). At the address_space level PageWriteback
1605 * prevents this contention from occurring.
1606 *
1607 * If block_write_full_page() is called with wbc->sync_mode ==
1608 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1609 * causes the writes to be flagged as synchronous writes, but the
1610 * block device queue will NOT be unplugged, since usually many pages
1611 * will be pushed out before the higher-level caller actually
1612 * waits for the writes to be completed. The various wait functions,
1613 * such as wait_on_writeback_range() will ultimately call sync_page()
1614 * which will ultimately call blk_run_backing_dev(), which will end up
1615 * unplugging the device queue.
1616 */
1617static int __block_write_full_page(struct inode *inode, struct page *page,
1618 get_block_t *get_block, struct writeback_control *wbc)
1619{
1620 int err;
1621 sector_t block;
1622 sector_t last_block;
1623 struct buffer_head *bh, *head;
1624 const unsigned blocksize = 1 << inode->i_blkbits;
1625 int nr_underway = 0;
1626 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1627 WRITE_SYNC_PLUG : WRITE);
1628
1629 BUG_ON(!PageLocked(page));
1630
1631 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1632
1633 if (!page_has_buffers(page)) {
1634 create_empty_buffers(page, blocksize,
1635 (1 << BH_Dirty)|(1 << BH_Uptodate));
1636 }
1637
1638 /*
1639 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1640 * here, and the (potentially unmapped) buffers may become dirty at
1641 * any time. If a buffer becomes dirty here after we've inspected it
1642 * then we just miss that fact, and the page stays dirty.
1643 *
1644 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1645 * handle that here by just cleaning them.
1646 */
1647
1648 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1649 head = page_buffers(page);
1650 bh = head;
1651
1652 /*
1653 * Get all the dirty buffers mapped to disk addresses and
1654 * handle any aliases from the underlying blockdev's mapping.
1655 */
1656 do {
1657 if (block > last_block) {
1658 /*
1659 * mapped buffers outside i_size will occur, because
1660 * this page can be outside i_size when there is a
1661 * truncate in progress.
1662 */
1663 /*
1664 * The buffer was zeroed by block_write_full_page()
1665 */
1666 clear_buffer_dirty(bh);
1667 set_buffer_uptodate(bh);
1668 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1669 buffer_dirty(bh)) {
1670 WARN_ON(bh->b_size != blocksize);
1671 err = get_block(inode, block, bh, 1);
1672 if (err)
1673 goto recover;
1674 clear_buffer_delay(bh);
1675 if (buffer_new(bh)) {
1676 /* blockdev mappings never come here */
1677 clear_buffer_new(bh);
1678 unmap_underlying_metadata(bh->b_bdev,
1679 bh->b_blocknr);
1680 }
1681 }
1682 bh = bh->b_this_page;
1683 block++;
1684 } while (bh != head);
1685
1686 do {
1687 if (!buffer_mapped(bh))
1688 continue;
1689 /*
1690 * If it's a fully non-blocking write attempt and we cannot
1691 * lock the buffer then redirty the page. Note that this can
1692 * potentially cause a busy-wait loop from pdflush and kswapd
1693 * activity, but those code paths have their own higher-level
1694 * throttling.
1695 */
1696 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1697 lock_buffer(bh);
1698 } else if (!trylock_buffer(bh)) {
1699 redirty_page_for_writepage(wbc, page);
1700 continue;
1701 }
1702 if (test_clear_buffer_dirty(bh)) {
1703 mark_buffer_async_write(bh);
1704 } else {
1705 unlock_buffer(bh);
1706 }
1707 } while ((bh = bh->b_this_page) != head);
1708
1709 /*
1710 * The page and its buffers are protected by PageWriteback(), so we can
1711 * drop the bh refcounts early.
1712 */
1713 BUG_ON(PageWriteback(page));
1714 set_page_writeback(page);
1715
1716 do {
1717 struct buffer_head *next = bh->b_this_page;
1718 if (buffer_async_write(bh)) {
1719 submit_bh(write_op, bh);
1720 nr_underway++;
1721 }
1722 bh = next;
1723 } while (bh != head);
1724 unlock_page(page);
1725
1726 err = 0;
1727done:
1728 if (nr_underway == 0) {
1729 /*
1730 * The page was marked dirty, but the buffers were
1731 * clean. Someone wrote them back by hand with
1732 * ll_rw_block/submit_bh. A rare case.
1733 */
1734 end_page_writeback(page);
1735
1736 /*
1737 * The page and buffer_heads can be released at any time from
1738 * here on.
1739 */
1740 }
1741 return err;
1742
1743recover:
1744 /*
1745 * ENOSPC, or some other error. We may already have added some
1746 * blocks to the file, so we need to write these out to avoid
1747 * exposing stale data.
1748 * The page is currently locked and not marked for writeback
1749 */
1750 bh = head;
1751 /* Recovery: lock and submit the mapped buffers */
1752 do {
1753 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1754 !buffer_delay(bh)) {
1755 lock_buffer(bh);
1756 mark_buffer_async_write(bh);
1757 } else {
1758 /*
1759 * The buffer may have been set dirty during
1760 * attachment to a dirty page.
1761 */
1762 clear_buffer_dirty(bh);
1763 }
1764 } while ((bh = bh->b_this_page) != head);
1765 SetPageError(page);
1766 BUG_ON(PageWriteback(page));
7e4c3690 1767 mapping_set_error(page->mapping, err);
1da177e4 1768 set_page_writeback(page);
1da177e4
LT
1769 do {
1770 struct buffer_head *next = bh->b_this_page;
1771 if (buffer_async_write(bh)) {
1772 clear_buffer_dirty(bh);
a64c8610 1773 submit_bh(write_op, bh);
1da177e4
LT
1774 nr_underway++;
1775 }
1da177e4
LT
1776 bh = next;
1777 } while (bh != head);
ffda9d30 1778 unlock_page(page);
1da177e4
LT
1779 goto done;
1780}
1781
afddba49
NP
1782/*
1783 * If a page has any new buffers, zero them out here, and mark them uptodate
1784 * and dirty so they'll be written out (in order to prevent uninitialised
1785 * block data from leaking). And clear the new bit.
1786 */
1787void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1788{
1789 unsigned int block_start, block_end;
1790 struct buffer_head *head, *bh;
1791
1792 BUG_ON(!PageLocked(page));
1793 if (!page_has_buffers(page))
1794 return;
1795
1796 bh = head = page_buffers(page);
1797 block_start = 0;
1798 do {
1799 block_end = block_start + bh->b_size;
1800
1801 if (buffer_new(bh)) {
1802 if (block_end > from && block_start < to) {
1803 if (!PageUptodate(page)) {
1804 unsigned start, size;
1805
1806 start = max(from, block_start);
1807 size = min(to, block_end) - start;
1808
eebd2aa3 1809 zero_user(page, start, size);
afddba49
NP
1810 set_buffer_uptodate(bh);
1811 }
1812
1813 clear_buffer_new(bh);
1814 mark_buffer_dirty(bh);
1815 }
1816 }
1817
1818 block_start = block_end;
1819 bh = bh->b_this_page;
1820 } while (bh != head);
1821}
1822EXPORT_SYMBOL(page_zero_new_buffers);
1823
1da177e4
LT
1824static int __block_prepare_write(struct inode *inode, struct page *page,
1825 unsigned from, unsigned to, get_block_t *get_block)
1826{
1827 unsigned block_start, block_end;
1828 sector_t block;
1829 int err = 0;
1830 unsigned blocksize, bbits;
1831 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1832
1833 BUG_ON(!PageLocked(page));
1834 BUG_ON(from > PAGE_CACHE_SIZE);
1835 BUG_ON(to > PAGE_CACHE_SIZE);
1836 BUG_ON(from > to);
1837
1838 blocksize = 1 << inode->i_blkbits;
1839 if (!page_has_buffers(page))
1840 create_empty_buffers(page, blocksize, 0);
1841 head = page_buffers(page);
1842
1843 bbits = inode->i_blkbits;
1844 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1845
1846 for(bh = head, block_start = 0; bh != head || !block_start;
1847 block++, block_start=block_end, bh = bh->b_this_page) {
1848 block_end = block_start + blocksize;
1849 if (block_end <= from || block_start >= to) {
1850 if (PageUptodate(page)) {
1851 if (!buffer_uptodate(bh))
1852 set_buffer_uptodate(bh);
1853 }
1854 continue;
1855 }
1856 if (buffer_new(bh))
1857 clear_buffer_new(bh);
1858 if (!buffer_mapped(bh)) {
b0cf2321 1859 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1860 err = get_block(inode, block, bh, 1);
1861 if (err)
f3ddbdc6 1862 break;
1da177e4 1863 if (buffer_new(bh)) {
1da177e4
LT
1864 unmap_underlying_metadata(bh->b_bdev,
1865 bh->b_blocknr);
1866 if (PageUptodate(page)) {
637aff46 1867 clear_buffer_new(bh);
1da177e4 1868 set_buffer_uptodate(bh);
637aff46 1869 mark_buffer_dirty(bh);
1da177e4
LT
1870 continue;
1871 }
eebd2aa3
CL
1872 if (block_end > to || block_start < from)
1873 zero_user_segments(page,
1874 to, block_end,
1875 block_start, from);
1da177e4
LT
1876 continue;
1877 }
1878 }
1879 if (PageUptodate(page)) {
1880 if (!buffer_uptodate(bh))
1881 set_buffer_uptodate(bh);
1882 continue;
1883 }
1884 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1885 !buffer_unwritten(bh) &&
1da177e4
LT
1886 (block_start < from || block_end > to)) {
1887 ll_rw_block(READ, 1, &bh);
1888 *wait_bh++=bh;
1889 }
1890 }
1891 /*
1892 * If we issued read requests - let them complete.
1893 */
1894 while(wait_bh > wait) {
1895 wait_on_buffer(*--wait_bh);
1896 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1897 err = -EIO;
1da177e4 1898 }
afddba49
NP
1899 if (unlikely(err))
1900 page_zero_new_buffers(page, from, to);
1da177e4
LT
1901 return err;
1902}
1903
1904static int __block_commit_write(struct inode *inode, struct page *page,
1905 unsigned from, unsigned to)
1906{
1907 unsigned block_start, block_end;
1908 int partial = 0;
1909 unsigned blocksize;
1910 struct buffer_head *bh, *head;
1911
1912 blocksize = 1 << inode->i_blkbits;
1913
1914 for(bh = head = page_buffers(page), block_start = 0;
1915 bh != head || !block_start;
1916 block_start=block_end, bh = bh->b_this_page) {
1917 block_end = block_start + blocksize;
1918 if (block_end <= from || block_start >= to) {
1919 if (!buffer_uptodate(bh))
1920 partial = 1;
1921 } else {
1922 set_buffer_uptodate(bh);
1923 mark_buffer_dirty(bh);
1924 }
afddba49 1925 clear_buffer_new(bh);
1da177e4
LT
1926 }
1927
1928 /*
1929 * If this is a partial write which happened to make all buffers
1930 * uptodate then we can optimize away a bogus readpage() for
1931 * the next read(). Here we 'discover' whether the page went
1932 * uptodate as a result of this (potentially partial) write.
1933 */
1934 if (!partial)
1935 SetPageUptodate(page);
1936 return 0;
1937}
1938
afddba49
NP
1939/*
1940 * block_write_begin takes care of the basic task of block allocation and
1941 * bringing partial write blocks uptodate first.
1942 *
1943 * If *pagep is not NULL, then block_write_begin uses the locked page
1944 * at *pagep rather than allocating its own. In this case, the page will
1945 * not be unlocked or deallocated on failure.
1946 */
1947int block_write_begin(struct file *file, struct address_space *mapping,
1948 loff_t pos, unsigned len, unsigned flags,
1949 struct page **pagep, void **fsdata,
1950 get_block_t *get_block)
1951{
1952 struct inode *inode = mapping->host;
1953 int status = 0;
1954 struct page *page;
1955 pgoff_t index;
1956 unsigned start, end;
1957 int ownpage = 0;
1958
1959 index = pos >> PAGE_CACHE_SHIFT;
1960 start = pos & (PAGE_CACHE_SIZE - 1);
1961 end = start + len;
1962
1963 page = *pagep;
1964 if (page == NULL) {
1965 ownpage = 1;
54566b2c 1966 page = grab_cache_page_write_begin(mapping, index, flags);
afddba49
NP
1967 if (!page) {
1968 status = -ENOMEM;
1969 goto out;
1970 }
1971 *pagep = page;
1972 } else
1973 BUG_ON(!PageLocked(page));
1974
1975 status = __block_prepare_write(inode, page, start, end, get_block);
1976 if (unlikely(status)) {
1977 ClearPageUptodate(page);
1978
1979 if (ownpage) {
1980 unlock_page(page);
1981 page_cache_release(page);
1982 *pagep = NULL;
1983
1984 /*
1985 * prepare_write() may have instantiated a few blocks
1986 * outside i_size. Trim these off again. Don't need
1987 * i_size_read because we hold i_mutex.
1988 */
1989 if (pos + len > inode->i_size)
1990 vmtruncate(inode, inode->i_size);
1991 }
afddba49
NP
1992 }
1993
1994out:
1995 return status;
1996}
1997EXPORT_SYMBOL(block_write_begin);
1998
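/*
 * Usage sketch (not part of fs/buffer.c): a typical ->write_begin simply
 * delegates to block_write_begin() with the filesystem's block-mapping
 * routine. myfs_write_begin and myfs_get_block are hypothetical names.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin grab and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}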
1999int block_write_end(struct file *file, struct address_space *mapping,
2000 loff_t pos, unsigned len, unsigned copied,
2001 struct page *page, void *fsdata)
2002{
2003 struct inode *inode = mapping->host;
2004 unsigned start;
2005
2006 start = pos & (PAGE_CACHE_SIZE - 1);
2007
2008 if (unlikely(copied < len)) {
2009 /*
2010 * The buffers that were written will now be uptodate, so we
2011 * don't have to worry about a readpage reading them and
2012 * overwriting a partial write. However if we have encountered
2013 * a short write and only partially written into a buffer, it
2014 * will not be marked uptodate, so a readpage might come in and
2015 * destroy our partial write.
2016 *
2017 * Do the simplest thing, and just treat any short write to a
2018 * non uptodate page as a zero-length write, and force the
2019 * caller to redo the whole thing.
2020 */
2021 if (!PageUptodate(page))
2022 copied = 0;
2023
2024 page_zero_new_buffers(page, start+copied, start+len);
2025 }
2026 flush_dcache_page(page);
2027
2028 /* This could be a short (even 0-length) commit */
2029 __block_commit_write(inode, page, start, start+copied);
2030
2031 return copied;
2032}
2033EXPORT_SYMBOL(block_write_end);
2034
2035int generic_write_end(struct file *file, struct address_space *mapping,
2036 loff_t pos, unsigned len, unsigned copied,
2037 struct page *page, void *fsdata)
2038{
2039 struct inode *inode = mapping->host;
c7d206b3 2040 int i_size_changed = 0;
afddba49
NP
2041
2042 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2043
2044 /*
2045 * No need to use i_size_read() here, the i_size
2046 * cannot change under us because we hold i_mutex.
2047 *
2048 * But it's important to update i_size while still holding page lock:
2049 * page writeout could otherwise come in and zero beyond i_size.
2050 */
2051 if (pos+copied > inode->i_size) {
2052 i_size_write(inode, pos+copied);
c7d206b3 2053 i_size_changed = 1;
afddba49
NP
2054 }
2055
2056 unlock_page(page);
2057 page_cache_release(page);
2058
c7d206b3
JK
2059 /*
2060 * Don't mark the inode dirty under page lock. First, it unnecessarily
2061 * makes the holding time of page lock longer. Second, it forces lock
2062 * ordering of page lock and transaction start for journaling
2063 * filesystems.
2064 */
2065 if (i_size_changed)
2066 mark_inode_dirty(inode);
2067
afddba49
NP
2068 return copied;
2069}
2070EXPORT_SYMBOL(generic_write_end);
2071
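/*
 * Usage sketch (not part of fs/buffer.c): generic_write_end() has the same
 * shape as the ->write_end address_space operation, so a filesystem that
 * pairs it with a block_write_begin()-based ->write_begin can plug it in
 * directly. All myfs_* names here are hypothetical.
 */
static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= myfs_bmap,
};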
8ab22b9a
HH
2072/*
2073 * block_is_partially_uptodate checks whether buffers within a page are
2074 * uptodate or not.
2075 *
2076 * Returns true if all buffers which correspond to a file portion
2077 * we want to read are uptodate.
2078 */
2079int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2080 unsigned long from)
2081{
2082 struct inode *inode = page->mapping->host;
2083 unsigned block_start, block_end, blocksize;
2084 unsigned to;
2085 struct buffer_head *bh, *head;
2086 int ret = 1;
2087
2088 if (!page_has_buffers(page))
2089 return 0;
2090
2091 blocksize = 1 << inode->i_blkbits;
2092 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2093 to = from + to;
2094 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2095 return 0;
2096
2097 head = page_buffers(page);
2098 bh = head;
2099 block_start = 0;
2100 do {
2101 block_end = block_start + blocksize;
2102 if (block_end > from && block_start < to) {
2103 if (!buffer_uptodate(bh)) {
2104 ret = 0;
2105 break;
2106 }
2107 if (block_end >= to)
2108 break;
2109 }
2110 block_start = block_end;
2111 bh = bh->b_this_page;
2112 } while (bh != head);
2113
2114 return ret;
2115}
2116EXPORT_SYMBOL(block_is_partially_uptodate);
2117
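/*
 * Usage sketch (not part of fs/buffer.c): this helper matches the
 * ->is_partially_uptodate address_space operation, so a buffer-backed
 * filesystem can hook it up verbatim, e.g.:
 *
 *	.is_partially_uptodate	= block_is_partially_uptodate,
 *
 * which lets a read that falls entirely within uptodate buffers proceed
 * without waiting for the whole page to become uptodate.
 */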
1da177e4
LT
2118/*
2119 * Generic "read page" function for block devices that have the normal
2120 * get_block functionality. This covers most block device filesystems.
2121 * Reads the page asynchronously --- the unlock_buffer() and
2122 * set/clear_buffer_uptodate() functions propagate buffer state into the
2123 * page struct once IO has completed.
2124 */
2125int block_read_full_page(struct page *page, get_block_t *get_block)
2126{
2127 struct inode *inode = page->mapping->host;
2128 sector_t iblock, lblock;
2129 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2130 unsigned int blocksize;
2131 int nr, i;
2132 int fully_mapped = 1;
2133
cd7619d6 2134 BUG_ON(!PageLocked(page));
1da177e4
LT
2135 blocksize = 1 << inode->i_blkbits;
2136 if (!page_has_buffers(page))
2137 create_empty_buffers(page, blocksize, 0);
2138 head = page_buffers(page);
2139
2140 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2141 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2142 bh = head;
2143 nr = 0;
2144 i = 0;
2145
2146 do {
2147 if (buffer_uptodate(bh))
2148 continue;
2149
2150 if (!buffer_mapped(bh)) {
c64610ba
AM
2151 int err = 0;
2152
1da177e4
LT
2153 fully_mapped = 0;
2154 if (iblock < lblock) {
b0cf2321 2155 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2156 err = get_block(inode, iblock, bh, 0);
2157 if (err)
1da177e4
LT
2158 SetPageError(page);
2159 }
2160 if (!buffer_mapped(bh)) {
eebd2aa3 2161 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2162 if (!err)
2163 set_buffer_uptodate(bh);
1da177e4
LT
2164 continue;
2165 }
2166 /*
2167 * get_block() might have updated the buffer
2168 * synchronously
2169 */
2170 if (buffer_uptodate(bh))
2171 continue;
2172 }
2173 arr[nr++] = bh;
2174 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2175
2176 if (fully_mapped)
2177 SetPageMappedToDisk(page);
2178
2179 if (!nr) {
2180 /*
2181 * All buffers are uptodate - we can set the page uptodate
2182 * as well. But not if get_block() returned an error.
2183 */
2184 if (!PageError(page))
2185 SetPageUptodate(page);
2186 unlock_page(page);
2187 return 0;
2188 }
2189
2190 /* Stage two: lock the buffers */
2191 for (i = 0; i < nr; i++) {
2192 bh = arr[i];
2193 lock_buffer(bh);
2194 mark_buffer_async_read(bh);
2195 }
2196
2197 /*
2198 * Stage 3: start the IO. Check for uptodateness
2199 * inside the buffer lock in case another process reading
2200 * the underlying blockdev brought it uptodate (the sct fix).
2201 */
2202 for (i = 0; i < nr; i++) {
2203 bh = arr[i];
2204 if (buffer_uptodate(bh))
2205 end_buffer_async_read(bh, 1);
2206 else
2207 submit_bh(READ, bh);
2208 }
2209 return 0;
2210}
2211
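/*
 * Usage sketch (not part of fs/buffer.c): a minimal ->readpage built on
 * block_read_full_page(); myfs_get_block is a hypothetical get_block_t.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}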
2212/* utility function for filesystems that need to do work on expanding
89e10787 2213 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2214 * deal with the hole.
2215 */
89e10787 2216int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2217{
2218 struct address_space *mapping = inode->i_mapping;
2219 struct page *page;
89e10787 2220 void *fsdata;
05eb0b51 2221 unsigned long limit;
1da177e4
LT
2222 int err;
2223
2224 err = -EFBIG;
2225 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2226 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2227 send_sig(SIGXFSZ, current, 0);
2228 goto out;
2229 }
2230 if (size > inode->i_sb->s_maxbytes)
2231 goto out;
2232
89e10787
NP
2233 err = pagecache_write_begin(NULL, mapping, size, 0,
2234 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2235 &page, &fsdata);
2236 if (err)
05eb0b51 2237 goto out;
05eb0b51 2238
89e10787
NP
2239 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2240 BUG_ON(err > 0);
05eb0b51 2241
1da177e4
LT
2242out:
2243 return err;
2244}
2245
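/*
 * Usage sketch (not part of fs/buffer.c): a filesystem that cannot represent
 * holes typically calls generic_cont_expand_simple() from its ->setattr when
 * the file is being extended, before applying the remaining attributes.
 * myfs_setattr is a hypothetical name; other attribute handling is omitted.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
		err = generic_cont_expand_simple(inode, attr->ia_size);
	return err;
}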
f1e3af72
AB
2246static int cont_expand_zero(struct file *file, struct address_space *mapping,
2247 loff_t pos, loff_t *bytes)
1da177e4 2248{
1da177e4 2249 struct inode *inode = mapping->host;
1da177e4 2250 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2251 struct page *page;
2252 void *fsdata;
2253 pgoff_t index, curidx;
2254 loff_t curpos;
2255 unsigned zerofrom, offset, len;
2256 int err = 0;
1da177e4 2257
89e10787
NP
2258 index = pos >> PAGE_CACHE_SHIFT;
2259 offset = pos & ~PAGE_CACHE_MASK;
2260
2261 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2262 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2263 if (zerofrom & (blocksize-1)) {
2264 *bytes |= (blocksize-1);
2265 (*bytes)++;
2266 }
89e10787 2267 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2268
89e10787
NP
2269 err = pagecache_write_begin(file, mapping, curpos, len,
2270 AOP_FLAG_UNINTERRUPTIBLE,
2271 &page, &fsdata);
2272 if (err)
2273 goto out;
eebd2aa3 2274 zero_user(page, zerofrom, len);
89e10787
NP
2275 err = pagecache_write_end(file, mapping, curpos, len, len,
2276 page, fsdata);
2277 if (err < 0)
2278 goto out;
2279 BUG_ON(err != len);
2280 err = 0;
061e9746
OH
2281
2282 balance_dirty_pages_ratelimited(mapping);
89e10787 2283 }
1da177e4 2284
89e10787
NP
2285 /* page covers the boundary, find the boundary offset */
2286 if (index == curidx) {
2287 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2288 /* if we will expand the thing last block will be filled */
89e10787
NP
2289 if (offset <= zerofrom) {
2290 goto out;
2291 }
2292 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2293 *bytes |= (blocksize-1);
2294 (*bytes)++;
2295 }
89e10787 2296 len = offset - zerofrom;
1da177e4 2297
89e10787
NP
2298 err = pagecache_write_begin(file, mapping, curpos, len,
2299 AOP_FLAG_UNINTERRUPTIBLE,
2300 &page, &fsdata);
2301 if (err)
2302 goto out;
eebd2aa3 2303 zero_user(page, zerofrom, len);
89e10787
NP
2304 err = pagecache_write_end(file, mapping, curpos, len, len,
2305 page, fsdata);
2306 if (err < 0)
2307 goto out;
2308 BUG_ON(err != len);
2309 err = 0;
1da177e4 2310 }
89e10787
NP
2311out:
2312 return err;
2313}
2314
2315/*
2316 * For moronic filesystems that do not allow holes in files.
2317 * We may have to extend the file.
2318 */
2319int cont_write_begin(struct file *file, struct address_space *mapping,
2320 loff_t pos, unsigned len, unsigned flags,
2321 struct page **pagep, void **fsdata,
2322 get_block_t *get_block, loff_t *bytes)
2323{
2324 struct inode *inode = mapping->host;
2325 unsigned blocksize = 1 << inode->i_blkbits;
2326 unsigned zerofrom;
2327 int err;
2328
2329 err = cont_expand_zero(file, mapping, pos, bytes);
2330 if (err)
2331 goto out;
2332
2333 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2334 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2335 *bytes |= (blocksize-1);
2336 (*bytes)++;
1da177e4 2337 }
1da177e4 2338
89e10787
NP
2339 *pagep = NULL;
2340 err = block_write_begin(file, mapping, pos, len,
2341 flags, pagep, fsdata, get_block);
1da177e4 2342out:
89e10787 2343 return err;
1da177e4
LT
2344}
2345
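/*
 * Usage sketch (not part of fs/buffer.c): a no-holes filesystem passes a
 * pointer to its "zeroed/allocated up to here" marker so cont_expand_zero()
 * can fill the gap up to @pos first. MYFS_I() and the zeroed_up_to field are
 * made-up names standing in for the filesystem's own per-inode bookkeeping.
 */
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->zeroed_up_to);
}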
2346int block_prepare_write(struct page *page, unsigned from, unsigned to,
2347 get_block_t *get_block)
2348{
2349 struct inode *inode = page->mapping->host;
2350 int err = __block_prepare_write(inode, page, from, to, get_block);
2351 if (err)
2352 ClearPageUptodate(page);
2353 return err;
2354}
2355
2356int block_commit_write(struct page *page, unsigned from, unsigned to)
2357{
2358 struct inode *inode = page->mapping->host;
2359 __block_commit_write(inode,page,from,to);
2360 return 0;
2361}
2362
54171690
DC
2363/*
2364 * block_page_mkwrite() is not allowed to change the file size as it gets
2365 * called from a page fault handler when a page is first dirtied. Hence we must
2366 * be careful to check for EOF conditions here. We set the page up correctly
2367 * for a written page which means we get ENOSPC checking when writing into
2368 * holes and correct delalloc and unwritten extent mapping on filesystems that
2369 * support these features.
2370 *
2371 * We are not allowed to take the i_mutex here so we have to play games to
2372 * protect against truncate races as the page could now be beyond EOF. Because
2373 * vmtruncate() writes the inode size before removing pages, once we have the
2374 * page lock we can determine safely if the page is beyond EOF. If it is not
2375 * beyond EOF, then the page is guaranteed safe against truncation until we
2376 * unlock the page.
2377 */
2378int
c2ec175c 2379block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
54171690
DC
2380 get_block_t get_block)
2381{
c2ec175c 2382 struct page *page = vmf->page;
54171690
DC
2383 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2384 unsigned long end;
2385 loff_t size;
56a76f82 2386 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
54171690
DC
2387
2388 lock_page(page);
2389 size = i_size_read(inode);
2390 if ((page->mapping != inode->i_mapping) ||
18336338 2391 (page_offset(page) > size)) {
54171690
DC
2392 /* page got truncated out from underneath us */
2393 goto out_unlock;
2394 }
2395
2396 /* page is wholly or partially inside EOF */
2397 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2398 end = size & ~PAGE_CACHE_MASK;
2399 else
2400 end = PAGE_CACHE_SIZE;
2401
2402 ret = block_prepare_write(page, 0, end, get_block);
2403 if (!ret)
2404 ret = block_commit_write(page, 0, end);
2405
56a76f82
NP
2406 if (unlikely(ret)) {
2407 if (ret == -ENOMEM)
2408 ret = VM_FAULT_OOM;
2409 else /* -ENOSPC, -EIO, etc */
2410 ret = VM_FAULT_SIGBUS;
2411 }
c2ec175c 2412
56a76f82 2413out_unlock:
54171690
DC
2414 unlock_page(page);
2415 return ret;
2416}
1da177e4
LT
2417
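/*
 * Usage sketch (not part of fs/buffer.c): block_page_mkwrite() has the
 * ->page_mkwrite shape, so a filesystem only needs a thin wrapper that
 * supplies its get_block; the myfs_* names are hypothetical.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}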
2418/*
03158cd7 2419 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2420 * immediately, while under the page lock. So it needs a special end_io
2421 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2422 */
2423static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2424{
68671f35 2425 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2426}
2427
03158cd7
NP
2428/*
2429 * Attach the singly-linked list of buffers created by nobh_write_begin to
2430 * the page (converting it to a circular linked list and taking care of page
2431 * dirty races).
2432 */
2433static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2434{
2435 struct buffer_head *bh;
2436
2437 BUG_ON(!PageLocked(page));
2438
2439 spin_lock(&page->mapping->private_lock);
2440 bh = head;
2441 do {
2442 if (PageDirty(page))
2443 set_buffer_dirty(bh);
2444 if (!bh->b_this_page)
2445 bh->b_this_page = head;
2446 bh = bh->b_this_page;
2447 } while (bh != head);
2448 attach_page_buffers(page, head);
2449 spin_unlock(&page->mapping->private_lock);
2450}
2451
1da177e4
LT
2452/*
2453 * On entry, the page is fully not uptodate.
2454 * On exit, the page is fully uptodate in the areas outside (from,to).
2455 */
03158cd7
NP
2456int nobh_write_begin(struct file *file, struct address_space *mapping,
2457 loff_t pos, unsigned len, unsigned flags,
2458 struct page **pagep, void **fsdata,
1da177e4
LT
2459 get_block_t *get_block)
2460{
03158cd7 2461 struct inode *inode = mapping->host;
1da177e4
LT
2462 const unsigned blkbits = inode->i_blkbits;
2463 const unsigned blocksize = 1 << blkbits;
a4b0672d 2464 struct buffer_head *head, *bh;
03158cd7
NP
2465 struct page *page;
2466 pgoff_t index;
2467 unsigned from, to;
1da177e4 2468 unsigned block_in_page;
a4b0672d 2469 unsigned block_start, block_end;
1da177e4 2470 sector_t block_in_file;
1da177e4 2471 int nr_reads = 0;
1da177e4
LT
2472 int ret = 0;
2473 int is_mapped_to_disk = 1;
1da177e4 2474
03158cd7
NP
2475 index = pos >> PAGE_CACHE_SHIFT;
2476 from = pos & (PAGE_CACHE_SIZE - 1);
2477 to = from + len;
2478
54566b2c 2479 page = grab_cache_page_write_begin(mapping, index, flags);
03158cd7
NP
2480 if (!page)
2481 return -ENOMEM;
2482 *pagep = page;
2483 *fsdata = NULL;
2484
2485 if (page_has_buffers(page)) {
2486 unlock_page(page);
2487 page_cache_release(page);
2488 *pagep = NULL;
2489 return block_write_begin(file, mapping, pos, len, flags, pagep,
2490 fsdata, get_block);
2491 }
a4b0672d 2492
1da177e4
LT
2493 if (PageMappedToDisk(page))
2494 return 0;
2495
a4b0672d
NP
2496 /*
2497 * Allocate buffers so that we can keep track of state, and potentially
2498 * attach them to the page if an error occurs. In the common case of
2499 * no error, they will just be freed again without ever being attached
2500 * to the page (which is all OK, because we're under the page lock).
2501 *
2502 * Be careful: the buffer linked list is a NULL terminated one, rather
2503 * than the circular one we're used to.
2504 */
2505 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2506 if (!head) {
2507 ret = -ENOMEM;
2508 goto out_release;
2509 }
a4b0672d 2510
1da177e4 2511 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2512
2513 /*
2514 * We loop across all blocks in the page, whether or not they are
2515 * part of the affected region. This is so we can discover if the
2516 * page is fully mapped-to-disk.
2517 */
a4b0672d 2518 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2519 block_start < PAGE_CACHE_SIZE;
a4b0672d 2520 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2521 int create;
2522
a4b0672d
NP
2523 block_end = block_start + blocksize;
2524 bh->b_state = 0;
1da177e4
LT
2525 create = 1;
2526 if (block_start >= to)
2527 create = 0;
2528 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2529 bh, create);
1da177e4
LT
2530 if (ret)
2531 goto failed;
a4b0672d 2532 if (!buffer_mapped(bh))
1da177e4 2533 is_mapped_to_disk = 0;
a4b0672d
NP
2534 if (buffer_new(bh))
2535 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2536 if (PageUptodate(page)) {
2537 set_buffer_uptodate(bh);
1da177e4 2538 continue;
a4b0672d
NP
2539 }
2540 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2541 zero_user_segments(page, block_start, from,
2542 to, block_end);
1da177e4
LT
2543 continue;
2544 }
a4b0672d 2545 if (buffer_uptodate(bh))
1da177e4
LT
2546 continue; /* reiserfs does this */
2547 if (block_start < from || block_end > to) {
a4b0672d
NP
2548 lock_buffer(bh);
2549 bh->b_end_io = end_buffer_read_nobh;
2550 submit_bh(READ, bh);
2551 nr_reads++;
1da177e4
LT
2552 }
2553 }
2554
2555 if (nr_reads) {
1da177e4
LT
2556 /*
2557 * The page is locked, so these buffers are protected from
2558 * any VM or truncate activity. Hence we don't need to care
2559 * for the buffer_head refcounts.
2560 */
a4b0672d 2561 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2562 wait_on_buffer(bh);
2563 if (!buffer_uptodate(bh))
2564 ret = -EIO;
1da177e4
LT
2565 }
2566 if (ret)
2567 goto failed;
2568 }
2569
2570 if (is_mapped_to_disk)
2571 SetPageMappedToDisk(page);
1da177e4 2572
03158cd7 2573 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2574
1da177e4
LT
2575 return 0;
2576
2577failed:
03158cd7 2578 BUG_ON(!ret);
1da177e4 2579 /*
a4b0672d
NP
2580 * Error recovery is a bit difficult. We need to zero out blocks that
2581 * were newly allocated, and dirty them to ensure they get written out.
2582 * Buffers need to be attached to the page at this point, otherwise
2583 * the handling of potential IO errors during writeout would be hard
2584 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2585 */
03158cd7
NP
2586 attach_nobh_buffers(page, head);
2587 page_zero_new_buffers(page, from, to);
a4b0672d 2588
03158cd7
NP
2589out_release:
2590 unlock_page(page);
2591 page_cache_release(page);
2592 *pagep = NULL;
a4b0672d 2593
03158cd7
NP
2594 if (pos + len > inode->i_size)
2595 vmtruncate(inode, inode->i_size);
a4b0672d 2596
1da177e4
LT
2597 return ret;
2598}
03158cd7 2599EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2600
03158cd7
NP
2601int nobh_write_end(struct file *file, struct address_space *mapping,
2602 loff_t pos, unsigned len, unsigned copied,
2603 struct page *page, void *fsdata)
1da177e4
LT
2604{
2605 struct inode *inode = page->mapping->host;
efdc3131 2606 struct buffer_head *head = fsdata;
03158cd7 2607 struct buffer_head *bh;
5b41e74a 2608 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2609
d4cf109f 2610 if (unlikely(copied < len) && head)
5b41e74a
DM
2611 attach_nobh_buffers(page, head);
2612 if (page_has_buffers(page))
2613 return generic_write_end(file, mapping, pos, len,
2614 copied, page, fsdata);
a4b0672d 2615
22c8ca78 2616 SetPageUptodate(page);
1da177e4 2617 set_page_dirty(page);
03158cd7
NP
2618 if (pos+copied > inode->i_size) {
2619 i_size_write(inode, pos+copied);
1da177e4
LT
2620 mark_inode_dirty(inode);
2621 }
03158cd7
NP
2622
2623 unlock_page(page);
2624 page_cache_release(page);
2625
03158cd7
NP
2626 while (head) {
2627 bh = head;
2628 head = head->b_this_page;
2629 free_buffer_head(bh);
2630 }
2631
2632 return copied;
1da177e4 2633}
03158cd7 2634EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2635
2636/*
2637 * nobh_writepage() - based on block_write_full_page() except
2638 * that it tries to operate without attaching bufferheads to
2639 * the page.
2640 */
2641int nobh_writepage(struct page *page, get_block_t *get_block,
2642 struct writeback_control *wbc)
2643{
2644 struct inode * const inode = page->mapping->host;
2645 loff_t i_size = i_size_read(inode);
2646 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2647 unsigned offset;
1da177e4
LT
2648 int ret;
2649
2650 /* Is the page fully inside i_size? */
2651 if (page->index < end_index)
2652 goto out;
2653
2654 /* Is the page fully outside i_size? (truncate in progress) */
2655 offset = i_size & (PAGE_CACHE_SIZE-1);
2656 if (page->index >= end_index+1 || !offset) {
2657 /*
2658 * The page may have dirty, unmapped buffers. For example,
2659 * they may have been added in ext3_writepage(). Make them
2660 * freeable here, so the page does not leak.
2661 */
2662#if 0
2663 /* Not really sure about this - do we need this ? */
2664 if (page->mapping->a_ops->invalidatepage)
2665 page->mapping->a_ops->invalidatepage(page, offset);
2666#endif
2667 unlock_page(page);
2668 return 0; /* don't care */
2669 }
2670
2671 /*
2672 * The page straddles i_size. It must be zeroed out on each and every
2673 * writepage invocation because it may be mmapped. "A file is mapped
2674 * in multiples of the page size. For a file that is not a multiple of
2675 * the page size, the remaining memory is zeroed when mapped, and
2676 * writes to that region are not written out to the file."
2677 */
eebd2aa3 2678 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2679out:
2680 ret = mpage_writepage(page, get_block, wbc);
2681 if (ret == -EAGAIN)
2682 ret = __block_write_full_page(inode, page, get_block, wbc);
2683 return ret;
2684}
2685EXPORT_SYMBOL(nobh_writepage);
2686
03158cd7
NP
2687int nobh_truncate_page(struct address_space *mapping,
2688 loff_t from, get_block_t *get_block)
1da177e4 2689{
1da177e4
LT
2690 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2691 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2692 unsigned blocksize;
2693 sector_t iblock;
2694 unsigned length, pos;
2695 struct inode *inode = mapping->host;
1da177e4 2696 struct page *page;
03158cd7
NP
2697 struct buffer_head map_bh;
2698 int err;
1da177e4 2699
03158cd7
NP
2700 blocksize = 1 << inode->i_blkbits;
2701 length = offset & (blocksize - 1);
2702
2703 /* Block boundary? Nothing to do */
2704 if (!length)
2705 return 0;
2706
2707 length = blocksize - length;
2708 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2709
1da177e4 2710 page = grab_cache_page(mapping, index);
03158cd7 2711 err = -ENOMEM;
1da177e4
LT
2712 if (!page)
2713 goto out;
2714
03158cd7
NP
2715 if (page_has_buffers(page)) {
2716has_buffers:
2717 unlock_page(page);
2718 page_cache_release(page);
2719 return block_truncate_page(mapping, from, get_block);
2720 }
2721
2722 /* Find the buffer that contains "offset" */
2723 pos = blocksize;
2724 while (offset >= pos) {
2725 iblock++;
2726 pos += blocksize;
2727 }
2728
2729 err = get_block(inode, iblock, &map_bh, 0);
2730 if (err)
2731 goto unlock;
2732 /* unmapped? It's a hole - nothing to do */
2733 if (!buffer_mapped(&map_bh))
2734 goto unlock;
2735
2736 /* Ok, it's mapped. Make sure it's up-to-date */
2737 if (!PageUptodate(page)) {
2738 err = mapping->a_ops->readpage(NULL, page);
2739 if (err) {
2740 page_cache_release(page);
2741 goto out;
2742 }
2743 lock_page(page);
2744 if (!PageUptodate(page)) {
2745 err = -EIO;
2746 goto unlock;
2747 }
2748 if (page_has_buffers(page))
2749 goto has_buffers;
1da177e4 2750 }
eebd2aa3 2751 zero_user(page, offset, length);
03158cd7
NP
2752 set_page_dirty(page);
2753 err = 0;
2754
2755unlock:
1da177e4
LT
2756 unlock_page(page);
2757 page_cache_release(page);
2758out:
03158cd7 2759 return err;
1da177e4
LT
2760}
2761EXPORT_SYMBOL(nobh_truncate_page);
2762
2763int block_truncate_page(struct address_space *mapping,
2764 loff_t from, get_block_t *get_block)
2765{
2766 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2767 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2768 unsigned blocksize;
54b21a79 2769 sector_t iblock;
1da177e4
LT
2770 unsigned length, pos;
2771 struct inode *inode = mapping->host;
2772 struct page *page;
2773 struct buffer_head *bh;
1da177e4
LT
2774 int err;
2775
2776 blocksize = 1 << inode->i_blkbits;
2777 length = offset & (blocksize - 1);
2778
2779 /* Block boundary? Nothing to do */
2780 if (!length)
2781 return 0;
2782
2783 length = blocksize - length;
54b21a79 2784 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2785
2786 page = grab_cache_page(mapping, index);
2787 err = -ENOMEM;
2788 if (!page)
2789 goto out;
2790
2791 if (!page_has_buffers(page))
2792 create_empty_buffers(page, blocksize, 0);
2793
2794 /* Find the buffer that contains "offset" */
2795 bh = page_buffers(page);
2796 pos = blocksize;
2797 while (offset >= pos) {
2798 bh = bh->b_this_page;
2799 iblock++;
2800 pos += blocksize;
2801 }
2802
2803 err = 0;
2804 if (!buffer_mapped(bh)) {
b0cf2321 2805 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2806 err = get_block(inode, iblock, bh, 0);
2807 if (err)
2808 goto unlock;
2809 /* unmapped? It's a hole - nothing to do */
2810 if (!buffer_mapped(bh))
2811 goto unlock;
2812 }
2813
2814 /* Ok, it's mapped. Make sure it's up-to-date */
2815 if (PageUptodate(page))
2816 set_buffer_uptodate(bh);
2817
33a266dd 2818 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2819 err = -EIO;
2820 ll_rw_block(READ, 1, &bh);
2821 wait_on_buffer(bh);
2822 /* Uhhuh. Read error. Complain and punt. */
2823 if (!buffer_uptodate(bh))
2824 goto unlock;
2825 }
2826
eebd2aa3 2827 zero_user(page, offset, length);
1da177e4
LT
2828 mark_buffer_dirty(bh);
2829 err = 0;
2830
2831unlock:
2832 unlock_page(page);
2833 page_cache_release(page);
2834out:
2835 return err;
2836}
2837
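/*
 * Usage sketch (not part of fs/buffer.c): a filesystem's truncate path zeroes
 * the tail of the new last block before freeing blocks beyond i_size, so
 * stale data is not exposed through mmap or a later extension.
 * myfs_truncate/myfs_get_block are hypothetical; block freeing is omitted.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ...now release the on-disk blocks past the new i_size... */
}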
2838/*
2839 * The generic ->writepage function for buffer-backed address_spaces
2840 */
2841int block_write_full_page(struct page *page, get_block_t *get_block,
2842 struct writeback_control *wbc)
2843{
2844 struct inode * const inode = page->mapping->host;
2845 loff_t i_size = i_size_read(inode);
2846 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2847 unsigned offset;
1da177e4
LT
2848
2849 /* Is the page fully inside i_size? */
2850 if (page->index < end_index)
2851 return __block_write_full_page(inode, page, get_block, wbc);
2852
2853 /* Is the page fully outside i_size? (truncate in progress) */
2854 offset = i_size & (PAGE_CACHE_SIZE-1);
2855 if (page->index >= end_index+1 || !offset) {
2856 /*
2857 * The page may have dirty, unmapped buffers. For example,
2858 * they may have been added in ext3_writepage(). Make them
2859 * freeable here, so the page does not leak.
2860 */
aaa4059b 2861 do_invalidatepage(page, 0);
1da177e4
LT
2862 unlock_page(page);
2863 return 0; /* don't care */
2864 }
2865
2866 /*
2867 * The page straddles i_size. It must be zeroed out on each and every
2868 * writepage invocation because it may be mmapped. "A file is mapped
2869 * in multiples of the page size. For a file that is not a multiple of
2870 * the page size, the remaining memory is zeroed when mapped, and
2871 * writes to that region are not written out to the file."
2872 */
eebd2aa3 2873 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2874 return __block_write_full_page(inode, page, get_block, wbc);
2875}
2876
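/*
 * Usage sketch (not part of fs/buffer.c): the typical ->writepage for a
 * buffer-backed filesystem; myfs_get_block is a hypothetical get_block_t.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}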
2877sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2878 get_block_t *get_block)
2879{
2880 struct buffer_head tmp;
2881 struct inode *inode = mapping->host;
2882 tmp.b_state = 0;
2883 tmp.b_blocknr = 0;
b0cf2321 2884 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2885 get_block(inode, block, &tmp, 0);
2886 return tmp.b_blocknr;
2887}
2888
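/*
 * Usage sketch (not part of fs/buffer.c): ->bmap is usually just this
 * one-liner; myfs_get_block is a hypothetical get_block_t.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}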
6712ecf8 2889static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2890{
2891 struct buffer_head *bh = bio->bi_private;
2892
1da177e4
LT
2893 if (err == -EOPNOTSUPP) {
2894 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2895 set_bit(BH_Eopnotsupp, &bh->b_state);
2896 }
2897
08bafc03
KM
2898 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2899 set_bit(BH_Quiet, &bh->b_state);
2900
1da177e4
LT
2901 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2902 bio_put(bio);
1da177e4
LT
2903}
2904
2905int submit_bh(int rw, struct buffer_head * bh)
2906{
2907 struct bio *bio;
2908 int ret = 0;
2909
2910 BUG_ON(!buffer_locked(bh));
2911 BUG_ON(!buffer_mapped(bh));
2912 BUG_ON(!bh->b_end_io);
2913
48fd4f93
JA
2914 /*
2915 * Mask in barrier bit for a write (could be either a WRITE or a
2916	 * WRITE_SYNC)
2917 */
2918 if (buffer_ordered(bh) && (rw & WRITE))
2919 rw |= WRITE_BARRIER;
1da177e4
LT
2920
2921 /*
48fd4f93 2922 * Only clear out a write error when rewriting
1da177e4 2923 */
48fd4f93 2924 if (test_set_buffer_req(bh) && (rw & WRITE))
1da177e4
LT
2925 clear_buffer_write_io_error(bh);
2926
2927 /*
2928 * from here on down, it's all bio -- do the initial mapping,
2929 * submit_bio -> generic_make_request may further map this bio around
2930 */
2931 bio = bio_alloc(GFP_NOIO, 1);
2932
2933 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2934 bio->bi_bdev = bh->b_bdev;
2935 bio->bi_io_vec[0].bv_page = bh->b_page;
2936 bio->bi_io_vec[0].bv_len = bh->b_size;
2937 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2938
2939 bio->bi_vcnt = 1;
2940 bio->bi_idx = 0;
2941 bio->bi_size = bh->b_size;
2942
2943 bio->bi_end_io = end_bio_bh_io_sync;
2944 bio->bi_private = bh;
2945
2946 bio_get(bio);
2947 submit_bio(rw, bio);
2948
2949 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2950 ret = -EOPNOTSUPP;
2951
2952 bio_put(bio);
2953 return ret;
2954}
2955
2956/**
2957 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2958 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2959 * @nr: number of &struct buffer_heads in the array
2960 * @bhs: array of pointers to &struct buffer_head
2961 *
a7662236
JK
2962 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2963 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2964 * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2965 * in the buffers is sent to disk. The fourth option, %READA, is described in
2966 * the documentation for generic_make_request(), which ll_rw_block() calls.
1da177e4
LT
2967 *
2968 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2969 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2970 * clean when doing a write request, and any buffer that appears to be
2971 * up-to-date when doing a read request. Further, it marks as clean buffers that
2972 * are processed for writing (the buffer cache won't assume that they are
2973 * actually clean until the buffer gets unlocked).
1da177e4
LT
2974 *
2975 * ll_rw_block sets b_end_io to a simple completion handler that marks
2976 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2977 * any waiters.
2978 *
2979 * All of the buffers must be for the same device, and must also be a
2980 * multiple of the current approved size for the device.
2981 */
2982void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2983{
2984 int i;
2985
2986 for (i = 0; i < nr; i++) {
2987 struct buffer_head *bh = bhs[i];
2988
9cf6b720 2989 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
a7662236 2990 lock_buffer(bh);
ca5de404 2991 else if (!trylock_buffer(bh))
1da177e4
LT
2992 continue;
2993
9cf6b720
JA
2994 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
2995 rw == SWRITE_SYNC_PLUG) {
1da177e4 2996 if (test_clear_buffer_dirty(bh)) {
76c3073a 2997 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2998 get_bh(bh);
18ce3751
JA
2999 if (rw == SWRITE_SYNC)
3000 submit_bh(WRITE_SYNC, bh);
3001 else
3002 submit_bh(WRITE, bh);
1da177e4
LT
3003 continue;
3004 }
3005 } else {
1da177e4 3006 if (!buffer_uptodate(bh)) {
76c3073a 3007 bh->b_end_io = end_buffer_read_sync;
e60e5c50 3008 get_bh(bh);
1da177e4
LT
3009 submit_bh(rw, bh);
3010 continue;
3011 }
3012 }
3013 unlock_buffer(bh);
1da177e4
LT
3014 }
3015}
3016
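/*
 * Usage sketch (not part of fs/buffer.c): a common pattern is to kick off
 * reads for a run of metadata blocks with one ll_rw_block() call and then
 * wait only on the buffer actually needed. myfs_bread_ahead and the fixed
 * window of 8 are hypothetical; allocation-failure handling is elided, and
 * the extra buffers are dropped immediately, serving purely as readahead.
 */
static struct buffer_head *myfs_bread_ahead(struct super_block *sb,
					    sector_t block, int nr_ahead)
{
	struct buffer_head *bhs[8];
	int i, nr = min(nr_ahead, 8);

	for (i = 0; i < nr; i++)
		bhs[i] = sb_getblk(sb, block + i);
	ll_rw_block(READ, nr, bhs);		/* skips already-uptodate buffers */
	for (i = 1; i < nr; i++)
		brelse(bhs[i]);
	wait_on_buffer(bhs[0]);
	if (buffer_uptodate(bhs[0]))
		return bhs[0];
	brelse(bhs[0]);
	return NULL;
}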
3017/*
3018 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3019 * and then start new I/O and then wait upon it. The caller must have a ref on
3020 * the buffer_head.
3021 */
3022int sync_dirty_buffer(struct buffer_head *bh)
3023{
3024 int ret = 0;
3025
3026 WARN_ON(atomic_read(&bh->b_count) < 1);
3027 lock_buffer(bh);
3028 if (test_clear_buffer_dirty(bh)) {
3029 get_bh(bh);
3030 bh->b_end_io = end_buffer_write_sync;
1aa2a7cc 3031 ret = submit_bh(WRITE_SYNC, bh);
1da177e4
LT
3032 wait_on_buffer(bh);
3033 if (buffer_eopnotsupp(bh)) {
3034 clear_buffer_eopnotsupp(bh);
3035 ret = -EOPNOTSUPP;
3036 }
3037 if (!ret && !buffer_uptodate(bh))
3038 ret = -EIO;
3039 } else {
3040 unlock_buffer(bh);
3041 }
3042 return ret;
3043}
3044
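/*
 * Usage sketch (not part of fs/buffer.c): the usual data-integrity pattern
 * for a single metadata buffer - modify, mark dirty, then write and wait.
 * myfs_update_super is a hypothetical caller.
 */
static int myfs_update_super(struct buffer_head *bh)
{
	/* ...edit the on-disk structure in bh->b_data here... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0 on success, -EIO etc. on error */
}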
3045/*
3046 * try_to_free_buffers() checks if all the buffers on this particular page
3047 * are unused, and releases them if so.
3048 *
3049 * Exclusion against try_to_free_buffers may be obtained by either
3050 * locking the page or by holding its mapping's private_lock.
3051 *
3052 * If the page is dirty but all the buffers are clean then we need to
3053 * be sure to mark the page clean as well. This is because the page
3054 * may be against a block device, and a later reattachment of buffers
3055 * to a dirty page will set *all* buffers dirty, which would corrupt
3056 * filesystem data on the same device.
3057 *
3058 * The same applies to regular filesystem pages: if all the buffers are
3059 * clean then we set the page clean and proceed. To do that, we require
3060 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3061 * private_lock.
3062 *
3063 * try_to_free_buffers() is non-blocking.
3064 */
3065static inline int buffer_busy(struct buffer_head *bh)
3066{
3067 return atomic_read(&bh->b_count) |
3068 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3069}
3070
3071static int
3072drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3073{
3074 struct buffer_head *head = page_buffers(page);
3075 struct buffer_head *bh;
3076
3077 bh = head;
3078 do {
de7d5a3b 3079 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3080 set_bit(AS_EIO, &page->mapping->flags);
3081 if (buffer_busy(bh))
3082 goto failed;
3083 bh = bh->b_this_page;
3084 } while (bh != head);
3085
3086 do {
3087 struct buffer_head *next = bh->b_this_page;
3088
535ee2fb 3089 if (bh->b_assoc_map)
1da177e4
LT
3090 __remove_assoc_queue(bh);
3091 bh = next;
3092 } while (bh != head);
3093 *buffers_to_free = head;
3094 __clear_page_buffers(page);
3095 return 1;
3096failed:
3097 return 0;
3098}
3099
3100int try_to_free_buffers(struct page *page)
3101{
3102 struct address_space * const mapping = page->mapping;
3103 struct buffer_head *buffers_to_free = NULL;
3104 int ret = 0;
3105
3106 BUG_ON(!PageLocked(page));
ecdfc978 3107 if (PageWriteback(page))
1da177e4
LT
3108 return 0;
3109
3110 if (mapping == NULL) { /* can this still happen? */
3111 ret = drop_buffers(page, &buffers_to_free);
3112 goto out;
3113 }
3114
3115 spin_lock(&mapping->private_lock);
3116 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3117
3118 /*
3119 * If the filesystem writes its buffers by hand (eg ext3)
3120 * then we can have clean buffers against a dirty page. We
3121 * clean the page here; otherwise the VM will never notice
3122 * that the filesystem did any IO at all.
3123 *
3124 * Also, during truncate, discard_buffer will have marked all
3125 * the page's buffers clean. We discover that here and clean
3126 * the page also.
87df7241
NP
3127 *
3128 * private_lock must be held over this entire operation in order
3129 * to synchronise against __set_page_dirty_buffers and prevent the
3130 * dirty bit from being lost.
ecdfc978
LT
3131 */
3132 if (ret)
3133 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3134 spin_unlock(&mapping->private_lock);
1da177e4
LT
3135out:
3136 if (buffers_to_free) {
3137 struct buffer_head *bh = buffers_to_free;
3138
3139 do {
3140 struct buffer_head *next = bh->b_this_page;
3141 free_buffer_head(bh);
3142 bh = next;
3143 } while (bh != buffers_to_free);
3144 }
3145 return ret;
3146}
3147EXPORT_SYMBOL(try_to_free_buffers);
3148
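/*
 * Usage sketch (not part of fs/buffer.c): a filesystem with no private
 * per-page state can let ->releasepage delegate straight to
 * try_to_free_buffers(); myfs_releasepage is a hypothetical name.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}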
3978d717 3149void block_sync_page(struct page *page)
1da177e4
LT
3150{
3151 struct address_space *mapping;
3152
3153 smp_mb();
3154 mapping = page_mapping(page);
3155 if (mapping)
3156 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
3157}
3158
3159/*
3160 * There are no bdflush tunables left. But distributions are
3161 * still running obsolete flush daemons, so we terminate them here.
3162 *
3163 * Use of bdflush() is deprecated and will be removed in a future kernel.
3164 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3165 */
bdc480e3 3166SYSCALL_DEFINE2(bdflush, int, func, long, data)
1da177e4
LT
3167{
3168 static int msg_count;
3169
3170 if (!capable(CAP_SYS_ADMIN))
3171 return -EPERM;
3172
3173 if (msg_count < 5) {
3174 msg_count++;
3175 printk(KERN_INFO
3176 "warning: process `%s' used the obsolete bdflush"
3177 " system call\n", current->comm);
3178 printk(KERN_INFO "Fix your initscripts?\n");
3179 }
3180
3181 if (func == 1)
3182 do_exit(0);
3183 return 0;
3184}
3185
3186/*
3187 * Buffer-head allocation
3188 */
e18b890b 3189static struct kmem_cache *bh_cachep;
1da177e4
LT
3190
3191/*
3192 * Once the number of bh's in the machine exceeds this level, we start
3193 * stripping them in writeback.
3194 */
3195static int max_buffer_heads;
3196
3197int buffer_heads_over_limit;
3198
3199struct bh_accounting {
3200 int nr; /* Number of live bh's */
3201 int ratelimit; /* Limit cacheline bouncing */
3202};
3203
3204static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3205
3206static void recalc_bh_state(void)
3207{
3208 int i;
3209 int tot = 0;
3210
3211 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3212 return;
3213 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3214 for_each_online_cpu(i)
1da177e4
LT
3215 tot += per_cpu(bh_accounting, i).nr;
3216 buffer_heads_over_limit = (tot > max_buffer_heads);
3217}
3218
dd0fc66f 3219struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3220{
488514d1 3221 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
1da177e4 3222 if (ret) {
a35afb83 3223 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3224 get_cpu_var(bh_accounting).nr++;
1da177e4 3225 recalc_bh_state();
736c7b80 3226 put_cpu_var(bh_accounting);
1da177e4
LT
3227 }
3228 return ret;
3229}
3230EXPORT_SYMBOL(alloc_buffer_head);
3231
3232void free_buffer_head(struct buffer_head *bh)
3233{
3234 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3235 kmem_cache_free(bh_cachep, bh);
736c7b80 3236 get_cpu_var(bh_accounting).nr--;
1da177e4 3237 recalc_bh_state();
736c7b80 3238 put_cpu_var(bh_accounting);
1da177e4
LT
3239}
3240EXPORT_SYMBOL(free_buffer_head);
3241
1da177e4
LT
3242static void buffer_exit_cpu(int cpu)
3243{
3244 int i;
3245 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3246
3247 for (i = 0; i < BH_LRU_SIZE; i++) {
3248 brelse(b->bhs[i]);
3249 b->bhs[i] = NULL;
3250 }
8a143426
ED
3251 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3252 per_cpu(bh_accounting, cpu).nr = 0;
3253 put_cpu_var(bh_accounting);
1da177e4
LT
3254}
3255
3256static int buffer_cpu_notify(struct notifier_block *self,
3257 unsigned long action, void *hcpu)
3258{
8bb78442 3259 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3260 buffer_exit_cpu((unsigned long)hcpu);
3261 return NOTIFY_OK;
3262}
1da177e4 3263
389d1b08 3264/**
a6b91919 3265 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3266 * @bh: struct buffer_head
3267 *
3268 * Returns 1 if the buffer is up-to-date; otherwise returns 0
3269 * with the buffer locked.
3270 */
3271int bh_uptodate_or_lock(struct buffer_head *bh)
3272{
3273 if (!buffer_uptodate(bh)) {
3274 lock_buffer(bh);
3275 if (!buffer_uptodate(bh))
3276 return 0;
3277 unlock_buffer(bh);
3278 }
3279 return 1;
3280}
3281EXPORT_SYMBOL(bh_uptodate_or_lock);
3282
3283/**
a6b91919 3284 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3285 * @bh: struct buffer_head
3286 *
3287 * Returns zero on success and -EIO on error.
3288 */
3289int bh_submit_read(struct buffer_head *bh)
3290{
3291 BUG_ON(!buffer_locked(bh));
3292
3293 if (buffer_uptodate(bh)) {
3294 unlock_buffer(bh);
3295 return 0;
3296 }
3297
3298 get_bh(bh);
3299 bh->b_end_io = end_buffer_read_sync;
3300 submit_bh(READ, bh);
3301 wait_on_buffer(bh);
3302 if (buffer_uptodate(bh))
3303 return 0;
3304 return -EIO;
3305}
3306EXPORT_SYMBOL(bh_submit_read);
3307
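/*
 * Usage sketch (not part of fs/buffer.c): the two helpers above are meant to
 * be paired - take the buffer lock only when a read is actually needed.
 * myfs_read_bh_if_needed is a hypothetical name.
 */
static int myfs_read_bh_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;	/* already uptodate, buffer left unlocked */
	/* buffer is locked and stale: submit the read; it unlocks on completion */
	return bh_submit_read(bh);
}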
b98938c3 3308static void
51cc5068 3309init_buffer_head(void *data)
b98938c3
CL
3310{
3311 struct buffer_head *bh = data;
3312
3313 memset(bh, 0, sizeof(*bh));
3314 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3315}
3316
1da177e4
LT
3317void __init buffer_init(void)
3318{
3319 int nrpages;
3320
b98938c3
CL
3321 bh_cachep = kmem_cache_create("buffer_head",
3322 sizeof(struct buffer_head), 0,
3323 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3324 SLAB_MEM_SPREAD),
3325 init_buffer_head);
1da177e4
LT
3326
3327 /*
3328 * Limit the bh occupancy to 10% of ZONE_NORMAL
3329 */
3330 nrpages = (nr_free_buffer_pages() * 10) / 100;
3331 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3332 hotcpu_notifier(buffer_cpu_notify, 0);
3333}
3334
3335EXPORT_SYMBOL(__bforget);
3336EXPORT_SYMBOL(__brelse);
3337EXPORT_SYMBOL(__wait_on_buffer);
3338EXPORT_SYMBOL(block_commit_write);
3339EXPORT_SYMBOL(block_prepare_write);
54171690 3340EXPORT_SYMBOL(block_page_mkwrite);
1da177e4
LT
3341EXPORT_SYMBOL(block_read_full_page);
3342EXPORT_SYMBOL(block_sync_page);
3343EXPORT_SYMBOL(block_truncate_page);
3344EXPORT_SYMBOL(block_write_full_page);
89e10787 3345EXPORT_SYMBOL(cont_write_begin);
1da177e4
LT
3346EXPORT_SYMBOL(end_buffer_read_sync);
3347EXPORT_SYMBOL(end_buffer_write_sync);
3348EXPORT_SYMBOL(file_fsync);
1da177e4 3349EXPORT_SYMBOL(generic_block_bmap);
05eb0b51 3350EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3351EXPORT_SYMBOL(init_buffer);
3352EXPORT_SYMBOL(invalidate_bdev);
3353EXPORT_SYMBOL(ll_rw_block);
3354EXPORT_SYMBOL(mark_buffer_dirty);
3355EXPORT_SYMBOL(submit_bh);
3356EXPORT_SYMBOL(sync_dirty_buffer);
3357EXPORT_SYMBOL(unlock_buffer);