1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55EXPORT_SYMBOL(init_buffer);
56
57static int sync_buffer(void *word)
58{
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
62
63 smp_mb();
64 bd = bh->b_bdev;
65 if (bd)
66 blk_run_address_space(bd->bd_inode->i_mapping);
67 io_schedule();
68 return 0;
69}
70
71void __lock_buffer(struct buffer_head *bh)
72{
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
75}
76EXPORT_SYMBOL(__lock_buffer);
77
78void unlock_buffer(struct buffer_head *bh)
79{
80 clear_bit_unlock(BH_Lock, &bh->b_state);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
83}
84EXPORT_SYMBOL(unlock_buffer);
85
86/*
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
90 */
91void __wait_on_buffer(struct buffer_head * bh)
92{
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94}
95EXPORT_SYMBOL(__wait_on_buffer);
96
97static void
98__clear_page_buffers(struct page *page)
99{
100 ClearPagePrivate(page);
101 set_page_private(page, 0);
102 page_cache_release(page);
103}
104
105
106static int quiet_error(struct buffer_head *bh)
107{
108 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
109 return 0;
110 return 1;
111}
112
113
114static void buffer_io_error(struct buffer_head *bh)
115{
116 char b[BDEVNAME_SIZE];
117 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118 bdevname(bh->b_bdev, b),
119 (unsigned long long)bh->b_blocknr);
120}
121
122/*
123 * End-of-IO handler helper function which does not touch the bh after
124 * unlocking it.
125 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126 * a race there is benign: unlock_buffer() only uses the bh's address for
127 * hashing after unlocking the buffer, so it doesn't actually touch the bh
128 * itself.
129 */
130static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
131{
132 if (uptodate) {
133 set_buffer_uptodate(bh);
134 } else {
135 /* This happens, due to failed READA attempts. */
136 clear_buffer_uptodate(bh);
137 }
138 unlock_buffer(bh);
139}
140
141/*
142 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
143 * unlock the buffer. This is what ll_rw_block uses too.
144 */
145void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
146{
147 __end_buffer_read_notouch(bh, uptodate);
148 put_bh(bh);
149}
150EXPORT_SYMBOL(end_buffer_read_sync);
151
152void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
153{
154 char b[BDEVNAME_SIZE];
155
156 if (uptodate) {
157 set_buffer_uptodate(bh);
158 } else {
159 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
160 buffer_io_error(bh);
161 printk(KERN_WARNING "lost page write due to "
162 "I/O error on %s\n",
163 bdevname(bh->b_bdev, b));
164 }
165 set_buffer_write_io_error(bh);
166 clear_buffer_uptodate(bh);
167 }
168 unlock_buffer(bh);
169 put_bh(bh);
170}
171EXPORT_SYMBOL(end_buffer_write_sync);
172
173/*
174 * Various filesystems appear to want __find_get_block to be non-blocking.
175 * But it's the page lock which protects the buffers. To get around this,
176 * we get exclusion from try_to_free_buffers with the blockdev mapping's
177 * private_lock.
178 *
179 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
180 * may be quite high. This code could TryLock the page, and if that
181 * succeeds, there is no need to take private_lock. (But if
182 * private_lock is contended then so is mapping->tree_lock).
183 */
184static struct buffer_head *
185__find_get_block_slow(struct block_device *bdev, sector_t block)
186{
187 struct inode *bd_inode = bdev->bd_inode;
188 struct address_space *bd_mapping = bd_inode->i_mapping;
189 struct buffer_head *ret = NULL;
190 pgoff_t index;
191 struct buffer_head *bh;
192 struct buffer_head *head;
193 struct page *page;
194 int all_mapped = 1;
195
196 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197 page = find_get_page(bd_mapping, index);
198 if (!page)
199 goto out;
200
201 spin_lock(&bd_mapping->private_lock);
202 if (!page_has_buffers(page))
203 goto out_unlock;
204 head = page_buffers(page);
205 bh = head;
206 do {
207 if (!buffer_mapped(bh))
208 all_mapped = 0;
209 else if (bh->b_blocknr == block) {
210 ret = bh;
211 get_bh(bh);
212 goto out_unlock;
213 }
214 bh = bh->b_this_page;
215 } while (bh != head);
216
217 /* we might be here because some of the buffers on this page are
218 * not mapped. This is due to various races between
219 * file io on the block device and getblk. It gets dealt with
220 * elsewhere, don't buffer_error if we had some unmapped buffers
221 */
222 if (all_mapped) {
223 printk("__find_get_block_slow() failed. "
224 "block=%llu, b_blocknr=%llu\n",
225 (unsigned long long)block,
226 (unsigned long long)bh->b_blocknr);
227 printk("b_state=0x%08lx, b_size=%zu\n",
228 bh->b_state, bh->b_size);
229 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
230 }
231out_unlock:
232 spin_unlock(&bd_mapping->private_lock);
233 page_cache_release(page);
234out:
235 return ret;
236}
237
238/* If invalidate_buffers() will trash dirty buffers, it means some kind
239 of fs corruption is going on. Trashing dirty data always implies losing
240 information that was supposed to be just stored on the physical layer
241 by the user.
242
243 Thus invalidate_buffers in general usage is not allowed to trash
244 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
245 be preserved. These buffers are simply skipped.
246
247 We also skip buffers which are still in use. For example this can
248 happen if a userspace program is reading the block device.
249
250 NOTE: In the case where the user removed a removable-media-disk even if
251 there's still dirty data not synced on disk (due to a bug in the device driver
252 or due to an error by the user), by not destroying the dirty buffers we could
253 generate corruption also on the next media inserted, thus a parameter is
254 necessary to handle this case in the most safe way possible (trying
255 to not corrupt also the new disk inserted with the data belonging to
256 the old now corrupted disk). Also for the ramdisk the natural thing
257 to do in order to release the ramdisk memory is to destroy dirty buffers.
258
259 These are two special cases. Normal usage requires the device driver
260 to issue a sync on the device (without waiting for I/O completion) and
261 then an invalidate_buffers call that doesn't trash dirty buffers.
262
263 For handling cache coherency with the blkdev pagecache the 'update' case
264 has been introduced. It is needed to re-read from disk any pinned
265 buffer. NOTE: re-reading from disk is destructive so we can do it only
266 when we assume nobody is changing the buffercache under our I/O and when
267 we think the disk contains more recent information than the buffercache.
268 The update == 1 pass marks the buffers we need to update, the update == 2
269 pass does the actual I/O. */
270void invalidate_bdev(struct block_device *bdev)
271{
272 struct address_space *mapping = bdev->bd_inode->i_mapping;
273
274 if (mapping->nrpages == 0)
275 return;
276
277 invalidate_bh_lrus();
278 invalidate_mapping_pages(mapping, 0, -1);
279}
280EXPORT_SYMBOL(invalidate_bdev);
281
282/*
283 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
284 */
285static void free_more_memory(void)
286{
287 struct zone *zone;
288 int nid;
289
290 wakeup_flusher_threads(1024);
291 yield();
292
293 for_each_online_node(nid) {
294 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
295 gfp_zone(GFP_NOFS), NULL,
296 &zone);
297 if (zone)
298 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
299 GFP_NOFS, NULL);
300 }
301}
302
303/*
304 * I/O completion handler for block_read_full_page() - pages
305 * which come unlocked at the end of I/O.
306 */
307static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
308{
309 unsigned long flags;
310 struct buffer_head *first;
311 struct buffer_head *tmp;
312 struct page *page;
313 int page_uptodate = 1;
314
315 BUG_ON(!buffer_async_read(bh));
316
317 page = bh->b_page;
318 if (uptodate) {
319 set_buffer_uptodate(bh);
320 } else {
321 clear_buffer_uptodate(bh);
322 if (!quiet_error(bh))
323 buffer_io_error(bh);
324 SetPageError(page);
325 }
326
327 /*
328 * Be _very_ careful from here on. Bad things can happen if
329 * two buffer heads end IO at almost the same time and both
330 * decide that the page is now completely done.
331 */
332 first = page_buffers(page);
333 local_irq_save(flags);
334 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
335 clear_buffer_async_read(bh);
336 unlock_buffer(bh);
337 tmp = bh;
338 do {
339 if (!buffer_uptodate(tmp))
340 page_uptodate = 0;
341 if (buffer_async_read(tmp)) {
342 BUG_ON(!buffer_locked(tmp));
343 goto still_busy;
344 }
345 tmp = tmp->b_this_page;
346 } while (tmp != bh);
347 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
348 local_irq_restore(flags);
349
350 /*
351 * If none of the buffers had errors and they are all
352 * uptodate then we can set the page uptodate.
353 */
354 if (page_uptodate && !PageError(page))
355 SetPageUptodate(page);
356 unlock_page(page);
357 return;
358
359still_busy:
360 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
361 local_irq_restore(flags);
362 return;
363}
364
365/*
366 * Completion handler for block_write_full_page() - pages which are unlocked
367 * during I/O, and which have PageWriteback cleared upon I/O completion.
368 */
369void end_buffer_async_write(struct buffer_head *bh, int uptodate)
370{
371 char b[BDEVNAME_SIZE];
372 unsigned long flags;
373 struct buffer_head *first;
374 struct buffer_head *tmp;
375 struct page *page;
376
377 BUG_ON(!buffer_async_write(bh));
378
379 page = bh->b_page;
380 if (uptodate) {
381 set_buffer_uptodate(bh);
382 } else {
383 if (!quiet_error(bh)) {
384 buffer_io_error(bh);
385 printk(KERN_WARNING "lost page write due to "
386 "I/O error on %s\n",
387 bdevname(bh->b_bdev, b));
388 }
389 set_bit(AS_EIO, &page->mapping->flags);
390 set_buffer_write_io_error(bh);
391 clear_buffer_uptodate(bh);
392 SetPageError(page);
393 }
394
395 first = page_buffers(page);
396 local_irq_save(flags);
397 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
398
399 clear_buffer_async_write(bh);
400 unlock_buffer(bh);
401 tmp = bh->b_this_page;
402 while (tmp != bh) {
403 if (buffer_async_write(tmp)) {
404 BUG_ON(!buffer_locked(tmp));
405 goto still_busy;
406 }
407 tmp = tmp->b_this_page;
408 }
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
411 end_page_writeback(page);
412 return;
413
414still_busy:
415 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
416 local_irq_restore(flags);
417 return;
418}
419EXPORT_SYMBOL(end_buffer_async_write);
420
421/*
422 * If a page's buffers are under async read-in (end_buffer_async_read
423 * completion) then there is a possibility that another thread of
424 * control could lock one of the buffers after it has completed
425 * but while some of the other buffers have not completed. This
426 * locked buffer would confuse end_buffer_async_read() into not unlocking
427 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
428 * that this buffer is not under async I/O.
429 *
430 * The page comes unlocked when it has no locked buffer_async buffers
431 * left.
432 *
433 * PageLocked prevents anyone starting new async I/O reads any of
434 * the buffers.
435 *
436 * PageWriteback is used to prevent simultaneous writeout of the same
437 * page.
438 *
439 * PageLocked prevents anyone from starting writeback of a page which is
440 * under read I/O (PageWriteback is only ever set against a locked page).
441 */
442static void mark_buffer_async_read(struct buffer_head *bh)
443{
444 bh->b_end_io = end_buffer_async_read;
445 set_buffer_async_read(bh);
446}
447
448static void mark_buffer_async_write_endio(struct buffer_head *bh,
449 bh_end_io_t *handler)
450{
451 bh->b_end_io = handler;
452 set_buffer_async_write(bh);
453}
454
455void mark_buffer_async_write(struct buffer_head *bh)
456{
457 mark_buffer_async_write_endio(bh, end_buffer_async_write);
458}
459EXPORT_SYMBOL(mark_buffer_async_write);
460
461
462/*
463 * fs/buffer.c contains helper functions for buffer-backed address space's
464 * fsync functions. A common requirement for buffer-based filesystems is
465 * that certain data from the backing blockdev needs to be written out for
466 * a successful fsync(). For example, ext2 indirect blocks need to be
467 * written back and waited upon before fsync() returns.
468 *
469 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
470 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
471 * management of a list of dependent buffers at ->i_mapping->private_list.
472 *
473 * Locking is a little subtle: try_to_free_buffers() will remove buffers
474 * from their controlling inode's queue when they are being freed. But
475 * try_to_free_buffers() will be operating against the *blockdev* mapping
476 * at the time, not against the S_ISREG file which depends on those buffers.
477 * So the locking for private_list is via the private_lock in the address_space
478 * which backs the buffers. Which is different from the address_space
479 * against which the buffers are listed. So for a particular address_space,
480 * mapping->private_lock does *not* protect mapping->private_list! In fact,
481 * mapping->private_list will always be protected by the backing blockdev's
482 * ->private_lock.
483 *
484 * Which introduces a requirement: all buffers on an address_space's
485 * ->private_list must be from the same address_space: the blockdev's.
486 *
487 * address_spaces which do not place buffers at ->private_list via these
488 * utility functions are free to use private_lock and private_list for
489 * whatever they want. The only requirement is that list_empty(private_list)
490 * be true at clear_inode() time.
491 *
492 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
493 * filesystems should do that. invalidate_inode_buffers() should just go
494 * BUG_ON(!list_empty).
495 *
496 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
497 * take an address_space, not an inode. And it should be called
498 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
499 * queued up.
500 *
501 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
502 * list if it is already on a list. Because if the buffer is on a list,
503 * it *must* already be on the right one. If not, the filesystem is being
504 * silly. This will save a ton of locking. But first we have to ensure
505 * that buffers are taken *off* the old inode's list when they are freed
506 * (presumably in truncate). That requires careful auditing of all
507 * filesystems (do it inside bforget()). It could also be done by bringing
508 * b_inode back.
509 */
510
511/*
512 * The buffer's backing address_space's private_lock must be held
513 */
514static void __remove_assoc_queue(struct buffer_head *bh)
515{
516 list_del_init(&bh->b_assoc_buffers);
517 WARN_ON(!bh->b_assoc_map);
518 if (buffer_write_io_error(bh))
519 set_bit(AS_EIO, &bh->b_assoc_map->flags);
520 bh->b_assoc_map = NULL;
521}
522
523int inode_has_buffers(struct inode *inode)
524{
525 return !list_empty(&inode->i_data.private_list);
526}
527
528/*
529 * osync is designed to support O_SYNC io. It waits synchronously for
530 * all already-submitted IO to complete, but does not queue any new
531 * writes to the disk.
532 *
533 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
534 * you dirty the buffers, and then use osync_inode_buffers to wait for
535 * completion. Any other dirty buffers which are not yet queued for
536 * write will not be flushed to disk by the osync.
537 */
538static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
539{
540 struct buffer_head *bh;
541 struct list_head *p;
542 int err = 0;
543
544 spin_lock(lock);
545repeat:
546 list_for_each_prev(p, list) {
547 bh = BH_ENTRY(p);
548 if (buffer_locked(bh)) {
549 get_bh(bh);
550 spin_unlock(lock);
551 wait_on_buffer(bh);
552 if (!buffer_uptodate(bh))
553 err = -EIO;
554 brelse(bh);
555 spin_lock(lock);
556 goto repeat;
557 }
558 }
559 spin_unlock(lock);
560 return err;
561}
562
563static void do_thaw_all(struct work_struct *work)
564{
565 struct super_block *sb;
566 char b[BDEVNAME_SIZE];
567
568 spin_lock(&sb_lock);
569restart:
570 list_for_each_entry(sb, &super_blocks, s_list) {
571 sb->s_count++;
572 spin_unlock(&sb_lock);
573 down_read(&sb->s_umount);
574 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
575 printk(KERN_WARNING "Emergency Thaw on %s\n",
576 bdevname(sb->s_bdev, b));
577 up_read(&sb->s_umount);
578 spin_lock(&sb_lock);
579 if (__put_super_and_need_restart(sb))
580 goto restart;
581 }
582 spin_unlock(&sb_lock);
583 kfree(work);
584 printk(KERN_WARNING "Emergency Thaw complete\n");
585}
586
587/**
588 * emergency_thaw_all -- forcibly thaw every frozen filesystem
589 *
590 * Used for emergency unfreeze of all filesystems via SysRq
591 */
592void emergency_thaw_all(void)
593{
594 struct work_struct *work;
595
596 work = kmalloc(sizeof(*work), GFP_ATOMIC);
597 if (work) {
598 INIT_WORK(work, do_thaw_all);
599 schedule_work(work);
600 }
601}
602
603/**
604 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
605 * @mapping: the mapping which wants those buffers written
606 *
607 * Starts I/O against the buffers at mapping->private_list, and waits upon
608 * that I/O.
609 *
610 * Basically, this is a convenience function for fsync().
611 * @mapping is a file or directory which needs those buffers to be written for
612 * a successful fsync().
613 */
614int sync_mapping_buffers(struct address_space *mapping)
615{
616 struct address_space *buffer_mapping = mapping->assoc_mapping;
617
618 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
619 return 0;
620
621 return fsync_buffers_list(&buffer_mapping->private_lock,
622 &mapping->private_list);
623}
624EXPORT_SYMBOL(sync_mapping_buffers);
625
626/*
627 * Called when we've recently written block `bblock', and it is known that
628 * `bblock' was for a buffer_boundary() buffer. This means that the block at
629 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
630 * dirty, schedule it for IO. So that indirects merge nicely with their data.
631 */
632void write_boundary_block(struct block_device *bdev,
633 sector_t bblock, unsigned blocksize)
634{
635 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
636 if (bh) {
637 if (buffer_dirty(bh))
638 ll_rw_block(WRITE, 1, &bh);
639 put_bh(bh);
640 }
641}
642
643void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
644{
645 struct address_space *mapping = inode->i_mapping;
646 struct address_space *buffer_mapping = bh->b_page->mapping;
647
648 mark_buffer_dirty(bh);
649 if (!mapping->assoc_mapping) {
650 mapping->assoc_mapping = buffer_mapping;
651 } else {
652 BUG_ON(mapping->assoc_mapping != buffer_mapping);
653 }
654 if (!bh->b_assoc_map) {
655 spin_lock(&buffer_mapping->private_lock);
656 list_move_tail(&bh->b_assoc_buffers,
657 &mapping->private_list);
658 bh->b_assoc_map = mapping;
659 spin_unlock(&buffer_mapping->private_lock);
660 }
661}
662EXPORT_SYMBOL(mark_buffer_dirty_inode);
663
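/*
 * A minimal usage sketch, not part of buffer.c: a filesystem that keeps
 * per-inode metadata (e.g. indirect blocks) in the block device's page
 * cache can queue such a buffer with mark_buffer_dirty_inode() and later
 * flush the whole list from its fsync path with sync_mapping_buffers().
 * The sb_bread() wrapper comes from <linux/buffer_head.h>; the function
 * name and block number below are illustrative assumptions only.
 */
static int example_dirty_metadata(struct inode *inode, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(inode->i_sb, blocknr);

	if (!bh)
		return -EIO;
	/* ... modify bh->b_data under whatever locking the fs requires ... */
	mark_buffer_dirty_inode(bh, inode);	/* queue on inode's private_list */
	brelse(bh);

	/* Usually done later, from the filesystem's ->fsync(): */
	return sync_mapping_buffers(inode->i_mapping);
}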
664/*
665 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
666 * dirty.
667 *
668 * If warn is true, then emit a warning if the page is not uptodate and has
669 * not been truncated.
670 */
671static void __set_page_dirty(struct page *page,
672 struct address_space *mapping, int warn)
673{
674 spin_lock_irq(&mapping->tree_lock);
675 if (page->mapping) { /* Race with truncate? */
676 WARN_ON_ONCE(warn && !PageUptodate(page));
677 account_page_dirtied(page, mapping);
678 radix_tree_tag_set(&mapping->page_tree,
679 page_index(page), PAGECACHE_TAG_DIRTY);
680 }
681 spin_unlock_irq(&mapping->tree_lock);
682 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
683}
684
685/*
686 * Add a page to the dirty page list.
687 *
688 * It is a sad fact of life that this function is called from several places
689 * deeply under spinlocking. It may not sleep.
690 *
691 * If the page has buffers, the uptodate buffers are set dirty, to preserve
692 * dirty-state coherency between the page and the buffers. If the page does
693 * not have buffers then when they are later attached they will all be set
694 * dirty.
695 *
696 * The buffers are dirtied before the page is dirtied. There's a small race
697 * window in which a writepage caller may see the page cleanness but not the
698 * buffer dirtiness. That's fine. If this code were to set the page dirty
699 * before the buffers, a concurrent writepage caller could clear the page dirty
700 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
701 * page on the dirty page list.
702 *
703 * We use private_lock to lock against try_to_free_buffers while using the
704 * page's buffer list. Also use this to protect against clean buffers being
705 * added to the page after it was set dirty.
706 *
707 * FIXME: may need to call ->reservepage here as well. That's rather up to the
708 * address_space though.
709 */
710int __set_page_dirty_buffers(struct page *page)
711{
712 int newly_dirty;
713 struct address_space *mapping = page_mapping(page);
714
715 if (unlikely(!mapping))
716 return !TestSetPageDirty(page);
717
718 spin_lock(&mapping->private_lock);
719 if (page_has_buffers(page)) {
720 struct buffer_head *head = page_buffers(page);
721 struct buffer_head *bh = head;
722
723 do {
724 set_buffer_dirty(bh);
725 bh = bh->b_this_page;
726 } while (bh != head);
727 }
728 newly_dirty = !TestSetPageDirty(page);
729 spin_unlock(&mapping->private_lock);
730
731 if (newly_dirty)
732 __set_page_dirty(page, mapping, 1);
733 return newly_dirty;
734}
735EXPORT_SYMBOL(__set_page_dirty_buffers);
736
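/*
 * Sketch, not part of buffer.c: buffer-backed filesystems normally wire
 * __set_page_dirty_buffers() straight into their address_space_operations,
 * so that dirtying a page through the aops keeps page and buffer dirty
 * state coherent.  The structure name below is an illustrative assumption.
 */
static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
};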
737/*
738 * Write out and wait upon a list of buffers.
739 *
740 * We have conflicting pressures: we want to make sure that all
741 * initially dirty buffers get waited on, but that any subsequently
742 * dirtied buffers don't. After all, we don't want fsync to last
743 * forever if somebody is actively writing to the file.
744 *
745 * Do this in two main stages: first we copy dirty buffers to a
746 * temporary inode list, queueing the writes as we go. Then we clean
747 * up, waiting for those writes to complete.
748 *
749 * During this second stage, any subsequent updates to the file may end
750 * up refiling the buffer on the original inode's dirty list again, so
751 * there is a chance we will end up with a buffer queued for write but
752 * not yet completed on that list. So, as a final cleanup we go through
753 * the osync code to catch these locked, dirty buffers without requeuing
754 * any newly dirty buffers for write.
755 */
756static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
757{
758 struct buffer_head *bh;
759 struct list_head tmp;
760 struct address_space *mapping, *prev_mapping = NULL;
761 int err = 0, err2;
762
763 INIT_LIST_HEAD(&tmp);
764
765 spin_lock(lock);
766 while (!list_empty(list)) {
767 bh = BH_ENTRY(list->next);
768 mapping = bh->b_assoc_map;
769 __remove_assoc_queue(bh);
770 /* Avoid race with mark_buffer_dirty_inode() which does
771 * a lockless check and we rely on seeing the dirty bit */
772 smp_mb();
773 if (buffer_dirty(bh) || buffer_locked(bh)) {
774 list_add(&bh->b_assoc_buffers, &tmp);
775 bh->b_assoc_map = mapping;
776 if (buffer_dirty(bh)) {
777 get_bh(bh);
778 spin_unlock(lock);
779 /*
780 * Ensure any pending I/O completes so that
781 * ll_rw_block() actually writes the current
782 * contents - it is a noop if I/O is still in
783 * flight on potentially older contents.
784 */
785 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
786
787 /*
788 * Kick off IO for the previous mapping. Note
789 * that we will not run the very last mapping,
790 * wait_on_buffer() will do that for us
791 * through sync_buffer().
792 */
793 if (prev_mapping && prev_mapping != mapping)
794 blk_run_address_space(prev_mapping);
795 prev_mapping = mapping;
796
797 brelse(bh);
798 spin_lock(lock);
799 }
800 }
801 }
802
803 while (!list_empty(&tmp)) {
804 bh = BH_ENTRY(tmp.prev);
805 get_bh(bh);
806 mapping = bh->b_assoc_map;
807 __remove_assoc_queue(bh);
808 /* Avoid race with mark_buffer_dirty_inode() which does
809 * a lockless check and we rely on seeing the dirty bit */
810 smp_mb();
811 if (buffer_dirty(bh)) {
812 list_add(&bh->b_assoc_buffers,
813 &mapping->private_list);
814 bh->b_assoc_map = mapping;
815 }
816 spin_unlock(lock);
817 wait_on_buffer(bh);
818 if (!buffer_uptodate(bh))
819 err = -EIO;
820 brelse(bh);
821 spin_lock(lock);
822 }
823
824 spin_unlock(lock);
825 err2 = osync_buffers_list(lock, list);
826 if (err)
827 return err;
828 else
829 return err2;
830}
831
832/*
833 * Invalidate any and all dirty buffers on a given inode. We are
834 * probably unmounting the fs, but that doesn't mean we have already
835 * done a sync(). Just drop the buffers from the inode list.
836 *
837 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
838 * assumes that all the buffers are against the blockdev. Not true
839 * for reiserfs.
840 */
841void invalidate_inode_buffers(struct inode *inode)
842{
843 if (inode_has_buffers(inode)) {
844 struct address_space *mapping = &inode->i_data;
845 struct list_head *list = &mapping->private_list;
846 struct address_space *buffer_mapping = mapping->assoc_mapping;
847
848 spin_lock(&buffer_mapping->private_lock);
849 while (!list_empty(list))
850 __remove_assoc_queue(BH_ENTRY(list->next));
851 spin_unlock(&buffer_mapping->private_lock);
852 }
853}
854EXPORT_SYMBOL(invalidate_inode_buffers);
855
856/*
857 * Remove any clean buffers from the inode's buffer list. This is called
858 * when we're trying to free the inode itself. Those buffers can pin it.
859 *
860 * Returns true if all buffers were removed.
861 */
862int remove_inode_buffers(struct inode *inode)
863{
864 int ret = 1;
865
866 if (inode_has_buffers(inode)) {
867 struct address_space *mapping = &inode->i_data;
868 struct list_head *list = &mapping->private_list;
869 struct address_space *buffer_mapping = mapping->assoc_mapping;
870
871 spin_lock(&buffer_mapping->private_lock);
872 while (!list_empty(list)) {
873 struct buffer_head *bh = BH_ENTRY(list->next);
874 if (buffer_dirty(bh)) {
875 ret = 0;
876 break;
877 }
878 __remove_assoc_queue(bh);
879 }
880 spin_unlock(&buffer_mapping->private_lock);
881 }
882 return ret;
883}
884
885/*
886 * Create the appropriate buffers when given a page for data area and
887 * the size of each buffer.. Use the bh->b_this_page linked list to
888 * follow the buffers created. Return NULL if unable to create more
889 * buffers.
890 *
891 * The retry flag is used to differentiate async IO (paging, swapping)
892 * which may not fail from ordinary buffer allocations.
893 */
894struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
895 int retry)
896{
897 struct buffer_head *bh, *head;
898 long offset;
899
900try_again:
901 head = NULL;
902 offset = PAGE_SIZE;
903 while ((offset -= size) >= 0) {
904 bh = alloc_buffer_head(GFP_NOFS);
905 if (!bh)
906 goto no_grow;
907
908 bh->b_bdev = NULL;
909 bh->b_this_page = head;
910 bh->b_blocknr = -1;
911 head = bh;
912
913 bh->b_state = 0;
914 atomic_set(&bh->b_count, 0);
915 bh->b_private = NULL;
916 bh->b_size = size;
917
918 /* Link the buffer to its page */
919 set_bh_page(bh, page, offset);
920
921 init_buffer(bh, NULL, NULL);
922 }
923 return head;
924/*
925 * In case anything failed, we just free everything we got.
926 */
927no_grow:
928 if (head) {
929 do {
930 bh = head;
931 head = head->b_this_page;
932 free_buffer_head(bh);
933 } while (head);
934 }
935
936 /*
937 * Return failure for non-async IO requests. Async IO requests
938 * are not allowed to fail, so we have to wait until buffer heads
939 * become available. But we don't want tasks sleeping with
940 * partially complete buffers, so all were released above.
941 */
942 if (!retry)
943 return NULL;
944
945 /* We're _really_ low on memory. Now we just
946 * wait for old buffer heads to become free due to
947 * finishing IO. Since this is an async request and
948 * the reserve list is empty, we're sure there are
949 * async buffer heads in use.
950 */
951 free_more_memory();
952 goto try_again;
953}
954EXPORT_SYMBOL_GPL(alloc_page_buffers);
955
956static inline void
957link_dev_buffers(struct page *page, struct buffer_head *head)
958{
959 struct buffer_head *bh, *tail;
960
961 bh = head;
962 do {
963 tail = bh;
964 bh = bh->b_this_page;
965 } while (bh);
966 tail->b_this_page = head;
967 attach_page_buffers(page, head);
968}
969
970/*
971 * Initialise the state of a blockdev page's buffers.
972 */
973static void
974init_page_buffers(struct page *page, struct block_device *bdev,
975 sector_t block, int size)
976{
977 struct buffer_head *head = page_buffers(page);
978 struct buffer_head *bh = head;
979 int uptodate = PageUptodate(page);
980
981 do {
982 if (!buffer_mapped(bh)) {
983 init_buffer(bh, NULL, NULL);
984 bh->b_bdev = bdev;
985 bh->b_blocknr = block;
986 if (uptodate)
987 set_buffer_uptodate(bh);
988 set_buffer_mapped(bh);
989 }
990 block++;
991 bh = bh->b_this_page;
992 } while (bh != head);
993}
994
995/*
996 * Create the page-cache page that contains the requested block.
997 *
998 * This is used purely for blockdev mappings.
999 */
1000static struct page *
1001grow_dev_page(struct block_device *bdev, sector_t block,
1002 pgoff_t index, int size)
1003{
1004 struct inode *inode = bdev->bd_inode;
1005 struct page *page;
1006 struct buffer_head *bh;
1007
1008 page = find_or_create_page(inode->i_mapping, index,
1009 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1010 if (!page)
1011 return NULL;
1012
1013 BUG_ON(!PageLocked(page));
1014
1015 if (page_has_buffers(page)) {
1016 bh = page_buffers(page);
1017 if (bh->b_size == size) {
1018 init_page_buffers(page, bdev, block, size);
1019 return page;
1020 }
1021 if (!try_to_free_buffers(page))
1022 goto failed;
1023 }
1024
1025 /*
1026 * Allocate some buffers for this page
1027 */
1028 bh = alloc_page_buffers(page, size, 0);
1029 if (!bh)
1030 goto failed;
1031
1032 /*
1033 * Link the page to the buffers and initialise them. Take the
1034 * lock to be atomic wrt __find_get_block(), which does not
1035 * run under the page lock.
1036 */
1037 spin_lock(&inode->i_mapping->private_lock);
1038 link_dev_buffers(page, bh);
1039 init_page_buffers(page, bdev, block, size);
1040 spin_unlock(&inode->i_mapping->private_lock);
1041 return page;
1042
1043failed:
1044 BUG();
1045 unlock_page(page);
1046 page_cache_release(page);
1047 return NULL;
1048}
1049
1050/*
1051 * Create buffers for the specified block device block's page. If
1052 * that page was dirty, the buffers are set dirty also.
1053 */
1054static int
1055grow_buffers(struct block_device *bdev, sector_t block, int size)
1056{
1057 struct page *page;
1058 pgoff_t index;
1059 int sizebits;
1060
1061 sizebits = -1;
1062 do {
1063 sizebits++;
1064 } while ((size << sizebits) < PAGE_SIZE);
1065
1066 index = block >> sizebits;
1067
1068 /*
1069 * Check for a block which wants to lie outside our maximum possible
1070 * pagecache index. (this comparison is done using sector_t types).
1071 */
1072 if (unlikely(index != block >> sizebits)) {
1073 char b[BDEVNAME_SIZE];
1074
1075 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1076 "device %s\n",
1077 __func__, (unsigned long long)block,
1078 bdevname(bdev, b));
1079 return -EIO;
1080 }
1081 block = index << sizebits;
1082 /* Create a page with the proper size buffers.. */
1083 page = grow_dev_page(bdev, block, index, size);
1084 if (!page)
1085 return 0;
1086 unlock_page(page);
1087 page_cache_release(page);
1088 return 1;
1089}
1090
1091static struct buffer_head *
1092__getblk_slow(struct block_device *bdev, sector_t block, int size)
1093{
1094 /* Size must be multiple of hard sectorsize */
1095 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1096 (size < 512 || size > PAGE_SIZE))) {
1097 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1098 size);
1099 printk(KERN_ERR "logical block size: %d\n",
1100 bdev_logical_block_size(bdev));
1101
1102 dump_stack();
1103 return NULL;
1104 }
1105
1106 for (;;) {
1107 struct buffer_head * bh;
1108 int ret;
1109
1110 bh = __find_get_block(bdev, block, size);
1111 if (bh)
1112 return bh;
1113
1114 ret = grow_buffers(bdev, block, size);
1115 if (ret < 0)
1116 return NULL;
1117 if (ret == 0)
1118 free_more_memory();
1119 }
1120}
1121
1122/*
1123 * The relationship between dirty buffers and dirty pages:
1124 *
1125 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1126 * the page is tagged dirty in its radix tree.
1127 *
1128 * At all times, the dirtiness of the buffers represents the dirtiness of
1129 * subsections of the page. If the page has buffers, the page dirty bit is
1130 * merely a hint about the true dirty state.
1131 *
1132 * When a page is set dirty in its entirety, all its buffers are marked dirty
1133 * (if the page has buffers).
1134 *
1135 * When a buffer is marked dirty, its page is dirtied, but the page's other
1136 * buffers are not.
1137 *
1138 * Also. When blockdev buffers are explicitly read with bread(), they
1139 * individually become uptodate. But their backing page remains not
1140 * uptodate - even if all of its buffers are uptodate. A subsequent
1141 * block_read_full_page() against that page will discover all the uptodate
1142 * buffers, will set the page uptodate and will perform no I/O.
1143 */
1144
1145/**
1146 * mark_buffer_dirty - mark a buffer_head as needing writeout
1147 * @bh: the buffer_head to mark dirty
1148 *
1149 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1150 * backing page dirty, then tag the page as dirty in its address_space's radix
1151 * tree and then attach the address_space's inode to its superblock's dirty
1152 * inode list.
1153 *
1154 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1155 * mapping->tree_lock and the global inode_lock.
1156 */
1157void mark_buffer_dirty(struct buffer_head *bh)
1158{
1159 WARN_ON_ONCE(!buffer_uptodate(bh));
1160
1161 /*
1162 * Very *carefully* optimize the it-is-already-dirty case.
1163 *
1164 * Don't let the final "is it dirty" escape to before we
1165 * perhaps modified the buffer.
1166 */
1167 if (buffer_dirty(bh)) {
1168 smp_mb();
1169 if (buffer_dirty(bh))
1170 return;
1171 }
1172
1173 if (!test_set_buffer_dirty(bh)) {
1174 struct page *page = bh->b_page;
1175 if (!TestSetPageDirty(page)) {
1176 struct address_space *mapping = page_mapping(page);
1177 if (mapping)
1178 __set_page_dirty(page, mapping, 0);
1179 }
1180 }
1181}
1182EXPORT_SYMBOL(mark_buffer_dirty);
1183
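/*
 * A minimal write-side sketch, not part of buffer.c: a block is typically
 * created or overwritten by getting its buffer_head with sb_getblk() (which,
 * like __getblk(), does not fail for a valid block), filling b_data under
 * the buffer lock, marking it uptodate and dirty, and dropping the
 * reference.  sb_getblk() comes from <linux/buffer_head.h>; the function
 * name is an illustrative assumption.
 */
static void example_zero_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_getblk(sb, blocknr);

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* new contents */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);			/* writeback picks it up later */
	brelse(bh);
}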
1184/*
1185 * Decrement a buffer_head's reference count. If all buffers against a page
1186 * have zero reference count, are clean and unlocked, and if the page is clean
1187 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1188 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1189 * a page but it ends up not being freed, and buffers may later be reattached).
1190 */
1191void __brelse(struct buffer_head * buf)
1192{
1193 if (atomic_read(&buf->b_count)) {
1194 put_bh(buf);
1195 return;
1196 }
1197 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1198}
1199EXPORT_SYMBOL(__brelse);
1200
1201/*
1202 * bforget() is like brelse(), except it discards any
1203 * potentially dirty data.
1204 */
1205void __bforget(struct buffer_head *bh)
1206{
1207 clear_buffer_dirty(bh);
1208 if (bh->b_assoc_map) {
1209 struct address_space *buffer_mapping = bh->b_page->mapping;
1210
1211 spin_lock(&buffer_mapping->private_lock);
1212 list_del_init(&bh->b_assoc_buffers);
1213 bh->b_assoc_map = NULL;
1214 spin_unlock(&buffer_mapping->private_lock);
1215 }
1216 __brelse(bh);
1217}
1218EXPORT_SYMBOL(__bforget);
1219
1220static struct buffer_head *__bread_slow(struct buffer_head *bh)
1221{
1222 lock_buffer(bh);
1223 if (buffer_uptodate(bh)) {
1224 unlock_buffer(bh);
1225 return bh;
1226 } else {
1227 get_bh(bh);
1228 bh->b_end_io = end_buffer_read_sync;
1229 submit_bh(READ, bh);
1230 wait_on_buffer(bh);
1231 if (buffer_uptodate(bh))
1232 return bh;
1233 }
1234 brelse(bh);
1235 return NULL;
1236}
1237
1238/*
1239 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1240 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1241 * refcount elevated by one when they're in an LRU. A buffer can only appear
1242 * once in a particular CPU's LRU. A single buffer can be present in multiple
1243 * CPU's LRUs at the same time.
1244 *
1245 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1246 * sb_find_get_block().
1247 *
1248 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1249 * a local interrupt disable for that.
1250 */
1251
1252#define BH_LRU_SIZE 8
1253
1254struct bh_lru {
1255 struct buffer_head *bhs[BH_LRU_SIZE];
1256};
1257
1258static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1259
1260#ifdef CONFIG_SMP
1261#define bh_lru_lock() local_irq_disable()
1262#define bh_lru_unlock() local_irq_enable()
1263#else
1264#define bh_lru_lock() preempt_disable()
1265#define bh_lru_unlock() preempt_enable()
1266#endif
1267
1268static inline void check_irqs_on(void)
1269{
1270#ifdef irqs_disabled
1271 BUG_ON(irqs_disabled());
1272#endif
1273}
1274
1275/*
1276 * The LRU management algorithm is dopey-but-simple. Sorry.
1277 */
1278static void bh_lru_install(struct buffer_head *bh)
1279{
1280 struct buffer_head *evictee = NULL;
1281 struct bh_lru *lru;
1282
1283 check_irqs_on();
1284 bh_lru_lock();
1285 lru = &__get_cpu_var(bh_lrus);
1286 if (lru->bhs[0] != bh) {
1287 struct buffer_head *bhs[BH_LRU_SIZE];
1288 int in;
1289 int out = 0;
1290
1291 get_bh(bh);
1292 bhs[out++] = bh;
1293 for (in = 0; in < BH_LRU_SIZE; in++) {
1294 struct buffer_head *bh2 = lru->bhs[in];
1295
1296 if (bh2 == bh) {
1297 __brelse(bh2);
1298 } else {
1299 if (out >= BH_LRU_SIZE) {
1300 BUG_ON(evictee != NULL);
1301 evictee = bh2;
1302 } else {
1303 bhs[out++] = bh2;
1304 }
1305 }
1306 }
1307 while (out < BH_LRU_SIZE)
1308 bhs[out++] = NULL;
1309 memcpy(lru->bhs, bhs, sizeof(bhs));
1310 }
1311 bh_lru_unlock();
1312
1313 if (evictee)
1314 __brelse(evictee);
1315}
1316
1317/*
1318 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1319 */
1320static struct buffer_head *
1321lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1322{
1323 struct buffer_head *ret = NULL;
1324 struct bh_lru *lru;
1325 unsigned int i;
1326
1327 check_irqs_on();
1328 bh_lru_lock();
1329 lru = &__get_cpu_var(bh_lrus);
1330 for (i = 0; i < BH_LRU_SIZE; i++) {
1331 struct buffer_head *bh = lru->bhs[i];
1332
1333 if (bh && bh->b_bdev == bdev &&
1334 bh->b_blocknr == block && bh->b_size == size) {
1335 if (i) {
1336 while (i) {
1337 lru->bhs[i] = lru->bhs[i - 1];
1338 i--;
1339 }
1340 lru->bhs[0] = bh;
1341 }
1342 get_bh(bh);
1343 ret = bh;
1344 break;
1345 }
1346 }
1347 bh_lru_unlock();
1348 return ret;
1349}
1350
1351/*
1352 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1353 * it in the LRU and mark it as accessed. If it is not present then return
1354 * NULL
1355 */
1356struct buffer_head *
1357__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1358{
1359 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1360
1361 if (bh == NULL) {
1362 bh = __find_get_block_slow(bdev, block);
1363 if (bh)
1364 bh_lru_install(bh);
1365 }
1366 if (bh)
1367 touch_buffer(bh);
1368 return bh;
1369}
1370EXPORT_SYMBOL(__find_get_block);
1371
1372/*
1373 * __getblk will locate (and, if necessary, create) the buffer_head
1374 * which corresponds to the passed block_device, block and size. The
1375 * returned buffer has its reference count incremented.
1376 *
1377 * __getblk() cannot fail - it just keeps trying. If you pass it an
1378 * illegal block number, __getblk() will happily return a buffer_head
1379 * which represents the non-existent block. Very weird.
1380 *
1381 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1382 * attempt is failing. FIXME, perhaps?
1383 */
1384struct buffer_head *
1385__getblk(struct block_device *bdev, sector_t block, unsigned size)
1386{
1387 struct buffer_head *bh = __find_get_block(bdev, block, size);
1388
1389 might_sleep();
1390 if (bh == NULL)
1391 bh = __getblk_slow(bdev, block, size);
1392 return bh;
1393}
1394EXPORT_SYMBOL(__getblk);
1395
1396/*
1397 * Do async read-ahead on a buffer..
1398 */
1399void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1400{
1401 struct buffer_head *bh = __getblk(bdev, block, size);
1402 if (likely(bh)) {
1403 ll_rw_block(READA, 1, &bh);
1404 brelse(bh);
1405 }
1406}
1407EXPORT_SYMBOL(__breadahead);
1408
1409/**
1410 * __bread() - reads a specified block and returns the bh
1411 * @bdev: the block_device to read from
1412 * @block: number of block
1413 * @size: size (in bytes) to read
1414 *
1415 * Reads a specified block, and returns buffer head that contains it.
1416 * It returns NULL if the block was unreadable.
1417 */
1418struct buffer_head *
1419__bread(struct block_device *bdev, sector_t block, unsigned size)
1420{
1421 struct buffer_head *bh = __getblk(bdev, block, size);
1422
1423 if (likely(bh) && !buffer_uptodate(bh))
1424 bh = __bread_slow(bh);
1425 return bh;
1426}
1427EXPORT_SYMBOL(__bread);
1428
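/*
 * A minimal read-side sketch, not part of buffer.c: __breadahead() starts
 * asynchronous read-ahead for a block that will probably be needed soon,
 * while __bread() returns an uptodate buffer or NULL if the block was
 * unreadable.  The sb_breadahead()/sb_bread() wrappers come from
 * <linux/buffer_head.h>; the function name and the "+ 1" read-ahead
 * heuristic are illustrative assumptions.
 */
static int example_read_block(struct super_block *sb, sector_t blocknr,
			      void *dst, size_t len)
{
	struct buffer_head *bh;

	sb_breadahead(sb, blocknr + 1);		/* async hint, no waiting */
	bh = sb_bread(sb, blocknr);		/* synchronous, uptodate on success */
	if (!bh)
		return -EIO;
	memcpy(dst, bh->b_data, min_t(size_t, len, bh->b_size));
	brelse(bh);				/* drop the reference from sb_bread() */
	return 0;
}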
1429/*
1430 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1431 * This doesn't race because it runs in each cpu either in irq
1432 * or with preempt disabled.
1433 */
1434static void invalidate_bh_lru(void *arg)
1435{
1436 struct bh_lru *b = &get_cpu_var(bh_lrus);
1437 int i;
1438
1439 for (i = 0; i < BH_LRU_SIZE; i++) {
1440 brelse(b->bhs[i]);
1441 b->bhs[i] = NULL;
1442 }
1443 put_cpu_var(bh_lrus);
1444}
1445
1446void invalidate_bh_lrus(void)
1447{
1448 on_each_cpu(invalidate_bh_lru, NULL, 1);
1449}
1450EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1451
1452void set_bh_page(struct buffer_head *bh,
1453 struct page *page, unsigned long offset)
1454{
1455 bh->b_page = page;
1456 BUG_ON(offset >= PAGE_SIZE);
1457 if (PageHighMem(page))
1458 /*
1459 * This catches illegal uses and preserves the offset:
1460 */
1461 bh->b_data = (char *)(0 + offset);
1462 else
1463 bh->b_data = page_address(page) + offset;
1464}
1465EXPORT_SYMBOL(set_bh_page);
1466
1467/*
1468 * Called when truncating a buffer on a page completely.
1469 */
1470static void discard_buffer(struct buffer_head * bh)
1471{
1472 lock_buffer(bh);
1473 clear_buffer_dirty(bh);
1474 bh->b_bdev = NULL;
1475 clear_buffer_mapped(bh);
1476 clear_buffer_req(bh);
1477 clear_buffer_new(bh);
1478 clear_buffer_delay(bh);
1479 clear_buffer_unwritten(bh);
1480 unlock_buffer(bh);
1481}
1482
1483/**
1484 * block_invalidatepage - invalidate part or all of a buffer-backed page
1485 *
1486 * @page: the page which is affected
1487 * @offset: the index of the truncation point
1488 *
1489 * block_invalidatepage() is called when all or part of the page has become
1490 * invalidatedby a truncate operation.
1491 *
1492 * block_invalidatepage() does not have to release all buffers, but it must
1493 * ensure that no dirty buffer is left outside @offset and that no I/O
1494 * is underway against any of the blocks which are outside the truncation
1495 * point. Because the caller is about to free (and possibly reuse) those
1496 * blocks on-disk.
1497 */
1498void block_invalidatepage(struct page *page, unsigned long offset)
1499{
1500 struct buffer_head *head, *bh, *next;
1501 unsigned int curr_off = 0;
1502
1503 BUG_ON(!PageLocked(page));
1504 if (!page_has_buffers(page))
1505 goto out;
1506
1507 head = page_buffers(page);
1508 bh = head;
1509 do {
1510 unsigned int next_off = curr_off + bh->b_size;
1511 next = bh->b_this_page;
1512
1513 /*
1514 * is this block fully invalidated?
1515 */
1516 if (offset <= curr_off)
1517 discard_buffer(bh);
1518 curr_off = next_off;
1519 bh = next;
1520 } while (bh != head);
1521
1522 /*
1523 * We release buffers only if the entire page is being invalidated.
1524 * The get_block cached value has been unconditionally invalidated,
1525 * so real IO is not possible anymore.
1526 */
1527 if (offset == 0)
1528 try_to_release_page(page, 0);
1529out:
1530 return;
1531}
1532EXPORT_SYMBOL(block_invalidatepage);
1533
1534/*
1535 * We attach and possibly dirty the buffers atomically wrt
1536 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1537 * is already excluded via the page lock.
1538 */
1539void create_empty_buffers(struct page *page,
1540 unsigned long blocksize, unsigned long b_state)
1541{
1542 struct buffer_head *bh, *head, *tail;
1543
1544 head = alloc_page_buffers(page, blocksize, 1);
1545 bh = head;
1546 do {
1547 bh->b_state |= b_state;
1548 tail = bh;
1549 bh = bh->b_this_page;
1550 } while (bh);
1551 tail->b_this_page = head;
1552
1553 spin_lock(&page->mapping->private_lock);
1554 if (PageUptodate(page) || PageDirty(page)) {
1555 bh = head;
1556 do {
1557 if (PageDirty(page))
1558 set_buffer_dirty(bh);
1559 if (PageUptodate(page))
1560 set_buffer_uptodate(bh);
1561 bh = bh->b_this_page;
1562 } while (bh != head);
1563 }
1564 attach_page_buffers(page, head);
1565 spin_unlock(&page->mapping->private_lock);
1566}
1567EXPORT_SYMBOL(create_empty_buffers);
1568
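/*
 * Sketch, not part of buffer.c: readpage/writepage style paths that need
 * per-block state attach buffers first if the page has none, much as
 * __block_write_full_page() and __block_prepare_write() below do.  The
 * helper name is an illustrative assumption; 1 << inode->i_blkbits is the
 * usual per-inode block size.
 */
static void example_ensure_buffers(struct inode *inode, struct page *page)
{
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
}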
1569/*
1570 * We are taking a block for data and we don't want any output from any
1571 * buffer-cache aliases starting from return from that function and
1572 * until the moment when something will explicitly mark the buffer
1573 * dirty (hopefully that will not happen until we will free that block ;-)
1574 * We don't even need to mark it not-uptodate - nobody can expect
1575 * anything from a newly allocated buffer anyway. We used to use
1576 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1577 * don't want to mark the alias unmapped, for example - it would confuse
1578 * anyone who might pick it with bread() afterwards...
1579 *
1580 * Also.. Note that bforget() doesn't lock the buffer. So there can
1581 * be writeout I/O going on against recently-freed buffers. We don't
1582 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1583 * only if we really need to. That happens here.
1584 */
1585void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1586{
1587 struct buffer_head *old_bh;
1588
1589 might_sleep();
1590
1591 old_bh = __find_get_block_slow(bdev, block);
1592 if (old_bh) {
1593 clear_buffer_dirty(old_bh);
1594 wait_on_buffer(old_bh);
1595 clear_buffer_req(old_bh);
1596 __brelse(old_bh);
1597 }
1598}
1599EXPORT_SYMBOL(unmap_underlying_metadata);
1600
1601/*
1602 * NOTE! All mapped/uptodate combinations are valid:
1603 *
1604 * Mapped Uptodate Meaning
1605 *
1606 * No No "unknown" - must do get_block()
1607 * No Yes "hole" - zero-filled
1608 * Yes No "allocated" - allocated on disk, not read in
1609 * Yes Yes "valid" - allocated and up-to-date in memory.
1610 *
1611 * "Dirty" is valid only with the last case (mapped+uptodate).
1612 */
1613
1614/*
1615 * While block_write_full_page is writing back the dirty buffers under
1616 * the page lock, whoever dirtied the buffers may decide to clean them
1617 * again at any time. We handle that by only looking at the buffer
1618 * state inside lock_buffer().
1619 *
1620 * If block_write_full_page() is called for regular writeback
1621 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1622 * locked buffer. This only can happen if someone has written the buffer
1623 * directly, with submit_bh(). At the address_space level PageWriteback
1624 * prevents this contention from occurring.
1625 *
1626 * If block_write_full_page() is called with wbc->sync_mode ==
1627 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1628 * causes the writes to be flagged as synchronous writes, but the
1629 * block device queue will NOT be unplugged, since usually many pages
1630 * will be pushed out before the higher-level caller actually
1631 * waits for the writes to be completed. The various wait functions,
1632 * such as wait_on_writeback_range() will ultimately call sync_page()
1633 * which will ultimately call blk_run_backing_dev(), which will end up
1634 * unplugging the device queue.
1635 */
1636static int __block_write_full_page(struct inode *inode, struct page *page,
1637 get_block_t *get_block, struct writeback_control *wbc,
1638 bh_end_io_t *handler)
1639{
1640 int err;
1641 sector_t block;
1642 sector_t last_block;
1643 struct buffer_head *bh, *head;
1644 const unsigned blocksize = 1 << inode->i_blkbits;
1645 int nr_underway = 0;
1646 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1647 WRITE_SYNC_PLUG : WRITE);
1648
1649 BUG_ON(!PageLocked(page));
1650
1651 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1652
1653 if (!page_has_buffers(page)) {
1654 create_empty_buffers(page, blocksize,
1655 (1 << BH_Dirty)|(1 << BH_Uptodate));
1656 }
1657
1658 /*
1659 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1660 * here, and the (potentially unmapped) buffers may become dirty at
1661 * any time. If a buffer becomes dirty here after we've inspected it
1662 * then we just miss that fact, and the page stays dirty.
1663 *
1664 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1665 * handle that here by just cleaning them.
1666 */
1667
1668 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1669 head = page_buffers(page);
1670 bh = head;
1671
1672 /*
1673 * Get all the dirty buffers mapped to disk addresses and
1674 * handle any aliases from the underlying blockdev's mapping.
1675 */
1676 do {
1677 if (block > last_block) {
1678 /*
1679 * mapped buffers outside i_size will occur, because
1680 * this page can be outside i_size when there is a
1681 * truncate in progress.
1682 */
1683 /*
1684 * The buffer was zeroed by block_write_full_page()
1685 */
1686 clear_buffer_dirty(bh);
1687 set_buffer_uptodate(bh);
1688 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1689 buffer_dirty(bh)) {
1690 WARN_ON(bh->b_size != blocksize);
1691 err = get_block(inode, block, bh, 1);
1692 if (err)
1693 goto recover;
1694 clear_buffer_delay(bh);
1695 if (buffer_new(bh)) {
1696 /* blockdev mappings never come here */
1697 clear_buffer_new(bh);
1698 unmap_underlying_metadata(bh->b_bdev,
1699 bh->b_blocknr);
1700 }
1701 }
1702 bh = bh->b_this_page;
1703 block++;
1704 } while (bh != head);
1705
1706 do {
1707 if (!buffer_mapped(bh))
1708 continue;
1709 /*
1710 * If it's a fully non-blocking write attempt and we cannot
1711 * lock the buffer then redirty the page. Note that this can
1712 * potentially cause a busy-wait loop from writeback threads
1713 * and kswapd activity, but those code paths have their own
1714 * higher-level throttling.
1715 */
1716 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1717 lock_buffer(bh);
1718 } else if (!trylock_buffer(bh)) {
1719 redirty_page_for_writepage(wbc, page);
1720 continue;
1721 }
1722 if (test_clear_buffer_dirty(bh)) {
1723 mark_buffer_async_write_endio(bh, handler);
1724 } else {
1725 unlock_buffer(bh);
1726 }
1727 } while ((bh = bh->b_this_page) != head);
1728
1729 /*
1730 * The page and its buffers are protected by PageWriteback(), so we can
1731 * drop the bh refcounts early.
1732 */
1733 BUG_ON(PageWriteback(page));
1734 set_page_writeback(page);
1735
1736 do {
1737 struct buffer_head *next = bh->b_this_page;
1738 if (buffer_async_write(bh)) {
1739 submit_bh(write_op, bh);
1740 nr_underway++;
1741 }
1742 bh = next;
1743 } while (bh != head);
05937baa 1744 unlock_page(page);
1da177e4
LT
1745
1746 err = 0;
1747done:
1748 if (nr_underway == 0) {
1749 /*
1750 * The page was marked dirty, but the buffers were
1751 * clean. Someone wrote them back by hand with
1752 * ll_rw_block/submit_bh. A rare case.
1753 */
1da177e4 1754 end_page_writeback(page);
3d67f2d7 1755
1da177e4
LT
1756 /*
1757 * The page and buffer_heads can be released at any time from
1758 * here on.
1759 */
1da177e4
LT
1760 }
1761 return err;
1762
1763recover:
1764 /*
1765 * ENOSPC, or some other error. We may already have added some
1766 * blocks to the file, so we need to write these out to avoid
1767 * exposing stale data.
1768 * The page is currently locked and not marked for writeback
1769 */
1770 bh = head;
1771 /* Recovery: lock and submit the mapped buffers */
1772 do {
29a814d2
AT
1773 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1774 !buffer_delay(bh)) {
1da177e4 1775 lock_buffer(bh);
35c80d5f 1776 mark_buffer_async_write_endio(bh, handler);
1da177e4
LT
1777 } else {
1778 /*
1779 * The buffer may have been set dirty during
1780 * attachment to a dirty page.
1781 */
1782 clear_buffer_dirty(bh);
1783 }
1784 } while ((bh = bh->b_this_page) != head);
1785 SetPageError(page);
1786 BUG_ON(PageWriteback(page));
7e4c3690 1787 mapping_set_error(page->mapping, err);
1da177e4 1788 set_page_writeback(page);
1da177e4
LT
1789 do {
1790 struct buffer_head *next = bh->b_this_page;
1791 if (buffer_async_write(bh)) {
1792 clear_buffer_dirty(bh);
a64c8610 1793 submit_bh(write_op, bh);
1da177e4
LT
1794 nr_underway++;
1795 }
1da177e4
LT
1796 bh = next;
1797 } while (bh != head);
ffda9d30 1798 unlock_page(page);
1da177e4
LT
1799 goto done;
1800}
1801
afddba49
NP
1802/*
1803 * If a page has any new buffers, zero them out here, and mark them uptodate
1804 * and dirty so they'll be written out (in order to prevent uninitialised
1805 * block data from leaking). And clear the new bit.
1806 */
1807void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1808{
1809 unsigned int block_start, block_end;
1810 struct buffer_head *head, *bh;
1811
1812 BUG_ON(!PageLocked(page));
1813 if (!page_has_buffers(page))
1814 return;
1815
1816 bh = head = page_buffers(page);
1817 block_start = 0;
1818 do {
1819 block_end = block_start + bh->b_size;
1820
1821 if (buffer_new(bh)) {
1822 if (block_end > from && block_start < to) {
1823 if (!PageUptodate(page)) {
1824 unsigned start, size;
1825
1826 start = max(from, block_start);
1827 size = min(to, block_end) - start;
1828
eebd2aa3 1829 zero_user(page, start, size);
afddba49
NP
1830 set_buffer_uptodate(bh);
1831 }
1832
1833 clear_buffer_new(bh);
1834 mark_buffer_dirty(bh);
1835 }
1836 }
1837
1838 block_start = block_end;
1839 bh = bh->b_this_page;
1840 } while (bh != head);
1841}
1842EXPORT_SYMBOL(page_zero_new_buffers);
1843
1da177e4
LT
1844static int __block_prepare_write(struct inode *inode, struct page *page,
1845 unsigned from, unsigned to, get_block_t *get_block)
1846{
1847 unsigned block_start, block_end;
1848 sector_t block;
1849 int err = 0;
1850 unsigned blocksize, bbits;
1851 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1852
1853 BUG_ON(!PageLocked(page));
1854 BUG_ON(from > PAGE_CACHE_SIZE);
1855 BUG_ON(to > PAGE_CACHE_SIZE);
1856 BUG_ON(from > to);
1857
1858 blocksize = 1 << inode->i_blkbits;
1859 if (!page_has_buffers(page))
1860 create_empty_buffers(page, blocksize, 0);
1861 head = page_buffers(page);
1862
1863 bbits = inode->i_blkbits;
1864 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1865
1866 for(bh = head, block_start = 0; bh != head || !block_start;
1867 block++, block_start=block_end, bh = bh->b_this_page) {
1868 block_end = block_start + blocksize;
1869 if (block_end <= from || block_start >= to) {
1870 if (PageUptodate(page)) {
1871 if (!buffer_uptodate(bh))
1872 set_buffer_uptodate(bh);
1873 }
1874 continue;
1875 }
1876 if (buffer_new(bh))
1877 clear_buffer_new(bh);
1878 if (!buffer_mapped(bh)) {
b0cf2321 1879 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1880 err = get_block(inode, block, bh, 1);
1881 if (err)
f3ddbdc6 1882 break;
1da177e4 1883 if (buffer_new(bh)) {
1da177e4
LT
1884 unmap_underlying_metadata(bh->b_bdev,
1885 bh->b_blocknr);
1886 if (PageUptodate(page)) {
637aff46 1887 clear_buffer_new(bh);
1da177e4 1888 set_buffer_uptodate(bh);
637aff46 1889 mark_buffer_dirty(bh);
1da177e4
LT
1890 continue;
1891 }
eebd2aa3
CL
1892 if (block_end > to || block_start < from)
1893 zero_user_segments(page,
1894 to, block_end,
1895 block_start, from);
1da177e4
LT
1896 continue;
1897 }
1898 }
1899 if (PageUptodate(page)) {
1900 if (!buffer_uptodate(bh))
1901 set_buffer_uptodate(bh);
1902 continue;
1903 }
1904 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1905 !buffer_unwritten(bh) &&
1da177e4
LT
1906 (block_start < from || block_end > to)) {
1907 ll_rw_block(READ, 1, &bh);
1908 *wait_bh++=bh;
1909 }
1910 }
1911 /*
1912 * If we issued read requests - let them complete.
1913 */
1914 while(wait_bh > wait) {
1915 wait_on_buffer(*--wait_bh);
1916 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1917 err = -EIO;
1da177e4 1918 }
afddba49
NP
1919 if (unlikely(err))
1920 page_zero_new_buffers(page, from, to);
1da177e4
LT
1921 return err;
1922}
1923
1924static int __block_commit_write(struct inode *inode, struct page *page,
1925 unsigned from, unsigned to)
1926{
1927 unsigned block_start, block_end;
1928 int partial = 0;
1929 unsigned blocksize;
1930 struct buffer_head *bh, *head;
1931
1932 blocksize = 1 << inode->i_blkbits;
1933
1934 for(bh = head = page_buffers(page), block_start = 0;
1935 bh != head || !block_start;
1936 block_start=block_end, bh = bh->b_this_page) {
1937 block_end = block_start + blocksize;
1938 if (block_end <= from || block_start >= to) {
1939 if (!buffer_uptodate(bh))
1940 partial = 1;
1941 } else {
1942 set_buffer_uptodate(bh);
1943 mark_buffer_dirty(bh);
1944 }
afddba49 1945 clear_buffer_new(bh);
1da177e4
LT
1946 }
1947
1948 /*
1949 * If this is a partial write which happened to make all buffers
1950 * uptodate then we can optimize away a bogus readpage() for
1951 * the next read(). Here we 'discover' whether the page went
1952 * uptodate as a result of this (potentially partial) write.
1953 */
1954 if (!partial)
1955 SetPageUptodate(page);
1956 return 0;
1957}
1958
afddba49
NP
1959/*
1960 * block_write_begin takes care of the basic task of block allocation and
1961 * bringing partial write blocks uptodate first.
1962 *
1963 * If *pagep is not NULL, then block_write_begin uses the locked page
1964 * at *pagep rather than allocating its own. In this case, the page will
1965 * not be unlocked or deallocated on failure.
1966 */
1967int block_write_begin(struct file *file, struct address_space *mapping,
1968 loff_t pos, unsigned len, unsigned flags,
1969 struct page **pagep, void **fsdata,
1970 get_block_t *get_block)
1971{
1972 struct inode *inode = mapping->host;
1973 int status = 0;
1974 struct page *page;
1975 pgoff_t index;
1976 unsigned start, end;
1977 int ownpage = 0;
1978
1979 index = pos >> PAGE_CACHE_SHIFT;
1980 start = pos & (PAGE_CACHE_SIZE - 1);
1981 end = start + len;
1982
1983 page = *pagep;
1984 if (page == NULL) {
1985 ownpage = 1;
54566b2c 1986 page = grab_cache_page_write_begin(mapping, index, flags);
afddba49
NP
1987 if (!page) {
1988 status = -ENOMEM;
1989 goto out;
1990 }
1991 *pagep = page;
1992 } else
1993 BUG_ON(!PageLocked(page));
1994
1995 status = __block_prepare_write(inode, page, start, end, get_block);
1996 if (unlikely(status)) {
1997 ClearPageUptodate(page);
1998
1999 if (ownpage) {
2000 unlock_page(page);
2001 page_cache_release(page);
2002 *pagep = NULL;
2003
2004 /*
2005 * prepare_write() may have instantiated a few blocks
2006 * outside i_size. Trim these off again. Don't need
2007 * i_size_read because we hold i_mutex.
2008 */
2009 if (pos + len > inode->i_size)
2010 vmtruncate(inode, inode->i_size);
2011 }
afddba49
NP
2012 }
2013
2014out:
2015 return status;
2016}
2017EXPORT_SYMBOL(block_write_begin);
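/*
 * Illustrative sketch (not part of buffer.c): a filesystem whose blocks are
 * mapped by an ordinary get_block_t can implement ->write_begin by simply
 * forwarding to block_write_begin().  "myfs_get_block" and "myfs_write_begin"
 * are hypothetical names used only for this and the following sketches.
 */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	*pagep = NULL;		/* let block_write_begin allocate the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}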
2018
2019int block_write_end(struct file *file, struct address_space *mapping,
2020 loff_t pos, unsigned len, unsigned copied,
2021 struct page *page, void *fsdata)
2022{
2023 struct inode *inode = mapping->host;
2024 unsigned start;
2025
2026 start = pos & (PAGE_CACHE_SIZE - 1);
2027
2028 if (unlikely(copied < len)) {
2029 /*
2030 * The buffers that were written will now be uptodate, so we
2031 * don't have to worry about a readpage reading them and
2032 * overwriting a partial write. However if we have encountered
2033 * a short write and only partially written into a buffer, it
2034 * will not be marked uptodate, so a readpage might come in and
2035 * destroy our partial write.
2036 *
2037 * Do the simplest thing, and just treat any short write to a
2038 * non uptodate page as a zero-length write, and force the
2039 * caller to redo the whole thing.
2040 */
2041 if (!PageUptodate(page))
2042 copied = 0;
2043
2044 page_zero_new_buffers(page, start+copied, start+len);
2045 }
2046 flush_dcache_page(page);
2047
2048 /* This could be a short (even 0-length) commit */
2049 __block_commit_write(inode, page, start, start+copied);
2050
2051 return copied;
2052}
2053EXPORT_SYMBOL(block_write_end);
2054
2055int generic_write_end(struct file *file, struct address_space *mapping,
2056 loff_t pos, unsigned len, unsigned copied,
2057 struct page *page, void *fsdata)
2058{
2059 struct inode *inode = mapping->host;
c7d206b3 2060 int i_size_changed = 0;
afddba49
NP
2061
2062 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2063
2064 /*
2065 * No need to use i_size_read() here, the i_size
2066 * cannot change under us because we hold i_mutex.
2067 *
2068 * But it's important to update i_size while still holding page lock:
2069 * page writeout could otherwise come in and zero beyond i_size.
2070 */
2071 if (pos+copied > inode->i_size) {
2072 i_size_write(inode, pos+copied);
c7d206b3 2073 i_size_changed = 1;
afddba49
NP
2074 }
2075
2076 unlock_page(page);
2077 page_cache_release(page);
2078
c7d206b3
JK
2079 /*
2080 * Don't mark the inode dirty under the page lock. First, it unnecessarily
2081 * lengthens the time the page lock is held. Second, it forces lock
2082 * ordering of page lock and transaction start for journaling
2083 * filesystems.
2084 */
2085 if (i_size_changed)
2086 mark_inode_dirty(inode);
2087
afddba49
NP
2088 return copied;
2089}
2090EXPORT_SYMBOL(generic_write_end);
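/*
 * Illustrative sketch: generic_write_end() usually needs no wrapper at all
 * and can be wired straight into the address_space_operations, paired with
 * the hypothetical myfs_write_begin() sketched above.  Other methods omitted.
 */
static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};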
2091
8ab22b9a
HH
2092/*
2093 * block_is_partially_uptodate checks whether buffers within a page are
2094 * uptodate or not.
2095 *
2096 * Returns true if all buffers which correspond to a file portion
2097 * we want to read are uptodate.
2098 */
2099int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2100 unsigned long from)
2101{
2102 struct inode *inode = page->mapping->host;
2103 unsigned block_start, block_end, blocksize;
2104 unsigned to;
2105 struct buffer_head *bh, *head;
2106 int ret = 1;
2107
2108 if (!page_has_buffers(page))
2109 return 0;
2110
2111 blocksize = 1 << inode->i_blkbits;
2112 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2113 to = from + to;
2114 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2115 return 0;
2116
2117 head = page_buffers(page);
2118 bh = head;
2119 block_start = 0;
2120 do {
2121 block_end = block_start + blocksize;
2122 if (block_end > from && block_start < to) {
2123 if (!buffer_uptodate(bh)) {
2124 ret = 0;
2125 break;
2126 }
2127 if (block_end >= to)
2128 break;
2129 }
2130 block_start = block_end;
2131 bh = bh->b_this_page;
2132 } while (bh != head);
2133
2134 return ret;
2135}
2136EXPORT_SYMBOL(block_is_partially_uptodate);
2137
1da177e4
LT
2138/*
2139 * Generic "read page" function for block devices that have the normal
2140 * get_block functionality. This is most of the block device filesystems.
2141 * Reads the page asynchronously --- the unlock_buffer() and
2142 * set/clear_buffer_uptodate() functions propagate buffer state into the
2143 * page struct once IO has completed.
2144 */
2145int block_read_full_page(struct page *page, get_block_t *get_block)
2146{
2147 struct inode *inode = page->mapping->host;
2148 sector_t iblock, lblock;
2149 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2150 unsigned int blocksize;
2151 int nr, i;
2152 int fully_mapped = 1;
2153
cd7619d6 2154 BUG_ON(!PageLocked(page));
1da177e4
LT
2155 blocksize = 1 << inode->i_blkbits;
2156 if (!page_has_buffers(page))
2157 create_empty_buffers(page, blocksize, 0);
2158 head = page_buffers(page);
2159
2160 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2161 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2162 bh = head;
2163 nr = 0;
2164 i = 0;
2165
2166 do {
2167 if (buffer_uptodate(bh))
2168 continue;
2169
2170 if (!buffer_mapped(bh)) {
c64610ba
AM
2171 int err = 0;
2172
1da177e4
LT
2173 fully_mapped = 0;
2174 if (iblock < lblock) {
b0cf2321 2175 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2176 err = get_block(inode, iblock, bh, 0);
2177 if (err)
1da177e4
LT
2178 SetPageError(page);
2179 }
2180 if (!buffer_mapped(bh)) {
eebd2aa3 2181 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2182 if (!err)
2183 set_buffer_uptodate(bh);
1da177e4
LT
2184 continue;
2185 }
2186 /*
2187 * get_block() might have updated the buffer
2188 * synchronously
2189 */
2190 if (buffer_uptodate(bh))
2191 continue;
2192 }
2193 arr[nr++] = bh;
2194 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2195
2196 if (fully_mapped)
2197 SetPageMappedToDisk(page);
2198
2199 if (!nr) {
2200 /*
2201 * All buffers are uptodate - we can set the page uptodate
2202 * as well. But not if get_block() returned an error.
2203 */
2204 if (!PageError(page))
2205 SetPageUptodate(page);
2206 unlock_page(page);
2207 return 0;
2208 }
2209
2210 /* Stage two: lock the buffers */
2211 for (i = 0; i < nr; i++) {
2212 bh = arr[i];
2213 lock_buffer(bh);
2214 mark_buffer_async_read(bh);
2215 }
2216
2217 /*
2218 * Stage 3: start the IO. Check for uptodateness
2219 * inside the buffer lock in case another process reading
2220 * the underlying blockdev brought it uptodate (the sct fix).
2221 */
2222 for (i = 0; i < nr; i++) {
2223 bh = arr[i];
2224 if (buffer_uptodate(bh))
2225 end_buffer_async_read(bh, 1);
2226 else
2227 submit_bh(READ, bh);
2228 }
2229 return 0;
2230}
1fe72eaa 2231EXPORT_SYMBOL(block_read_full_page);
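/*
 * Illustrative sketch: a typical ->readpage is a one-line wrapper around
 * block_read_full_page(), again using the hypothetical myfs_get_block()
 * mapper declared in the earlier sketch.
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}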
1da177e4
LT
2232
2233/* utility function for filesystems that need to do work on expanding
89e10787 2234 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2235 * deal with the hole.
2236 */
89e10787 2237int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2238{
2239 struct address_space *mapping = inode->i_mapping;
2240 struct page *page;
89e10787 2241 void *fsdata;
1da177e4
LT
2242 int err;
2243
c08d3b0e 2244 err = inode_newsize_ok(inode, size);
2245 if (err)
1da177e4
LT
2246 goto out;
2247
89e10787
NP
2248 err = pagecache_write_begin(NULL, mapping, size, 0,
2249 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2250 &page, &fsdata);
2251 if (err)
05eb0b51 2252 goto out;
05eb0b51 2253
89e10787
NP
2254 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2255 BUG_ON(err > 0);
05eb0b51 2256
1da177e4
LT
2257out:
2258 return err;
2259}
1fe72eaa 2260EXPORT_SYMBOL(generic_cont_expand_simple);
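/*
 * Illustrative sketch: a filesystem that cannot represent holes calls
 * generic_cont_expand_simple() from its ->setattr path before an expanding
 * truncate, so its own ->write_begin gets to fill the gap between the old
 * and new EOF.  "myfs_setattr" is hypothetical.
 */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}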
1da177e4 2261
f1e3af72
AB
2262static int cont_expand_zero(struct file *file, struct address_space *mapping,
2263 loff_t pos, loff_t *bytes)
1da177e4 2264{
1da177e4 2265 struct inode *inode = mapping->host;
1da177e4 2266 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2267 struct page *page;
2268 void *fsdata;
2269 pgoff_t index, curidx;
2270 loff_t curpos;
2271 unsigned zerofrom, offset, len;
2272 int err = 0;
1da177e4 2273
89e10787
NP
2274 index = pos >> PAGE_CACHE_SHIFT;
2275 offset = pos & ~PAGE_CACHE_MASK;
2276
2277 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2278 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2279 if (zerofrom & (blocksize-1)) {
2280 *bytes |= (blocksize-1);
2281 (*bytes)++;
2282 }
89e10787 2283 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2284
89e10787
NP
2285 err = pagecache_write_begin(file, mapping, curpos, len,
2286 AOP_FLAG_UNINTERRUPTIBLE,
2287 &page, &fsdata);
2288 if (err)
2289 goto out;
eebd2aa3 2290 zero_user(page, zerofrom, len);
89e10787
NP
2291 err = pagecache_write_end(file, mapping, curpos, len, len,
2292 page, fsdata);
2293 if (err < 0)
2294 goto out;
2295 BUG_ON(err != len);
2296 err = 0;
061e9746
OH
2297
2298 balance_dirty_pages_ratelimited(mapping);
89e10787 2299 }
1da177e4 2300
89e10787
NP
2301 /* page covers the boundary, find the boundary offset */
2302 if (index == curidx) {
2303 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2304 /* if we are expanding the file, the last block will be filled */
89e10787
NP
2305 if (offset <= zerofrom) {
2306 goto out;
2307 }
2308 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2309 *bytes |= (blocksize-1);
2310 (*bytes)++;
2311 }
89e10787 2312 len = offset - zerofrom;
1da177e4 2313
89e10787
NP
2314 err = pagecache_write_begin(file, mapping, curpos, len,
2315 AOP_FLAG_UNINTERRUPTIBLE,
2316 &page, &fsdata);
2317 if (err)
2318 goto out;
eebd2aa3 2319 zero_user(page, zerofrom, len);
89e10787
NP
2320 err = pagecache_write_end(file, mapping, curpos, len, len,
2321 page, fsdata);
2322 if (err < 0)
2323 goto out;
2324 BUG_ON(err != len);
2325 err = 0;
1da177e4 2326 }
89e10787
NP
2327out:
2328 return err;
2329}
2330
2331/*
2332 * For moronic filesystems that do not allow holes in files.
2333 * We may have to extend the file.
2334 */
2335int cont_write_begin(struct file *file, struct address_space *mapping,
2336 loff_t pos, unsigned len, unsigned flags,
2337 struct page **pagep, void **fsdata,
2338 get_block_t *get_block, loff_t *bytes)
2339{
2340 struct inode *inode = mapping->host;
2341 unsigned blocksize = 1 << inode->i_blkbits;
2342 unsigned zerofrom;
2343 int err;
2344
2345 err = cont_expand_zero(file, mapping, pos, bytes);
2346 if (err)
2347 goto out;
2348
2349 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2350 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2351 *bytes |= (blocksize-1);
2352 (*bytes)++;
1da177e4 2353 }
1da177e4 2354
89e10787
NP
2355 *pagep = NULL;
2356 err = block_write_begin(file, mapping, pos, len,
2357 flags, pagep, fsdata, get_block);
1da177e4 2358out:
89e10787 2359 return err;
1da177e4 2360}
1fe72eaa 2361EXPORT_SYMBOL(cont_write_begin);
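/*
 * Illustrative sketch: a hole-less filesystem keeps a per-inode loff_t
 * recording how far the file tail has already been zero-filled and passes
 * its address as the "bytes" argument.  The myfs_inode_info layout and
 * MYFS_I() helper are hypothetical.
 */
struct myfs_inode_info {
	loff_t		i_zeroed_to;	/* tail of the zero-filled region */
	struct inode	vfs_inode;
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info, vfs_inode);
}

static int myfs_cont_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->i_zeroed_to);
}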
1da177e4
LT
2362
2363int block_prepare_write(struct page *page, unsigned from, unsigned to,
2364 get_block_t *get_block)
2365{
2366 struct inode *inode = page->mapping->host;
2367 int err = __block_prepare_write(inode, page, from, to, get_block);
2368 if (err)
2369 ClearPageUptodate(page);
2370 return err;
2371}
1fe72eaa 2372EXPORT_SYMBOL(block_prepare_write);
1da177e4
LT
2373
2374int block_commit_write(struct page *page, unsigned from, unsigned to)
2375{
2376 struct inode *inode = page->mapping->host;
2377 __block_commit_write(inode,page,from,to);
2378 return 0;
2379}
1fe72eaa 2380EXPORT_SYMBOL(block_commit_write);
1da177e4 2381
54171690
DC
2382/*
2383 * block_page_mkwrite() is not allowed to change the file size as it gets
2384 * called from a page fault handler when a page is first dirtied. Hence we must
2385 * be careful to check for EOF conditions here. We set the page up correctly
2386 * for a written page which means we get ENOSPC checking when writing into
2387 * holes and correct delalloc and unwritten extent mapping on filesystems that
2388 * support these features.
2389 *
2390 * We are not allowed to take the i_mutex here so we have to play games to
2391 * protect against truncate races as the page could now be beyond EOF. Because
2392 * vmtruncate() writes the inode size before removing pages, once we have the
2393 * page lock we can determine safely if the page is beyond EOF. If it is not
2394 * beyond EOF, then the page is guaranteed safe against truncation until we
2395 * unlock the page.
2396 */
2397int
c2ec175c 2398block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
54171690
DC
2399 get_block_t get_block)
2400{
c2ec175c 2401 struct page *page = vmf->page;
54171690
DC
2402 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2403 unsigned long end;
2404 loff_t size;
56a76f82 2405 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
54171690
DC
2406
2407 lock_page(page);
2408 size = i_size_read(inode);
2409 if ((page->mapping != inode->i_mapping) ||
18336338 2410 (page_offset(page) > size)) {
54171690 2411 /* page got truncated out from underneath us */
b827e496
NP
2412 unlock_page(page);
2413 goto out;
54171690
DC
2414 }
2415
2416 /* page is wholly or partially inside EOF */
2417 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2418 end = size & ~PAGE_CACHE_MASK;
2419 else
2420 end = PAGE_CACHE_SIZE;
2421
2422 ret = block_prepare_write(page, 0, end, get_block);
2423 if (!ret)
2424 ret = block_commit_write(page, 0, end);
2425
56a76f82 2426 if (unlikely(ret)) {
b827e496 2427 unlock_page(page);
56a76f82
NP
2428 if (ret == -ENOMEM)
2429 ret = VM_FAULT_OOM;
2430 else /* -ENOSPC, -EIO, etc */
2431 ret = VM_FAULT_SIGBUS;
b827e496
NP
2432 } else
2433 ret = VM_FAULT_LOCKED;
c2ec175c 2434
b827e496 2435out:
54171690
DC
2436 return ret;
2437}
1fe72eaa 2438EXPORT_SYMBOL(block_page_mkwrite);
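/*
 * Illustrative sketch: block_page_mkwrite() is normally exposed through the
 * file's vm_operations_struct, so that a writable mmap fault allocates
 * blocks (and gets ENOSPC checking) before the page is dirtied.  Names are
 * hypothetical; filemap_fault() is the stock read-fault handler.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};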
1da177e4
LT
2439
2440/*
03158cd7 2441 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2442 * immediately, while under the page lock. So it needs a special end_io
2443 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2444 */
2445static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2446{
68671f35 2447 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2448}
2449
03158cd7
NP
2450/*
2451 * Attach the singly-linked list of buffers created by nobh_write_begin to
2452 * the page (converting it to a circular linked list and taking care of page
2453 * dirty races).
2454 */
2455static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2456{
2457 struct buffer_head *bh;
2458
2459 BUG_ON(!PageLocked(page));
2460
2461 spin_lock(&page->mapping->private_lock);
2462 bh = head;
2463 do {
2464 if (PageDirty(page))
2465 set_buffer_dirty(bh);
2466 if (!bh->b_this_page)
2467 bh->b_this_page = head;
2468 bh = bh->b_this_page;
2469 } while (bh != head);
2470 attach_page_buffers(page, head);
2471 spin_unlock(&page->mapping->private_lock);
2472}
2473
1da177e4
LT
2474/*
2475 * On entry, the page is fully not uptodate.
2476 * On exit the page is fully uptodate in the areas outside (from,to)
2477 */
03158cd7
NP
2478int nobh_write_begin(struct file *file, struct address_space *mapping,
2479 loff_t pos, unsigned len, unsigned flags,
2480 struct page **pagep, void **fsdata,
1da177e4
LT
2481 get_block_t *get_block)
2482{
03158cd7 2483 struct inode *inode = mapping->host;
1da177e4
LT
2484 const unsigned blkbits = inode->i_blkbits;
2485 const unsigned blocksize = 1 << blkbits;
a4b0672d 2486 struct buffer_head *head, *bh;
03158cd7
NP
2487 struct page *page;
2488 pgoff_t index;
2489 unsigned from, to;
1da177e4 2490 unsigned block_in_page;
a4b0672d 2491 unsigned block_start, block_end;
1da177e4 2492 sector_t block_in_file;
1da177e4 2493 int nr_reads = 0;
1da177e4
LT
2494 int ret = 0;
2495 int is_mapped_to_disk = 1;
1da177e4 2496
03158cd7
NP
2497 index = pos >> PAGE_CACHE_SHIFT;
2498 from = pos & (PAGE_CACHE_SIZE - 1);
2499 to = from + len;
2500
54566b2c 2501 page = grab_cache_page_write_begin(mapping, index, flags);
03158cd7
NP
2502 if (!page)
2503 return -ENOMEM;
2504 *pagep = page;
2505 *fsdata = NULL;
2506
2507 if (page_has_buffers(page)) {
2508 unlock_page(page);
2509 page_cache_release(page);
2510 *pagep = NULL;
2511 return block_write_begin(file, mapping, pos, len, flags, pagep,
2512 fsdata, get_block);
2513 }
a4b0672d 2514
1da177e4
LT
2515 if (PageMappedToDisk(page))
2516 return 0;
2517
a4b0672d
NP
2518 /*
2519 * Allocate buffers so that we can keep track of state, and potentially
2520 * attach them to the page if an error occurs. In the common case of
2521 * no error, they will just be freed again without ever being attached
2522 * to the page (which is all OK, because we're under the page lock).
2523 *
2524 * Be careful: the buffer linked list is a NULL terminated one, rather
2525 * than the circular one we're used to.
2526 */
2527 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2528 if (!head) {
2529 ret = -ENOMEM;
2530 goto out_release;
2531 }
a4b0672d 2532
1da177e4 2533 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2534
2535 /*
2536 * We loop across all blocks in the page, whether or not they are
2537 * part of the affected region. This is so we can discover if the
2538 * page is fully mapped-to-disk.
2539 */
a4b0672d 2540 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2541 block_start < PAGE_CACHE_SIZE;
a4b0672d 2542 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2543 int create;
2544
a4b0672d
NP
2545 block_end = block_start + blocksize;
2546 bh->b_state = 0;
1da177e4
LT
2547 create = 1;
2548 if (block_start >= to)
2549 create = 0;
2550 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2551 bh, create);
1da177e4
LT
2552 if (ret)
2553 goto failed;
a4b0672d 2554 if (!buffer_mapped(bh))
1da177e4 2555 is_mapped_to_disk = 0;
a4b0672d
NP
2556 if (buffer_new(bh))
2557 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2558 if (PageUptodate(page)) {
2559 set_buffer_uptodate(bh);
1da177e4 2560 continue;
a4b0672d
NP
2561 }
2562 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2563 zero_user_segments(page, block_start, from,
2564 to, block_end);
1da177e4
LT
2565 continue;
2566 }
a4b0672d 2567 if (buffer_uptodate(bh))
1da177e4
LT
2568 continue; /* reiserfs does this */
2569 if (block_start < from || block_end > to) {
a4b0672d
NP
2570 lock_buffer(bh);
2571 bh->b_end_io = end_buffer_read_nobh;
2572 submit_bh(READ, bh);
2573 nr_reads++;
1da177e4
LT
2574 }
2575 }
2576
2577 if (nr_reads) {
1da177e4
LT
2578 /*
2579 * The page is locked, so these buffers are protected from
2580 * any VM or truncate activity. Hence we don't need to care
2581 * for the buffer_head refcounts.
2582 */
a4b0672d 2583 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2584 wait_on_buffer(bh);
2585 if (!buffer_uptodate(bh))
2586 ret = -EIO;
1da177e4
LT
2587 }
2588 if (ret)
2589 goto failed;
2590 }
2591
2592 if (is_mapped_to_disk)
2593 SetPageMappedToDisk(page);
1da177e4 2594
03158cd7 2595 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2596
1da177e4
LT
2597 return 0;
2598
2599failed:
03158cd7 2600 BUG_ON(!ret);
1da177e4 2601 /*
a4b0672d
NP
2602 * Error recovery is a bit difficult. We need to zero out blocks that
2603 * were newly allocated, and dirty them to ensure they get written out.
2604 * Buffers need to be attached to the page at this point, otherwise
2605 * the handling of potential IO errors during writeout would be hard
2606 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2607 */
03158cd7
NP
2608 attach_nobh_buffers(page, head);
2609 page_zero_new_buffers(page, from, to);
a4b0672d 2610
03158cd7
NP
2611out_release:
2612 unlock_page(page);
2613 page_cache_release(page);
2614 *pagep = NULL;
a4b0672d 2615
03158cd7
NP
2616 if (pos + len > inode->i_size)
2617 vmtruncate(inode, inode->i_size);
a4b0672d 2618
1da177e4
LT
2619 return ret;
2620}
03158cd7 2621EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2622
03158cd7
NP
2623int nobh_write_end(struct file *file, struct address_space *mapping,
2624 loff_t pos, unsigned len, unsigned copied,
2625 struct page *page, void *fsdata)
1da177e4
LT
2626{
2627 struct inode *inode = page->mapping->host;
efdc3131 2628 struct buffer_head *head = fsdata;
03158cd7 2629 struct buffer_head *bh;
5b41e74a 2630 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2631
d4cf109f 2632 if (unlikely(copied < len) && head)
5b41e74a
DM
2633 attach_nobh_buffers(page, head);
2634 if (page_has_buffers(page))
2635 return generic_write_end(file, mapping, pos, len,
2636 copied, page, fsdata);
a4b0672d 2637
22c8ca78 2638 SetPageUptodate(page);
1da177e4 2639 set_page_dirty(page);
03158cd7
NP
2640 if (pos+copied > inode->i_size) {
2641 i_size_write(inode, pos+copied);
1da177e4
LT
2642 mark_inode_dirty(inode);
2643 }
03158cd7
NP
2644
2645 unlock_page(page);
2646 page_cache_release(page);
2647
03158cd7
NP
2648 while (head) {
2649 bh = head;
2650 head = head->b_this_page;
2651 free_buffer_head(bh);
2652 }
2653
2654 return copied;
1da177e4 2655}
03158cd7 2656EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2657
2658/*
2659 * nobh_writepage() - based on block_write_full_page() except
2660 * that it tries to operate without attaching bufferheads to
2661 * the page.
2662 */
2663int nobh_writepage(struct page *page, get_block_t *get_block,
2664 struct writeback_control *wbc)
2665{
2666 struct inode * const inode = page->mapping->host;
2667 loff_t i_size = i_size_read(inode);
2668 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2669 unsigned offset;
1da177e4
LT
2670 int ret;
2671
2672 /* Is the page fully inside i_size? */
2673 if (page->index < end_index)
2674 goto out;
2675
2676 /* Is the page fully outside i_size? (truncate in progress) */
2677 offset = i_size & (PAGE_CACHE_SIZE-1);
2678 if (page->index >= end_index+1 || !offset) {
2679 /*
2680 * The page may have dirty, unmapped buffers. For example,
2681 * they may have been added in ext3_writepage(). Make them
2682 * freeable here, so the page does not leak.
2683 */
2684#if 0
2685 /* Not really sure about this - do we need this ? */
2686 if (page->mapping->a_ops->invalidatepage)
2687 page->mapping->a_ops->invalidatepage(page, offset);
2688#endif
2689 unlock_page(page);
2690 return 0; /* don't care */
2691 }
2692
2693 /*
2694 * The page straddles i_size. It must be zeroed out on each and every
2695 * writepage invocation because it may be mmapped. "A file is mapped
2696 * in multiples of the page size. For a file that is not a multiple of
2697 * the page size, the remaining memory is zeroed when mapped, and
2698 * writes to that region are not written out to the file."
2699 */
eebd2aa3 2700 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2701out:
2702 ret = mpage_writepage(page, get_block, wbc);
2703 if (ret == -EAGAIN)
35c80d5f
CM
2704 ret = __block_write_full_page(inode, page, get_block, wbc,
2705 end_buffer_async_write);
1da177e4
LT
2706 return ret;
2707}
2708EXPORT_SYMBOL(nobh_writepage);
2709
03158cd7
NP
2710int nobh_truncate_page(struct address_space *mapping,
2711 loff_t from, get_block_t *get_block)
1da177e4 2712{
1da177e4
LT
2713 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2714 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2715 unsigned blocksize;
2716 sector_t iblock;
2717 unsigned length, pos;
2718 struct inode *inode = mapping->host;
1da177e4 2719 struct page *page;
03158cd7
NP
2720 struct buffer_head map_bh;
2721 int err;
1da177e4 2722
03158cd7
NP
2723 blocksize = 1 << inode->i_blkbits;
2724 length = offset & (blocksize - 1);
2725
2726 /* Block boundary? Nothing to do */
2727 if (!length)
2728 return 0;
2729
2730 length = blocksize - length;
2731 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2732
1da177e4 2733 page = grab_cache_page(mapping, index);
03158cd7 2734 err = -ENOMEM;
1da177e4
LT
2735 if (!page)
2736 goto out;
2737
03158cd7
NP
2738 if (page_has_buffers(page)) {
2739has_buffers:
2740 unlock_page(page);
2741 page_cache_release(page);
2742 return block_truncate_page(mapping, from, get_block);
2743 }
2744
2745 /* Find the buffer that contains "offset" */
2746 pos = blocksize;
2747 while (offset >= pos) {
2748 iblock++;
2749 pos += blocksize;
2750 }
2751
460bcf57
TT
2752 map_bh.b_size = blocksize;
2753 map_bh.b_state = 0;
03158cd7
NP
2754 err = get_block(inode, iblock, &map_bh, 0);
2755 if (err)
2756 goto unlock;
2757 /* unmapped? It's a hole - nothing to do */
2758 if (!buffer_mapped(&map_bh))
2759 goto unlock;
2760
2761 /* Ok, it's mapped. Make sure it's up-to-date */
2762 if (!PageUptodate(page)) {
2763 err = mapping->a_ops->readpage(NULL, page);
2764 if (err) {
2765 page_cache_release(page);
2766 goto out;
2767 }
2768 lock_page(page);
2769 if (!PageUptodate(page)) {
2770 err = -EIO;
2771 goto unlock;
2772 }
2773 if (page_has_buffers(page))
2774 goto has_buffers;
1da177e4 2775 }
eebd2aa3 2776 zero_user(page, offset, length);
03158cd7
NP
2777 set_page_dirty(page);
2778 err = 0;
2779
2780unlock:
1da177e4
LT
2781 unlock_page(page);
2782 page_cache_release(page);
2783out:
03158cd7 2784 return err;
1da177e4
LT
2785}
2786EXPORT_SYMBOL(nobh_truncate_page);
2787
2788int block_truncate_page(struct address_space *mapping,
2789 loff_t from, get_block_t *get_block)
2790{
2791 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2792 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2793 unsigned blocksize;
54b21a79 2794 sector_t iblock;
1da177e4
LT
2795 unsigned length, pos;
2796 struct inode *inode = mapping->host;
2797 struct page *page;
2798 struct buffer_head *bh;
1da177e4
LT
2799 int err;
2800
2801 blocksize = 1 << inode->i_blkbits;
2802 length = offset & (blocksize - 1);
2803
2804 /* Block boundary? Nothing to do */
2805 if (!length)
2806 return 0;
2807
2808 length = blocksize - length;
54b21a79 2809 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2810
2811 page = grab_cache_page(mapping, index);
2812 err = -ENOMEM;
2813 if (!page)
2814 goto out;
2815
2816 if (!page_has_buffers(page))
2817 create_empty_buffers(page, blocksize, 0);
2818
2819 /* Find the buffer that contains "offset" */
2820 bh = page_buffers(page);
2821 pos = blocksize;
2822 while (offset >= pos) {
2823 bh = bh->b_this_page;
2824 iblock++;
2825 pos += blocksize;
2826 }
2827
2828 err = 0;
2829 if (!buffer_mapped(bh)) {
b0cf2321 2830 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2831 err = get_block(inode, iblock, bh, 0);
2832 if (err)
2833 goto unlock;
2834 /* unmapped? It's a hole - nothing to do */
2835 if (!buffer_mapped(bh))
2836 goto unlock;
2837 }
2838
2839 /* Ok, it's mapped. Make sure it's up-to-date */
2840 if (PageUptodate(page))
2841 set_buffer_uptodate(bh);
2842
33a266dd 2843 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2844 err = -EIO;
2845 ll_rw_block(READ, 1, &bh);
2846 wait_on_buffer(bh);
2847 /* Uhhuh. Read error. Complain and punt. */
2848 if (!buffer_uptodate(bh))
2849 goto unlock;
2850 }
2851
eebd2aa3 2852 zero_user(page, offset, length);
1da177e4
LT
2853 mark_buffer_dirty(bh);
2854 err = 0;
2855
2856unlock:
2857 unlock_page(page);
2858 page_cache_release(page);
2859out:
2860 return err;
2861}
1fe72eaa 2862EXPORT_SYMBOL(block_truncate_page);
1da177e4
LT
2863
2864/*
2865 * The generic ->writepage function for buffer-backed address_spaces;
35c80d5f 2866 * this form passes in the end_io handler used to finish the IO.
1da177e4 2867 */
35c80d5f
CM
2868int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2869 struct writeback_control *wbc, bh_end_io_t *handler)
1da177e4
LT
2870{
2871 struct inode * const inode = page->mapping->host;
2872 loff_t i_size = i_size_read(inode);
2873 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2874 unsigned offset;
1da177e4
LT
2875
2876 /* Is the page fully inside i_size? */
2877 if (page->index < end_index)
35c80d5f
CM
2878 return __block_write_full_page(inode, page, get_block, wbc,
2879 handler);
1da177e4
LT
2880
2881 /* Is the page fully outside i_size? (truncate in progress) */
2882 offset = i_size & (PAGE_CACHE_SIZE-1);
2883 if (page->index >= end_index+1 || !offset) {
2884 /*
2885 * The page may have dirty, unmapped buffers. For example,
2886 * they may have been added in ext3_writepage(). Make them
2887 * freeable here, so the page does not leak.
2888 */
aaa4059b 2889 do_invalidatepage(page, 0);
1da177e4
LT
2890 unlock_page(page);
2891 return 0; /* don't care */
2892 }
2893
2894 /*
2895 * The page straddles i_size. It must be zeroed out on each and every
2896 * writepage invocation because it may be mmapped. "A file is mapped
2897 * in multiples of the page size. For a file that is not a multiple of
2898 * the page size, the remaining memory is zeroed when mapped, and
2899 * writes to that region are not written out to the file."
2900 */
eebd2aa3 2901 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
35c80d5f 2902 return __block_write_full_page(inode, page, get_block, wbc, handler);
1da177e4 2903}
1fe72eaa 2904EXPORT_SYMBOL(block_write_full_page_endio);
1da177e4 2905
35c80d5f
CM
2906/*
2907 * The generic ->writepage function for buffer-backed address_spaces
2908 */
2909int block_write_full_page(struct page *page, get_block_t *get_block,
2910 struct writeback_control *wbc)
2911{
2912 return block_write_full_page_endio(page, get_block, wbc,
2913 end_buffer_async_write);
2914}
1fe72eaa 2915EXPORT_SYMBOL(block_write_full_page);
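/*
 * Illustrative sketch: the common ->writepage for a get_block-based
 * filesystem is just a wrapper around block_write_full_page(), again using
 * the hypothetical myfs_get_block() mapper.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}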
35c80d5f 2916
1da177e4
LT
2917sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2918 get_block_t *get_block)
2919{
2920 struct buffer_head tmp;
2921 struct inode *inode = mapping->host;
2922 tmp.b_state = 0;
2923 tmp.b_blocknr = 0;
b0cf2321 2924 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2925 get_block(inode, block, &tmp, 0);
2926 return tmp.b_blocknr;
2927}
1fe72eaa 2928EXPORT_SYMBOL(generic_block_bmap);
1da177e4 2929
6712ecf8 2930static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2931{
2932 struct buffer_head *bh = bio->bi_private;
2933
1da177e4
LT
2934 if (err == -EOPNOTSUPP) {
2935 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2936 set_bit(BH_Eopnotsupp, &bh->b_state);
2937 }
2938
08bafc03
KM
2939 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2940 set_bit(BH_Quiet, &bh->b_state);
2941
1da177e4
LT
2942 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2943 bio_put(bio);
1da177e4
LT
2944}
2945
2946int submit_bh(int rw, struct buffer_head * bh)
2947{
2948 struct bio *bio;
2949 int ret = 0;
2950
2951 BUG_ON(!buffer_locked(bh));
2952 BUG_ON(!buffer_mapped(bh));
2953 BUG_ON(!bh->b_end_io);
8fb0e342
AK
2954 BUG_ON(buffer_delay(bh));
2955 BUG_ON(buffer_unwritten(bh));
1da177e4 2956
48fd4f93
JA
2957 /*
2958 * Mask in barrier bit for a write (could be either a WRITE or a
2959 * WRITE_SYNC
2960 */
2961 if (buffer_ordered(bh) && (rw & WRITE))
2962 rw |= WRITE_BARRIER;
1da177e4
LT
2963
2964 /*
48fd4f93 2965 * Only clear out a write error when rewriting
1da177e4 2966 */
48fd4f93 2967 if (test_set_buffer_req(bh) && (rw & WRITE))
1da177e4
LT
2968 clear_buffer_write_io_error(bh);
2969
2970 /*
2971 * from here on down, it's all bio -- do the initial mapping,
2972 * submit_bio -> generic_make_request may further map this bio around
2973 */
2974 bio = bio_alloc(GFP_NOIO, 1);
2975
2976 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2977 bio->bi_bdev = bh->b_bdev;
2978 bio->bi_io_vec[0].bv_page = bh->b_page;
2979 bio->bi_io_vec[0].bv_len = bh->b_size;
2980 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2981
2982 bio->bi_vcnt = 1;
2983 bio->bi_idx = 0;
2984 bio->bi_size = bh->b_size;
2985
2986 bio->bi_end_io = end_bio_bh_io_sync;
2987 bio->bi_private = bh;
2988
2989 bio_get(bio);
2990 submit_bio(rw, bio);
2991
2992 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2993 ret = -EOPNOTSUPP;
2994
2995 bio_put(bio);
2996 return ret;
2997}
1fe72eaa 2998EXPORT_SYMBOL(submit_bh);
1da177e4
LT
2999
3000/**
3001 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 3002 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
3003 * @nr: number of &struct buffer_heads in the array
3004 * @bhs: array of pointers to &struct buffer_head
3005 *
a7662236
JK
3006 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3007 * requests an I/O operation on them, either a %READ or a %WRITE. The third
3008 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
3009 * are sent to disk. The fourth %READA option is described in the documentation
3010 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
3011 *
3012 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
3013 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3014 * clean when doing a write request, and any buffer that appears to be
3015 * up-to-date when doing read request. Further it marks as clean buffers that
3016 * are processed for writing (the buffer cache won't assume that they are
3017 * actually clean until the buffer gets unlocked).
1da177e4
LT
3018 *
3019 * ll_rw_block sets b_end_io to a simple completion handler that marks
3020 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3021 * any waiters.
3022 *
3023 * All of the buffers must be for the same device, and must also be a
3024 * multiple of the current approved size for the device.
3025 */
3026void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3027{
3028 int i;
3029
3030 for (i = 0; i < nr; i++) {
3031 struct buffer_head *bh = bhs[i];
3032
9cf6b720 3033 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
a7662236 3034 lock_buffer(bh);
ca5de404 3035 else if (!trylock_buffer(bh))
1da177e4
LT
3036 continue;
3037
9cf6b720
JA
3038 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3039 rw == SWRITE_SYNC_PLUG) {
1da177e4 3040 if (test_clear_buffer_dirty(bh)) {
76c3073a 3041 bh->b_end_io = end_buffer_write_sync;
e60e5c50 3042 get_bh(bh);
18ce3751
JA
3043 if (rw == SWRITE_SYNC)
3044 submit_bh(WRITE_SYNC, bh);
3045 else
3046 submit_bh(WRITE, bh);
1da177e4
LT
3047 continue;
3048 }
3049 } else {
1da177e4 3050 if (!buffer_uptodate(bh)) {
76c3073a 3051 bh->b_end_io = end_buffer_read_sync;
e60e5c50 3052 get_bh(bh);
1da177e4
LT
3053 submit_bh(rw, bh);
3054 continue;
3055 }
3056 }
3057 unlock_buffer(bh);
1da177e4
LT
3058 }
3059}
1fe72eaa 3060EXPORT_SYMBOL(ll_rw_block);
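/*
 * Illustrative sketch: ll_rw_block() is handy for opportunistic readahead of
 * a batch of already-obtained buffer heads; only the block actually needed
 * is waited for.  The caller and the bhs[] array are hypothetical.
 */
static int myfs_read_block_ra(struct buffer_head *bhs[], int nr)
{
	ll_rw_block(READ, nr, bhs);	/* locked or uptodate buffers are skipped */
	wait_on_buffer(bhs[0]);
	if (!buffer_uptodate(bhs[0]))
		return -EIO;
	return 0;
}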
1da177e4
LT
3061
3062/*
3063 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3064 * and then start new I/O and then wait upon it. The caller must have a ref on
3065 * the buffer_head.
3066 */
3067int sync_dirty_buffer(struct buffer_head *bh)
3068{
3069 int ret = 0;
3070
3071 WARN_ON(atomic_read(&bh->b_count) < 1);
3072 lock_buffer(bh);
3073 if (test_clear_buffer_dirty(bh)) {
3074 get_bh(bh);
3075 bh->b_end_io = end_buffer_write_sync;
1aa2a7cc 3076 ret = submit_bh(WRITE_SYNC, bh);
1da177e4
LT
3077 wait_on_buffer(bh);
3078 if (buffer_eopnotsupp(bh)) {
3079 clear_buffer_eopnotsupp(bh);
3080 ret = -EOPNOTSUPP;
3081 }
3082 if (!ret && !buffer_uptodate(bh))
3083 ret = -EIO;
3084 } else {
3085 unlock_buffer(bh);
3086 }
3087 return ret;
3088}
1fe72eaa 3089EXPORT_SYMBOL(sync_dirty_buffer);
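/*
 * Illustrative sketch: the classic synchronous read-modify-write of a
 * metadata block pairs sb_bread()/mark_buffer_dirty() with
 * sync_dirty_buffer().  The block layout and "myfs_sync_block" are
 * hypothetical.
 */
static int myfs_sync_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, block);
	if (!bh)
		return -EIO;
	/* ... update the on-disk structure in bh->b_data here ... */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* submits the write and waits for it */
	brelse(bh);
	return err;
}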
1da177e4
LT
3090
3091/*
3092 * try_to_free_buffers() checks if all the buffers on this particular page
3093 * are unused, and releases them if so.
3094 *
3095 * Exclusion against try_to_free_buffers may be obtained by either
3096 * locking the page or by holding its mapping's private_lock.
3097 *
3098 * If the page is dirty but all the buffers are clean then we need to
3099 * be sure to mark the page clean as well. This is because the page
3100 * may be against a block device, and a later reattachment of buffers
3101 * to a dirty page will set *all* buffers dirty. Which would corrupt
3102 * filesystem data on the same device.
3103 *
3104 * The same applies to regular filesystem pages: if all the buffers are
3105 * clean then we set the page clean and proceed. To do that, we require
3106 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3107 * private_lock.
3108 *
3109 * try_to_free_buffers() is non-blocking.
3110 */
3111static inline int buffer_busy(struct buffer_head *bh)
3112{
3113 return atomic_read(&bh->b_count) |
3114 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3115}
3116
3117static int
3118drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3119{
3120 struct buffer_head *head = page_buffers(page);
3121 struct buffer_head *bh;
3122
3123 bh = head;
3124 do {
de7d5a3b 3125 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3126 set_bit(AS_EIO, &page->mapping->flags);
3127 if (buffer_busy(bh))
3128 goto failed;
3129 bh = bh->b_this_page;
3130 } while (bh != head);
3131
3132 do {
3133 struct buffer_head *next = bh->b_this_page;
3134
535ee2fb 3135 if (bh->b_assoc_map)
1da177e4
LT
3136 __remove_assoc_queue(bh);
3137 bh = next;
3138 } while (bh != head);
3139 *buffers_to_free = head;
3140 __clear_page_buffers(page);
3141 return 1;
3142failed:
3143 return 0;
3144}
3145
3146int try_to_free_buffers(struct page *page)
3147{
3148 struct address_space * const mapping = page->mapping;
3149 struct buffer_head *buffers_to_free = NULL;
3150 int ret = 0;
3151
3152 BUG_ON(!PageLocked(page));
ecdfc978 3153 if (PageWriteback(page))
1da177e4
LT
3154 return 0;
3155
3156 if (mapping == NULL) { /* can this still happen? */
3157 ret = drop_buffers(page, &buffers_to_free);
3158 goto out;
3159 }
3160
3161 spin_lock(&mapping->private_lock);
3162 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3163
3164 /*
3165 * If the filesystem writes its buffers by hand (eg ext3)
3166 * then we can have clean buffers against a dirty page. We
3167 * clean the page here; otherwise the VM will never notice
3168 * that the filesystem did any IO at all.
3169 *
3170 * Also, during truncate, discard_buffer will have marked all
3171 * the page's buffers clean. We discover that here and clean
3172 * the page also.
87df7241
NP
3173 *
3174 * private_lock must be held over this entire operation in order
3175 * to synchronise against __set_page_dirty_buffers and prevent the
3176 * dirty bit from being lost.
ecdfc978
LT
3177 */
3178 if (ret)
3179 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3180 spin_unlock(&mapping->private_lock);
1da177e4
LT
3181out:
3182 if (buffers_to_free) {
3183 struct buffer_head *bh = buffers_to_free;
3184
3185 do {
3186 struct buffer_head *next = bh->b_this_page;
3187 free_buffer_head(bh);
3188 bh = next;
3189 } while (bh != buffers_to_free);
3190 }
3191 return ret;
3192}
3193EXPORT_SYMBOL(try_to_free_buffers);
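/*
 * Illustrative sketch: a buffer-backed filesystem's ->releasepage typically
 * ends up here; a filesystem with private per-page state would check (and
 * drop) that state first.  "myfs_releasepage" is hypothetical.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* nothing filesystem-specific pins this page, so just try the buffers */
	return try_to_free_buffers(page);
}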
3194
3978d717 3195void block_sync_page(struct page *page)
1da177e4
LT
3196{
3197 struct address_space *mapping;
3198
3199 smp_mb();
3200 mapping = page_mapping(page);
3201 if (mapping)
3202 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4 3203}
1fe72eaa 3204EXPORT_SYMBOL(block_sync_page);
1da177e4
LT
3205
3206/*
3207 * There are no bdflush tunables left. But distributions are
3208 * still running obsolete flush daemons, so we terminate them here.
3209 *
3210 * Use of bdflush() is deprecated and will be removed in a future kernel.
5b0830cb 3211 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
1da177e4 3212 */
bdc480e3 3213SYSCALL_DEFINE2(bdflush, int, func, long, data)
1da177e4
LT
3214{
3215 static int msg_count;
3216
3217 if (!capable(CAP_SYS_ADMIN))
3218 return -EPERM;
3219
3220 if (msg_count < 5) {
3221 msg_count++;
3222 printk(KERN_INFO
3223 "warning: process `%s' used the obsolete bdflush"
3224 " system call\n", current->comm);
3225 printk(KERN_INFO "Fix your initscripts?\n");
3226 }
3227
3228 if (func == 1)
3229 do_exit(0);
3230 return 0;
3231}
3232
3233/*
3234 * Buffer-head allocation
3235 */
e18b890b 3236static struct kmem_cache *bh_cachep;
1da177e4
LT
3237
3238/*
3239 * Once the number of bh's in the machine exceeds this level, we start
3240 * stripping them in writeback.
3241 */
3242static int max_buffer_heads;
3243
3244int buffer_heads_over_limit;
3245
3246struct bh_accounting {
3247 int nr; /* Number of live bh's */
3248 int ratelimit; /* Limit cacheline bouncing */
3249};
3250
3251static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3252
3253static void recalc_bh_state(void)
3254{
3255 int i;
3256 int tot = 0;
3257
3258 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3259 return;
3260 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3261 for_each_online_cpu(i)
1da177e4
LT
3262 tot += per_cpu(bh_accounting, i).nr;
3263 buffer_heads_over_limit = (tot > max_buffer_heads);
3264}
3265
dd0fc66f 3266struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3267{
488514d1 3268 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
1da177e4 3269 if (ret) {
a35afb83 3270 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3271 get_cpu_var(bh_accounting).nr++;
1da177e4 3272 recalc_bh_state();
736c7b80 3273 put_cpu_var(bh_accounting);
1da177e4
LT
3274 }
3275 return ret;
3276}
3277EXPORT_SYMBOL(alloc_buffer_head);
3278
3279void free_buffer_head(struct buffer_head *bh)
3280{
3281 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3282 kmem_cache_free(bh_cachep, bh);
736c7b80 3283 get_cpu_var(bh_accounting).nr--;
1da177e4 3284 recalc_bh_state();
736c7b80 3285 put_cpu_var(bh_accounting);
1da177e4
LT
3286}
3287EXPORT_SYMBOL(free_buffer_head);
3288
1da177e4
LT
3289static void buffer_exit_cpu(int cpu)
3290{
3291 int i;
3292 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3293
3294 for (i = 0; i < BH_LRU_SIZE; i++) {
3295 brelse(b->bhs[i]);
3296 b->bhs[i] = NULL;
3297 }
8a143426
ED
3298 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3299 per_cpu(bh_accounting, cpu).nr = 0;
3300 put_cpu_var(bh_accounting);
1da177e4
LT
3301}
3302
3303static int buffer_cpu_notify(struct notifier_block *self,
3304 unsigned long action, void *hcpu)
3305{
8bb78442 3306 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3307 buffer_exit_cpu((unsigned long)hcpu);
3308 return NOTIFY_OK;
3309}
1da177e4 3310
389d1b08 3311/**
a6b91919 3312 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3313 * @bh: struct buffer_head
3314 *
3315 * Returns 1 if the buffer is up-to-date, or 0 with the
3316 * buffer locked if it is not.
3317 */
3318int bh_uptodate_or_lock(struct buffer_head *bh)
3319{
3320 if (!buffer_uptodate(bh)) {
3321 lock_buffer(bh);
3322 if (!buffer_uptodate(bh))
3323 return 0;
3324 unlock_buffer(bh);
3325 }
3326 return 1;
3327}
3328EXPORT_SYMBOL(bh_uptodate_or_lock);
3329
3330/**
a6b91919 3331 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3332 * @bh: struct buffer_head
3333 *
3334 * Returns zero on success and -EIO on error.
3335 */
3336int bh_submit_read(struct buffer_head *bh)
3337{
3338 BUG_ON(!buffer_locked(bh));
3339
3340 if (buffer_uptodate(bh)) {
3341 unlock_buffer(bh);
3342 return 0;
3343 }
3344
3345 get_bh(bh);
3346 bh->b_end_io = end_buffer_read_sync;
3347 submit_bh(READ, bh);
3348 wait_on_buffer(bh);
3349 if (buffer_uptodate(bh))
3350 return 0;
3351 return -EIO;
3352}
3353EXPORT_SYMBOL(bh_submit_read);
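/*
 * Illustrative sketch: bh_uptodate_or_lock() and bh_submit_read() are meant
 * to be used as a pair, skipping the lock/submit/wait dance when the buffer
 * is already uptodate.  "myfs_read_bh" is a hypothetical wrapper.
 */
static int myfs_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer not locked */
	return bh_submit_read(bh);	/* bh is now locked; submit and wait */
}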
3354
b98938c3 3355static void
51cc5068 3356init_buffer_head(void *data)
b98938c3
CL
3357{
3358 struct buffer_head *bh = data;
3359
3360 memset(bh, 0, sizeof(*bh));
3361 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3362}
3363
1da177e4
LT
3364void __init buffer_init(void)
3365{
3366 int nrpages;
3367
b98938c3
CL
3368 bh_cachep = kmem_cache_create("buffer_head",
3369 sizeof(struct buffer_head), 0,
3370 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3371 SLAB_MEM_SPREAD),
3372 init_buffer_head);
1da177e4
LT
3373
3374 /*
3375 * Limit the bh occupancy to 10% of ZONE_NORMAL
3376 */
3377 nrpages = (nr_free_buffer_pages() * 10) / 100;
3378 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3379 hotcpu_notifier(buffer_cpu_notify, 0);
3380}