1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
1fe72eaa 55EXPORT_SYMBOL(init_buffer);
56
57static int sync_buffer(void *word)
58{
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
62
63 smp_mb();
64 bd = bh->b_bdev;
65 if (bd)
66 blk_run_address_space(bd->bd_inode->i_mapping);
67 io_schedule();
68 return 0;
69}
70
fc9b52cd 71void __lock_buffer(struct buffer_head *bh)
72{
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
75}
76EXPORT_SYMBOL(__lock_buffer);
77
78void unlock_buffer(struct buffer_head *bh)
79{
80 clear_bit_unlock(BH_Lock, &bh->b_state);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
83}
1fe72eaa 84EXPORT_SYMBOL(unlock_buffer);
85
86/*
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
90 */
91void __wait_on_buffer(struct buffer_head * bh)
92{
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94}
1fe72eaa 95EXPORT_SYMBOL(__wait_on_buffer);
96
97static void
98__clear_page_buffers(struct page *page)
99{
100 ClearPagePrivate(page);
4c21e2f2 101 set_page_private(page, 0);
102 page_cache_release(page);
103}
104
105
106static int quiet_error(struct buffer_head *bh)
107{
108 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
109 return 0;
110 return 1;
111}
112
113
114static void buffer_io_error(struct buffer_head *bh)
115{
116 char b[BDEVNAME_SIZE];
117 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118 bdevname(bh->b_bdev, b),
119 (unsigned long long)bh->b_blocknr);
120}
121
122/*
123 * End-of-IO handler helper function which does not touch the bh after
124 * unlocking it.
125 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126 * a race there is benign: unlock_buffer() only use the bh's address for
127 * hashing after unlocking the buffer, so it doesn't actually touch the bh
128 * itself.
129 */
130static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
131{
132 if (uptodate) {
133 set_buffer_uptodate(bh);
134 } else {
135 /* This happens, due to failed READA attempts. */
136 clear_buffer_uptodate(bh);
137 }
138 unlock_buffer(bh);
139}
140
141/*
142 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
143 * unlock the buffer. This is what ll_rw_block uses too.
144 */
145void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
146{
147 __end_buffer_read_notouch(bh, uptodate);
148 put_bh(bh);
149}
1fe72eaa 150EXPORT_SYMBOL(end_buffer_read_sync);
151
152void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
153{
154 char b[BDEVNAME_SIZE];
155
156 if (uptodate) {
157 set_buffer_uptodate(bh);
158 } else {
08bafc03 159 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
160 buffer_io_error(bh);
161 printk(KERN_WARNING "lost page write due to "
162 "I/O error on %s\n",
163 bdevname(bh->b_bdev, b));
164 }
165 set_buffer_write_io_error(bh);
166 clear_buffer_uptodate(bh);
167 }
168 unlock_buffer(bh);
169 put_bh(bh);
170}
171EXPORT_SYMBOL(end_buffer_write_sync);
172
173/*
174 * Various filesystems appear to want __find_get_block to be non-blocking.
175 * But it's the page lock which protects the buffers. To get around this,
176 * we get exclusion from try_to_free_buffers with the blockdev mapping's
177 * private_lock.
178 *
179 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
180 * may be quite high. This code could TryLock the page, and if that
181 * succeeds, there is no need to take private_lock. (But if
182 * private_lock is contended then so is mapping->tree_lock).
183 */
184static struct buffer_head *
385fd4c5 185__find_get_block_slow(struct block_device *bdev, sector_t block)
186{
187 struct inode *bd_inode = bdev->bd_inode;
188 struct address_space *bd_mapping = bd_inode->i_mapping;
189 struct buffer_head *ret = NULL;
190 pgoff_t index;
191 struct buffer_head *bh;
192 struct buffer_head *head;
193 struct page *page;
194 int all_mapped = 1;
195
196 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197 page = find_get_page(bd_mapping, index);
198 if (!page)
199 goto out;
200
201 spin_lock(&bd_mapping->private_lock);
202 if (!page_has_buffers(page))
203 goto out_unlock;
204 head = page_buffers(page);
205 bh = head;
206 do {
207 if (!buffer_mapped(bh))
208 all_mapped = 0;
209 else if (bh->b_blocknr == block) {
210 ret = bh;
211 get_bh(bh);
212 goto out_unlock;
213 }
214 bh = bh->b_this_page;
215 } while (bh != head);
216
217 /* we might be here because some of the buffers on this page are
218 * not mapped. This is due to various races between
219 * file io on the block device and getblk. It gets dealt with
220 * elsewhere, don't buffer_error if we had some unmapped buffers
221 */
222 if (all_mapped) {
223 printk("__find_get_block_slow() failed. "
224 "block=%llu, b_blocknr=%llu\n",
225 (unsigned long long)block,
226 (unsigned long long)bh->b_blocknr);
227 printk("b_state=0x%08lx, b_size=%zu\n",
228 bh->b_state, bh->b_size);
229 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
230 }
231out_unlock:
232 spin_unlock(&bd_mapping->private_lock);
233 page_cache_release(page);
234out:
235 return ret;
236}
237
238/* If invalidate_buffers() will trash dirty buffers, it means some kind
239 of fs corruption is going on. Trashing dirty data always implies losing
240 information that was supposed to be just stored on the physical layer
241 by the user.
242
243 Thus invalidate_buffers in general usage is not allowed to trash
244 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
245 be preserved. These buffers are simply skipped.
246
247 We also skip buffers which are still in use. For example this can
248 happen if a userspace program is reading the block device.
249
250 NOTE: In the case where the user removes a removable-media disk, even if
251 there is still dirty data not synced to disk (due to a bug in the device
252 driver or to an error by the user), by not destroying the dirty buffers we could
253 generate corruption also on the next media inserted; thus a parameter is
254 necessary to handle this case in the safest way possible (trying
255 not to corrupt the newly inserted disk with data belonging to
256 the old, now-corrupted disk). Also for the ramdisk the natural thing
257 to do in order to release the ramdisk memory is to destroy dirty buffers.
258
259 These are two special cases. Normal usage implies that the device driver
260 issues a sync on the device (without waiting for I/O completion) and
261 then an invalidate_buffers call that doesn't trash dirty buffers.
262
263 For handling cache coherency with the blkdev pagecache the 'update' case
264 has been introduced. It is needed to re-read from disk any pinned
265 buffer. NOTE: re-reading from disk is destructive so we can do it only
266 when we assume nobody is changing the buffercache under our I/O and when
267 we think the disk contains more recent information than the buffercache.
268 The update == 1 pass marks the buffers we need to update, the update == 2
269 pass does the actual I/O. */
270void invalidate_bdev(struct block_device *bdev)
271{
272 struct address_space *mapping = bdev->bd_inode->i_mapping;
273
274 if (mapping->nrpages == 0)
275 return;
276
277 invalidate_bh_lrus();
278 lru_add_drain_all();	/* make sure all lru add caches are flushed */
279 invalidate_mapping_pages(mapping, 0, -1);
280}
281EXPORT_SYMBOL(invalidate_bdev);
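/*
 * [Editor's sketch, not part of the original file] The "sync first,
 * then invalidate" usage described in the comment above might look
 * roughly like this in a driver's media-change path; sync_blockdev()
 * is assumed to be the usual pagecache flush helper, and error
 * handling is omitted:
 *
 *	sync_blockdev(bdev);		write back dirty pagecache pages
 *	invalidate_bdev(bdev);		then drop the clean, unused buffers
 */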
282
283/*
5b0830cb 284 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
285 */
286static void free_more_memory(void)
287{
288 struct zone *zone;
289 int nid;
290
291 wakeup_flusher_threads(1024);
292 yield();
293
0e88460d 294 for_each_online_node(nid) {
295 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296 gfp_zone(GFP_NOFS), NULL,
297 &zone);
298 if (zone)
299 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
300 GFP_NOFS, NULL);
301 }
302}
303
304/*
305 * I/O completion handler for block_read_full_page() - pages
306 * which come unlocked at the end of I/O.
307 */
308static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
309{
310 unsigned long flags;
311 struct buffer_head *first;
312 struct buffer_head *tmp;
313 struct page *page;
314 int page_uptodate = 1;
315
316 BUG_ON(!buffer_async_read(bh));
317
318 page = bh->b_page;
319 if (uptodate) {
320 set_buffer_uptodate(bh);
321 } else {
322 clear_buffer_uptodate(bh);
08bafc03 323 if (!quiet_error(bh))
324 buffer_io_error(bh);
325 SetPageError(page);
326 }
327
328 /*
329 * Be _very_ careful from here on. Bad things can happen if
330 * two buffer heads end IO at almost the same time and both
331 * decide that the page is now completely done.
332 */
333 first = page_buffers(page);
334 local_irq_save(flags);
335 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336 clear_buffer_async_read(bh);
337 unlock_buffer(bh);
338 tmp = bh;
339 do {
340 if (!buffer_uptodate(tmp))
341 page_uptodate = 0;
342 if (buffer_async_read(tmp)) {
343 BUG_ON(!buffer_locked(tmp));
344 goto still_busy;
345 }
346 tmp = tmp->b_this_page;
347 } while (tmp != bh);
348 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349 local_irq_restore(flags);
350
351 /*
352 * If none of the buffers had errors and they are all
353 * uptodate then we can set the page uptodate.
354 */
355 if (page_uptodate && !PageError(page))
356 SetPageUptodate(page);
357 unlock_page(page);
358 return;
359
360still_busy:
361 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362 local_irq_restore(flags);
363 return;
364}
365
366/*
367 * Completion handler for block_write_full_page() - pages which are unlocked
368 * during I/O, and which have PageWriteback cleared upon I/O completion.
369 */
35c80d5f 370void end_buffer_async_write(struct buffer_head *bh, int uptodate)
371{
372 char b[BDEVNAME_SIZE];
373 unsigned long flags;
374 struct buffer_head *first;
375 struct buffer_head *tmp;
376 struct page *page;
377
378 BUG_ON(!buffer_async_write(bh));
379
380 page = bh->b_page;
381 if (uptodate) {
382 set_buffer_uptodate(bh);
383 } else {
08bafc03 384 if (!quiet_error(bh)) {
385 buffer_io_error(bh);
386 printk(KERN_WARNING "lost page write due to "
387 "I/O error on %s\n",
388 bdevname(bh->b_bdev, b));
389 }
390 set_bit(AS_EIO, &page->mapping->flags);
58ff407b 391 set_buffer_write_io_error(bh);
392 clear_buffer_uptodate(bh);
393 SetPageError(page);
394 }
395
396 first = page_buffers(page);
397 local_irq_save(flags);
398 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
399
400 clear_buffer_async_write(bh);
401 unlock_buffer(bh);
402 tmp = bh->b_this_page;
403 while (tmp != bh) {
404 if (buffer_async_write(tmp)) {
405 BUG_ON(!buffer_locked(tmp));
406 goto still_busy;
407 }
408 tmp = tmp->b_this_page;
409 }
410 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411 local_irq_restore(flags);
412 end_page_writeback(page);
413 return;
414
415still_busy:
416 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417 local_irq_restore(flags);
418 return;
419}
1fe72eaa 420EXPORT_SYMBOL(end_buffer_async_write);
421
422/*
423 * If a page's buffers are under async read-in (end_buffer_async_read
424 * completion) then there is a possibility that another thread of
425 * control could lock one of the buffers after it has completed
426 * but while some of the other buffers have not completed. This
427 * locked buffer would confuse end_buffer_async_read() into not unlocking
428 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
429 * that this buffer is not under async I/O.
430 *
431 * The page comes unlocked when it has no locked buffer_async buffers
432 * left.
433 *
434 * PageLocked prevents anyone starting new async I/O reads any of
435 * the buffers.
436 *
437 * PageWriteback is used to prevent simultaneous writeout of the same
438 * page.
439 *
440 * PageLocked prevents anyone from starting writeback of a page which is
441 * under read I/O (PageWriteback is only ever set against a locked page).
442 */
443static void mark_buffer_async_read(struct buffer_head *bh)
444{
445 bh->b_end_io = end_buffer_async_read;
446 set_buffer_async_read(bh);
447}
448
449static void mark_buffer_async_write_endio(struct buffer_head *bh,
450 bh_end_io_t *handler)
451{
452 bh->b_end_io = handler;
453 set_buffer_async_write(bh);
454}
455
456void mark_buffer_async_write(struct buffer_head *bh)
457{
458 mark_buffer_async_write_endio(bh, end_buffer_async_write);
459}
460EXPORT_SYMBOL(mark_buffer_async_write);
461
462
463/*
464 * fs/buffer.c contains helper functions for buffer-backed address space's
465 * fsync functions. A common requirement for buffer-based filesystems is
466 * that certain data from the backing blockdev needs to be written out for
467 * a successful fsync(). For example, ext2 indirect blocks need to be
468 * written back and waited upon before fsync() returns.
469 *
470 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
471 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472 * management of a list of dependent buffers at ->i_mapping->private_list.
473 *
474 * Locking is a little subtle: try_to_free_buffers() will remove buffers
475 * from their controlling inode's queue when they are being freed. But
476 * try_to_free_buffers() will be operating against the *blockdev* mapping
477 * at the time, not against the S_ISREG file which depends on those buffers.
478 * So the locking for private_list is via the private_lock in the address_space
479 * which backs the buffers. Which is different from the address_space
480 * against which the buffers are listed. So for a particular address_space,
481 * mapping->private_lock does *not* protect mapping->private_list! In fact,
482 * mapping->private_list will always be protected by the backing blockdev's
483 * ->private_lock.
484 *
485 * Which introduces a requirement: all buffers on an address_space's
486 * ->private_list must be from the same address_space: the blockdev's.
487 *
488 * address_spaces which do not place buffers at ->private_list via these
489 * utility functions are free to use private_lock and private_list for
490 * whatever they want. The only requirement is that list_empty(private_list)
491 * be true at clear_inode() time.
492 *
493 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
494 * filesystems should do that. invalidate_inode_buffers() should just go
495 * BUG_ON(!list_empty).
496 *
497 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
498 * take an address_space, not an inode. And it should be called
499 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
500 * queued up.
501 *
502 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503 * list if it is already on a list. Because if the buffer is on a list,
504 * it *must* already be on the right one. If not, the filesystem is being
505 * silly. This will save a ton of locking. But first we have to ensure
506 * that buffers are taken *off* the old inode's list when they are freed
507 * (presumably in truncate). That requires careful auditing of all
508 * filesystems (do it inside bforget()). It could also be done by bringing
509 * b_inode back.
510 */
511
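/*
 * [Editor's note, illustrative only] A buffer-based filesystem
 * typically uses the helpers described above roughly like this
 * (names simplified; real callers such as ext2 also write the
 * inode itself):
 *
 *	bh = sb_bread(inode->i_sb, indirect_block);
 *	... modify bh->b_data ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 *
 * and later, in the filesystem's fsync method:
 *
 *	err = sync_mapping_buffers(inode->i_mapping);
 */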
512/*
513 * The buffer's backing address_space's private_lock must be held
514 */
dbacefc9 515static void __remove_assoc_queue(struct buffer_head *bh)
516{
517 list_del_init(&bh->b_assoc_buffers);
518 WARN_ON(!bh->b_assoc_map);
519 if (buffer_write_io_error(bh))
520 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521 bh->b_assoc_map = NULL;
522}
523
524int inode_has_buffers(struct inode *inode)
525{
526 return !list_empty(&inode->i_data.private_list);
527}
528
529/*
530 * osync is designed to support O_SYNC io. It waits synchronously for
531 * all already-submitted IO to complete, but does not queue any new
532 * writes to the disk.
533 *
534 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535 * you dirty the buffers, and then use osync_inode_buffers to wait for
536 * completion. Any other dirty buffers which are not yet queued for
537 * write will not be flushed to disk by the osync.
538 */
539static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
540{
541 struct buffer_head *bh;
542 struct list_head *p;
543 int err = 0;
544
545 spin_lock(lock);
546repeat:
547 list_for_each_prev(p, list) {
548 bh = BH_ENTRY(p);
549 if (buffer_locked(bh)) {
550 get_bh(bh);
551 spin_unlock(lock);
552 wait_on_buffer(bh);
553 if (!buffer_uptodate(bh))
554 err = -EIO;
555 brelse(bh);
556 spin_lock(lock);
557 goto repeat;
558 }
559 }
560 spin_unlock(lock);
561 return err;
562}
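/*
 * [Editor's sketch, illustrative only] The O_SYNC usage described in
 * the comment above osync_buffers_list(): queue the write when the
 * buffer is dirtied, then wait for it before returning - the
 * per-buffer equivalent of what osync_buffers_list() does for a
 * whole list:
 *
 *	ll_rw_block(WRITE, 1, &bh);
 *	...
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */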
563
564static void do_thaw_one(struct super_block *sb, void *unused)
565{
566 char b[BDEVNAME_SIZE];
567 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568 printk(KERN_WARNING "Emergency Thaw on %s\n",
569 bdevname(sb->s_bdev, b));
570}
c2d75438 571
572static void do_thaw_all(struct work_struct *work)
573{
574 iterate_supers(do_thaw_one, NULL);
053c525f 575 kfree(work);
576 printk(KERN_WARNING "Emergency Thaw complete\n");
577}
578
579/**
580 * emergency_thaw_all -- forcibly thaw every frozen filesystem
581 *
582 * Used for emergency unfreeze of all filesystems via SysRq
583 */
584void emergency_thaw_all(void)
585{
586 struct work_struct *work;
587
588 work = kmalloc(sizeof(*work), GFP_ATOMIC);
589 if (work) {
590 INIT_WORK(work, do_thaw_all);
591 schedule_work(work);
592 }
593}
594
595/**
596 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597 * @mapping: the mapping which wants those buffers written
598 *
599 * Starts I/O against the buffers at mapping->private_list, and waits upon
600 * that I/O.
601 *
602 * Basically, this is a convenience function for fsync().
603 * @mapping is a file or directory which needs those buffers to be written for
604 * a successful fsync().
605 */
606int sync_mapping_buffers(struct address_space *mapping)
607{
608 struct address_space *buffer_mapping = mapping->assoc_mapping;
609
610 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
611 return 0;
612
613 return fsync_buffers_list(&buffer_mapping->private_lock,
614 &mapping->private_list);
615}
616EXPORT_SYMBOL(sync_mapping_buffers);
617
618/*
619 * Called when we've recently written block `bblock', and it is known that
620 * `bblock' was for a buffer_boundary() buffer. This means that the block at
621 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
622 * dirty, schedule it for IO. So that indirects merge nicely with their data.
623 */
624void write_boundary_block(struct block_device *bdev,
625 sector_t bblock, unsigned blocksize)
626{
627 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
628 if (bh) {
629 if (buffer_dirty(bh))
630 ll_rw_block(WRITE, 1, &bh);
631 put_bh(bh);
632 }
633}
634
635void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
636{
637 struct address_space *mapping = inode->i_mapping;
638 struct address_space *buffer_mapping = bh->b_page->mapping;
639
640 mark_buffer_dirty(bh);
641 if (!mapping->assoc_mapping) {
642 mapping->assoc_mapping = buffer_mapping;
643 } else {
644 BUG_ON(mapping->assoc_mapping != buffer_mapping);
645 }
646 if (!bh->b_assoc_map) {
647 spin_lock(&buffer_mapping->private_lock);
648 list_move_tail(&bh->b_assoc_buffers,
649 &mapping->private_list);
58ff407b 650 bh->b_assoc_map = mapping;
651 spin_unlock(&buffer_mapping->private_lock);
652 }
653}
654EXPORT_SYMBOL(mark_buffer_dirty_inode);
655
656/*
657 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
658 * dirty.
659 *
660 * If warn is true, then emit a warning if the page is not uptodate and has
661 * not been truncated.
662 */
a8e7d49a 663static void __set_page_dirty(struct page *page,
664 struct address_space *mapping, int warn)
665{
19fd6231 666 spin_lock_irq(&mapping->tree_lock);
667 if (page->mapping) { /* Race with truncate? */
668 WARN_ON_ONCE(warn && !PageUptodate(page));
e3a7cca1 669 account_page_dirtied(page, mapping);
670 radix_tree_tag_set(&mapping->page_tree,
671 page_index(page), PAGECACHE_TAG_DIRTY);
672 }
673 spin_unlock_irq(&mapping->tree_lock);
674 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
675}
676
677/*
678 * Add a page to the dirty page list.
679 *
680 * It is a sad fact of life that this function is called from several places
681 * deeply under spinlocking. It may not sleep.
682 *
683 * If the page has buffers, the uptodate buffers are set dirty, to preserve
684 * dirty-state coherency between the page and the buffers. If the page does
685 * not have buffers then when they are later attached they will all be set
686 * dirty.
687 *
688 * The buffers are dirtied before the page is dirtied. There's a small race
689 * window in which a writepage caller may see the page cleanness but not the
690 * buffer dirtiness. That's fine. If this code were to set the page dirty
691 * before the buffers, a concurrent writepage caller could clear the page dirty
692 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693 * page on the dirty page list.
694 *
695 * We use private_lock to lock against try_to_free_buffers while using the
696 * page's buffer list. Also use this to protect against clean buffers being
697 * added to the page after it was set dirty.
698 *
699 * FIXME: may need to call ->reservepage here as well. That's rather up to the
700 * address_space though.
701 */
702int __set_page_dirty_buffers(struct page *page)
703{
704 int newly_dirty;
705 struct address_space *mapping = page_mapping(page);
706
707 if (unlikely(!mapping))
708 return !TestSetPageDirty(page);
709
710 spin_lock(&mapping->private_lock);
711 if (page_has_buffers(page)) {
712 struct buffer_head *head = page_buffers(page);
713 struct buffer_head *bh = head;
714
715 do {
716 set_buffer_dirty(bh);
717 bh = bh->b_this_page;
718 } while (bh != head);
719 }
a8e7d49a 720 newly_dirty = !TestSetPageDirty(page);
721 spin_unlock(&mapping->private_lock);
722
723 if (newly_dirty)
724 __set_page_dirty(page, mapping, 1);
725 return newly_dirty;
726}
727EXPORT_SYMBOL(__set_page_dirty_buffers);
728
729/*
730 * Write out and wait upon a list of buffers.
731 *
732 * We have conflicting pressures: we want to make sure that all
733 * initially dirty buffers get waited on, but that any subsequently
734 * dirtied buffers don't. After all, we don't want fsync to last
735 * forever if somebody is actively writing to the file.
736 *
737 * Do this in two main stages: first we copy dirty buffers to a
738 * temporary inode list, queueing the writes as we go. Then we clean
739 * up, waiting for those writes to complete.
740 *
741 * During this second stage, any subsequent updates to the file may end
742 * up refiling the buffer on the original inode's dirty list again, so
743 * there is a chance we will end up with a buffer queued for write but
744 * not yet completed on that list. So, as a final cleanup we go through
745 * the osync code to catch these locked, dirty buffers without requeuing
746 * any newly dirty buffers for write.
747 */
748static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
749{
750 struct buffer_head *bh;
751 struct list_head tmp;
9cf6b720 752 struct address_space *mapping, *prev_mapping = NULL;
753 int err = 0, err2;
754
755 INIT_LIST_HEAD(&tmp);
756
757 spin_lock(lock);
758 while (!list_empty(list)) {
759 bh = BH_ENTRY(list->next);
760 mapping = bh->b_assoc_map;
761 __remove_assoc_queue(bh);
762 /* Avoid race with mark_buffer_dirty_inode() which does
763 * a lockless check and we rely on seeing the dirty bit */
764 smp_mb();
765 if (buffer_dirty(bh) || buffer_locked(bh)) {
766 list_add(&bh->b_assoc_buffers, &tmp);
535ee2fb 767 bh->b_assoc_map = mapping;
768 if (buffer_dirty(bh)) {
769 get_bh(bh);
770 spin_unlock(lock);
771 /*
772 * Ensure any pending I/O completes so that
773 * write_dirty_buffer() actually writes the
774 * current contents - it is a noop if I/O is
775 * still in flight on potentially older
776 * contents.
777 */
778 write_dirty_buffer(bh, WRITE_SYNC_PLUG);
779
780 /*
781 * Kick off IO for the previous mapping. Note
782 * that we will not run the very last mapping,
783 * wait_on_buffer() will do that for us
784 * through sync_buffer().
785 */
786 if (prev_mapping && prev_mapping != mapping)
787 blk_run_address_space(prev_mapping);
788 prev_mapping = mapping;
789
790 brelse(bh);
791 spin_lock(lock);
792 }
793 }
794 }
795
796 while (!list_empty(&tmp)) {
797 bh = BH_ENTRY(tmp.prev);
1da177e4 798 get_bh(bh);
799 mapping = bh->b_assoc_map;
800 __remove_assoc_queue(bh);
801 /* Avoid race with mark_buffer_dirty_inode() which does
802 * a lockless check and we rely on seeing the dirty bit */
803 smp_mb();
804 if (buffer_dirty(bh)) {
805 list_add(&bh->b_assoc_buffers,
e3892296 806 &mapping->private_list);
807 bh->b_assoc_map = mapping;
808 }
809 spin_unlock(lock);
810 wait_on_buffer(bh);
811 if (!buffer_uptodate(bh))
812 err = -EIO;
813 brelse(bh);
814 spin_lock(lock);
815 }
816
817 spin_unlock(lock);
818 err2 = osync_buffers_list(lock, list);
819 if (err)
820 return err;
821 else
822 return err2;
823}
824
825/*
826 * Invalidate any and all dirty buffers on a given inode. We are
827 * probably unmounting the fs, but that doesn't mean we have already
828 * done a sync(). Just drop the buffers from the inode list.
829 *
830 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
831 * assumes that all the buffers are against the blockdev. Not true
832 * for reiserfs.
833 */
834void invalidate_inode_buffers(struct inode *inode)
835{
836 if (inode_has_buffers(inode)) {
837 struct address_space *mapping = &inode->i_data;
838 struct list_head *list = &mapping->private_list;
839 struct address_space *buffer_mapping = mapping->assoc_mapping;
840
841 spin_lock(&buffer_mapping->private_lock);
842 while (!list_empty(list))
843 __remove_assoc_queue(BH_ENTRY(list->next));
844 spin_unlock(&buffer_mapping->private_lock);
845 }
846}
52b19ac9 847EXPORT_SYMBOL(invalidate_inode_buffers);
848
849/*
850 * Remove any clean buffers from the inode's buffer list. This is called
851 * when we're trying to free the inode itself. Those buffers can pin it.
852 *
853 * Returns true if all buffers were removed.
854 */
855int remove_inode_buffers(struct inode *inode)
856{
857 int ret = 1;
858
859 if (inode_has_buffers(inode)) {
860 struct address_space *mapping = &inode->i_data;
861 struct list_head *list = &mapping->private_list;
862 struct address_space *buffer_mapping = mapping->assoc_mapping;
863
864 spin_lock(&buffer_mapping->private_lock);
865 while (!list_empty(list)) {
866 struct buffer_head *bh = BH_ENTRY(list->next);
867 if (buffer_dirty(bh)) {
868 ret = 0;
869 break;
870 }
871 __remove_assoc_queue(bh);
872 }
873 spin_unlock(&buffer_mapping->private_lock);
874 }
875 return ret;
876}
877
878/*
879 * Create the appropriate buffers when given a page for data area and
880 * the size of each buffer.. Use the bh->b_this_page linked list to
881 * follow the buffers created. Return NULL if unable to create more
882 * buffers.
883 *
884 * The retry flag is used to differentiate async IO (paging, swapping)
885 * which may not fail from ordinary buffer allocations.
886 */
887struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
888 int retry)
889{
890 struct buffer_head *bh, *head;
891 long offset;
892
893try_again:
894 head = NULL;
895 offset = PAGE_SIZE;
896 while ((offset -= size) >= 0) {
897 bh = alloc_buffer_head(GFP_NOFS);
898 if (!bh)
899 goto no_grow;
900
901 bh->b_bdev = NULL;
902 bh->b_this_page = head;
903 bh->b_blocknr = -1;
904 head = bh;
905
906 bh->b_state = 0;
907 atomic_set(&bh->b_count, 0);
fc5cd582 908 bh->b_private = NULL;
909 bh->b_size = size;
910
911 /* Link the buffer to its page */
912 set_bh_page(bh, page, offset);
913
01ffe339 914 init_buffer(bh, NULL, NULL);
915 }
916 return head;
917/*
918 * In case anything failed, we just free everything we got.
919 */
920no_grow:
921 if (head) {
922 do {
923 bh = head;
924 head = head->b_this_page;
925 free_buffer_head(bh);
926 } while (head);
927 }
928
929 /*
930 * Return failure for non-async IO requests. Async IO requests
931 * are not allowed to fail, so we have to wait until buffer heads
932 * become available. But we don't want tasks sleeping with
933 * partially complete buffers, so all were released above.
934 */
935 if (!retry)
936 return NULL;
937
938 /* We're _really_ low on memory. Now we just
939 * wait for old buffer heads to become free due to
940 * finishing IO. Since this is an async request and
941 * the reserve list is empty, we're sure there are
942 * async buffer heads in use.
943 */
944 free_more_memory();
945 goto try_again;
946}
947EXPORT_SYMBOL_GPL(alloc_page_buffers);
948
949static inline void
950link_dev_buffers(struct page *page, struct buffer_head *head)
951{
952 struct buffer_head *bh, *tail;
953
954 bh = head;
955 do {
956 tail = bh;
957 bh = bh->b_this_page;
958 } while (bh);
959 tail->b_this_page = head;
960 attach_page_buffers(page, head);
961}
962
963/*
964 * Initialise the state of a blockdev page's buffers.
965 */
966static void
967init_page_buffers(struct page *page, struct block_device *bdev,
968 sector_t block, int size)
969{
970 struct buffer_head *head = page_buffers(page);
971 struct buffer_head *bh = head;
972 int uptodate = PageUptodate(page);
973
974 do {
975 if (!buffer_mapped(bh)) {
976 init_buffer(bh, NULL, NULL);
977 bh->b_bdev = bdev;
978 bh->b_blocknr = block;
979 if (uptodate)
980 set_buffer_uptodate(bh);
981 set_buffer_mapped(bh);
982 }
983 block++;
984 bh = bh->b_this_page;
985 } while (bh != head);
986}
987
988/*
989 * Create the page-cache page that contains the requested block.
990 *
991 * This is used purely for blockdev mappings.
992 */
993static struct page *
994grow_dev_page(struct block_device *bdev, sector_t block,
995 pgoff_t index, int size)
996{
997 struct inode *inode = bdev->bd_inode;
998 struct page *page;
999 struct buffer_head *bh;
1000
1001 page = find_or_create_page(inode->i_mapping, index,
1002 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1003 if (!page)
1004 return NULL;
1005
e827f923 1006 BUG_ON(!PageLocked(page));
1007
1008 if (page_has_buffers(page)) {
1009 bh = page_buffers(page);
1010 if (bh->b_size == size) {
1011 init_page_buffers(page, bdev, block, size);
1012 return page;
1013 }
1014 if (!try_to_free_buffers(page))
1015 goto failed;
1016 }
1017
1018 /*
1019 * Allocate some buffers for this page
1020 */
1021 bh = alloc_page_buffers(page, size, 0);
1022 if (!bh)
1023 goto failed;
1024
1025 /*
1026 * Link the page to the buffers and initialise them. Take the
1027 * lock to be atomic wrt __find_get_block(), which does not
1028 * run under the page lock.
1029 */
1030 spin_lock(&inode->i_mapping->private_lock);
1031 link_dev_buffers(page, bh);
1032 init_page_buffers(page, bdev, block, size);
1033 spin_unlock(&inode->i_mapping->private_lock);
1034 return page;
1035
1036failed:
1037 BUG();
1038 unlock_page(page);
1039 page_cache_release(page);
1040 return NULL;
1041}
1042
1043/*
1044 * Create buffers for the specified block device block's page. If
1045 * that page was dirty, the buffers are set dirty also.
1046 */
1047static int
1048grow_buffers(struct block_device *bdev, sector_t block, int size)
1049{
1050 struct page *page;
1051 pgoff_t index;
1052 int sizebits;
1053
1054 sizebits = -1;
1055 do {
1056 sizebits++;
1057 } while ((size << sizebits) < PAGE_SIZE);
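	/*
	 * [Editor's note] sizebits ends up as log2(PAGE_SIZE / size):
	 * e.g. with 512-byte buffers on a 4K page, sizebits == 3 and
	 * eight blocks share one page, so 'index' below is the
	 * pagecache index of the page holding 'block'.
	 */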
1058
1059 index = block >> sizebits;
1da177e4 1060
1061 /*
1062 * Check for a block which wants to lie outside our maximum possible
1063 * pagecache index. (this comparison is done using sector_t types).
1064 */
1065 if (unlikely(index != block >> sizebits)) {
1066 char b[BDEVNAME_SIZE];
1067
1068 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1069 "device %s\n",
8e24eea7 1070 __func__, (unsigned long long)block,
1071 bdevname(bdev, b));
1072 return -EIO;
1073 }
1074 block = index << sizebits;
1075 /* Create a page with the proper size buffers.. */
1076 page = grow_dev_page(bdev, block, index, size);
1077 if (!page)
1078 return 0;
1079 unlock_page(page);
1080 page_cache_release(page);
1081 return 1;
1082}
1083
75c96f85 1084static struct buffer_head *
1085__getblk_slow(struct block_device *bdev, sector_t block, int size)
1086{
1087 /* Size must be multiple of hard sectorsize */
e1defc4f 1088 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1089 (size < 512 || size > PAGE_SIZE))) {
1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1091 size);
1092 printk(KERN_ERR "logical block size: %d\n",
1093 bdev_logical_block_size(bdev));
1094
1095 dump_stack();
1096 return NULL;
1097 }
1098
1099 for (;;) {
1100 struct buffer_head * bh;
e5657933 1101 int ret;
1102
1103 bh = __find_get_block(bdev, block, size);
1104 if (bh)
1105 return bh;
1106
1107 ret = grow_buffers(bdev, block, size);
1108 if (ret < 0)
1109 return NULL;
1110 if (ret == 0)
1111 free_more_memory();
1112 }
1113}
1114
1115/*
1116 * The relationship between dirty buffers and dirty pages:
1117 *
1118 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1119 * the page is tagged dirty in its radix tree.
1120 *
1121 * At all times, the dirtiness of the buffers represents the dirtiness of
1122 * subsections of the page. If the page has buffers, the page dirty bit is
1123 * merely a hint about the true dirty state.
1124 *
1125 * When a page is set dirty in its entirety, all its buffers are marked dirty
1126 * (if the page has buffers).
1127 *
1128 * When a buffer is marked dirty, its page is dirtied, but the page's other
1129 * buffers are not.
1130 *
1131 * Also. When blockdev buffers are explicitly read with bread(), they
1132 * individually become uptodate. But their backing page remains not
1133 * uptodate - even if all of its buffers are uptodate. A subsequent
1134 * block_read_full_page() against that page will discover all the uptodate
1135 * buffers, will set the page uptodate and will perform no I/O.
1136 */
1137
1138/**
1139 * mark_buffer_dirty - mark a buffer_head as needing writeout
67be2dd1 1140 * @bh: the buffer_head to mark dirty
1141 *
1142 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1143 * backing page dirty, then tag the page as dirty in its address_space's radix
1144 * tree and then attach the address_space's inode to its superblock's dirty
1145 * inode list.
1146 *
1147 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1148 * mapping->tree_lock and the global inode_lock.
1149 */
1150void mark_buffer_dirty(struct buffer_head *bh)
1151{
1152 WARN_ON_ONCE(!buffer_uptodate(bh));
1153
1154 /*
1155 * Very *carefully* optimize the it-is-already-dirty case.
1156 *
1157 * Don't let the final "is it dirty" escape to before we
1158 * perhaps modified the buffer.
1159 */
1160 if (buffer_dirty(bh)) {
1161 smp_mb();
1162 if (buffer_dirty(bh))
1163 return;
1164 }
1165
1166 if (!test_set_buffer_dirty(bh)) {
1167 struct page *page = bh->b_page;
1168 if (!TestSetPageDirty(page)) {
1169 struct address_space *mapping = page_mapping(page);
1170 if (mapping)
1171 __set_page_dirty(page, mapping, 0);
1172 }
1173 }
1174}
1175EXPORT_SYMBOL(mark_buffer_dirty);
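/*
 * [Editor's sketch, illustrative only] The usual pattern for dirtying
 * a metadata block through the buffer cache; sb_bread()/brelse() are
 * the standard helpers assumed here, and a caller that must have the
 * block on disk before proceeding would follow up with
 * sync_dirty_buffer(bh):
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (bh) {
 *		... modify bh->b_data ...
 *		mark_buffer_dirty(bh);
 *		brelse(bh);
 *	}
 */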
1176
1177/*
1178 * Decrement a buffer_head's reference count. If all buffers against a page
1179 * have zero reference count, are clean and unlocked, and if the page is clean
1180 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1181 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1182 * a page but it ends up not being freed, and buffers may later be reattached).
1183 */
1184void __brelse(struct buffer_head * buf)
1185{
1186 if (atomic_read(&buf->b_count)) {
1187 put_bh(buf);
1188 return;
1189 }
1190 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1191}
1192EXPORT_SYMBOL(__brelse);
1193
1194/*
1195 * bforget() is like brelse(), except it discards any
1196 * potentially dirty data.
1197 */
1198void __bforget(struct buffer_head *bh)
1199{
1200 clear_buffer_dirty(bh);
535ee2fb 1201 if (bh->b_assoc_map) {
1202 struct address_space *buffer_mapping = bh->b_page->mapping;
1203
1204 spin_lock(&buffer_mapping->private_lock);
1205 list_del_init(&bh->b_assoc_buffers);
58ff407b 1206 bh->b_assoc_map = NULL;
1207 spin_unlock(&buffer_mapping->private_lock);
1208 }
1209 __brelse(bh);
1210}
1fe72eaa 1211EXPORT_SYMBOL(__bforget);
1212
1213static struct buffer_head *__bread_slow(struct buffer_head *bh)
1214{
1215 lock_buffer(bh);
1216 if (buffer_uptodate(bh)) {
1217 unlock_buffer(bh);
1218 return bh;
1219 } else {
1220 get_bh(bh);
1221 bh->b_end_io = end_buffer_read_sync;
1222 submit_bh(READ, bh);
1223 wait_on_buffer(bh);
1224 if (buffer_uptodate(bh))
1225 return bh;
1226 }
1227 brelse(bh);
1228 return NULL;
1229}
1230
1231/*
1232 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1233 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1234 * refcount elevated by one when they're in an LRU. A buffer can only appear
1235 * once in a particular CPU's LRU. A single buffer can be present in multiple
1236 * CPU's LRUs at the same time.
1237 *
1238 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1239 * sb_find_get_block().
1240 *
1241 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1242 * a local interrupt disable for that.
1243 */
1244
1245#define BH_LRU_SIZE 8
1246
1247struct bh_lru {
1248 struct buffer_head *bhs[BH_LRU_SIZE];
1249};
1250
1251static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1252
1253#ifdef CONFIG_SMP
1254#define bh_lru_lock() local_irq_disable()
1255#define bh_lru_unlock() local_irq_enable()
1256#else
1257#define bh_lru_lock() preempt_disable()
1258#define bh_lru_unlock() preempt_enable()
1259#endif
1260
1261static inline void check_irqs_on(void)
1262{
1263#ifdef irqs_disabled
1264 BUG_ON(irqs_disabled());
1265#endif
1266}
1267
1268/*
1269 * The LRU management algorithm is dopey-but-simple. Sorry.
1270 */
1271static void bh_lru_install(struct buffer_head *bh)
1272{
1273 struct buffer_head *evictee = NULL;
1274 struct bh_lru *lru;
1275
1276 check_irqs_on();
1277 bh_lru_lock();
1278 lru = &__get_cpu_var(bh_lrus);
1279 if (lru->bhs[0] != bh) {
1280 struct buffer_head *bhs[BH_LRU_SIZE];
1281 int in;
1282 int out = 0;
1283
1284 get_bh(bh);
1285 bhs[out++] = bh;
1286 for (in = 0; in < BH_LRU_SIZE; in++) {
1287 struct buffer_head *bh2 = lru->bhs[in];
1288
1289 if (bh2 == bh) {
1290 __brelse(bh2);
1291 } else {
1292 if (out >= BH_LRU_SIZE) {
1293 BUG_ON(evictee != NULL);
1294 evictee = bh2;
1295 } else {
1296 bhs[out++] = bh2;
1297 }
1298 }
1299 }
1300 while (out < BH_LRU_SIZE)
1301 bhs[out++] = NULL;
1302 memcpy(lru->bhs, bhs, sizeof(bhs));
1303 }
1304 bh_lru_unlock();
1305
1306 if (evictee)
1307 __brelse(evictee);
1308}
1309
1310/*
1311 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1312 */
1313static struct buffer_head *
1314lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1315{
1316 struct buffer_head *ret = NULL;
1317 struct bh_lru *lru;
3991d3bd 1318 unsigned int i;
1319
1320 check_irqs_on();
1321 bh_lru_lock();
1322 lru = &__get_cpu_var(bh_lrus);
1323 for (i = 0; i < BH_LRU_SIZE; i++) {
1324 struct buffer_head *bh = lru->bhs[i];
1325
1326 if (bh && bh->b_bdev == bdev &&
1327 bh->b_blocknr == block && bh->b_size == size) {
1328 if (i) {
1329 while (i) {
1330 lru->bhs[i] = lru->bhs[i - 1];
1331 i--;
1332 }
1333 lru->bhs[0] = bh;
1334 }
1335 get_bh(bh);
1336 ret = bh;
1337 break;
1338 }
1339 }
1340 bh_lru_unlock();
1341 return ret;
1342}
1343
1344/*
1345 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1346 * it in the LRU and mark it as accessed. If it is not present then return
1347 * NULL
1348 */
1349struct buffer_head *
3991d3bd 1350__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1351{
1352 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1353
1354 if (bh == NULL) {
385fd4c5 1355 bh = __find_get_block_slow(bdev, block);
1356 if (bh)
1357 bh_lru_install(bh);
1358 }
1359 if (bh)
1360 touch_buffer(bh);
1361 return bh;
1362}
1363EXPORT_SYMBOL(__find_get_block);
1364
1365/*
1366 * __getblk will locate (and, if necessary, create) the buffer_head
1367 * which corresponds to the passed block_device, block and size. The
1368 * returned buffer has its reference count incremented.
1369 *
1370 * __getblk() cannot fail - it just keeps trying. If you pass it an
1371 * illegal block number, __getblk() will happily return a buffer_head
1372 * which represents the non-existent block. Very weird.
1373 *
1374 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1375 * attempt is failing. FIXME, perhaps?
1376 */
1377struct buffer_head *
3991d3bd 1378__getblk(struct block_device *bdev, sector_t block, unsigned size)
1379{
1380 struct buffer_head *bh = __find_get_block(bdev, block, size);
1381
1382 might_sleep();
1383 if (bh == NULL)
1384 bh = __getblk_slow(bdev, block, size);
1385 return bh;
1386}
1387EXPORT_SYMBOL(__getblk);
1388
1389/*
1390 * Do async read-ahead on a buffer..
1391 */
3991d3bd 1392void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1393{
1394 struct buffer_head *bh = __getblk(bdev, block, size);
1395 if (likely(bh)) {
1396 ll_rw_block(READA, 1, &bh);
1397 brelse(bh);
1398 }
1399}
1400EXPORT_SYMBOL(__breadahead);
1401
1402/**
1403 * __bread() - reads a specified block and returns the bh
67be2dd1 1404 * @bdev: the block_device to read from
1405 * @block: number of block
1406 * @size: size (in bytes) to read
1407 *
1408 * Reads a specified block, and returns buffer head that contains it.
1409 * It returns NULL if the block was unreadable.
1410 */
1411struct buffer_head *
3991d3bd 1412__bread(struct block_device *bdev, sector_t block, unsigned size)
1413{
1414 struct buffer_head *bh = __getblk(bdev, block, size);
1415
a3e713b5 1416 if (likely(bh) && !buffer_uptodate(bh))
1417 bh = __bread_slow(bh);
1418 return bh;
1419}
1420EXPORT_SYMBOL(__bread);
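/*
 * [Editor's sketch, illustrative only] Reading one block synchronously;
 * most filesystems go through the sb_bread() wrapper, which supplies
 * the superblock's block device and block size:
 *
 *	bh = __bread(bdev, block, 512);
 *	if (!bh)
 *		return -EIO;
 *	... use bh->b_data ...
 *	brelse(bh);
 */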
1421
1422/*
1423 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1424 * This doesn't race because it runs in each cpu either in irq
1425 * or with preempt disabled.
1426 */
1427static void invalidate_bh_lru(void *arg)
1428{
1429 struct bh_lru *b = &get_cpu_var(bh_lrus);
1430 int i;
1431
1432 for (i = 0; i < BH_LRU_SIZE; i++) {
1433 brelse(b->bhs[i]);
1434 b->bhs[i] = NULL;
1435 }
1436 put_cpu_var(bh_lrus);
1437}
1438
1439void invalidate_bh_lrus(void)
1440{
1441 on_each_cpu(invalidate_bh_lru, NULL, 1);
1442}
1443EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1444
1445void set_bh_page(struct buffer_head *bh,
1446 struct page *page, unsigned long offset)
1447{
1448 bh->b_page = page;
e827f923 1449 BUG_ON(offset >= PAGE_SIZE);
1450 if (PageHighMem(page))
1451 /*
1452 * This catches illegal uses and preserves the offset:
1453 */
1454 bh->b_data = (char *)(0 + offset);
1455 else
1456 bh->b_data = page_address(page) + offset;
1457}
1458EXPORT_SYMBOL(set_bh_page);
1459
1460/*
1461 * Called when truncating a buffer on a page completely.
1462 */
858119e1 1463static void discard_buffer(struct buffer_head * bh)
1464{
1465 lock_buffer(bh);
1466 clear_buffer_dirty(bh);
1467 bh->b_bdev = NULL;
1468 clear_buffer_mapped(bh);
1469 clear_buffer_req(bh);
1470 clear_buffer_new(bh);
1471 clear_buffer_delay(bh);
33a266dd 1472 clear_buffer_unwritten(bh);
1473 unlock_buffer(bh);
1474}
1475
1476/**
1477 * block_invalidatepage - invalidate part or all of a buffer-backed page
1478 *
1479 * @page: the page which is affected
1480 * @offset: the index of the truncation point
1481 *
1482 * block_invalidatepage() is called when all or part of the page has become
1483 * invalidated by a truncate operation.
1484 *
1485 * block_invalidatepage() does not have to release all buffers, but it must
1486 * ensure that no dirty buffer is left outside @offset and that no I/O
1487 * is underway against any of the blocks which are outside the truncation
1488 * point. Because the caller is about to free (and possibly reuse) those
1489 * blocks on-disk.
1490 */
2ff28e22 1491void block_invalidatepage(struct page *page, unsigned long offset)
1492{
1493 struct buffer_head *head, *bh, *next;
1494 unsigned int curr_off = 0;
1495
1496 BUG_ON(!PageLocked(page));
1497 if (!page_has_buffers(page))
1498 goto out;
1499
1500 head = page_buffers(page);
1501 bh = head;
1502 do {
1503 unsigned int next_off = curr_off + bh->b_size;
1504 next = bh->b_this_page;
1505
1506 /*
1507 * is this block fully invalidated?
1508 */
1509 if (offset <= curr_off)
1510 discard_buffer(bh);
1511 curr_off = next_off;
1512 bh = next;
1513 } while (bh != head);
1514
1515 /*
1516 * We release buffers only if the entire page is being invalidated.
1517 * The get_block cached value has been unconditionally invalidated,
1518 * so real IO is not possible anymore.
1519 */
1520 if (offset == 0)
1521 try_to_release_page(page, 0);
1522out:
1523 return;
1524}
1525EXPORT_SYMBOL(block_invalidatepage);
1526
1527/*
1528 * We attach and possibly dirty the buffers atomically wrt
1529 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1530 * is already excluded via the page lock.
1531 */
1532void create_empty_buffers(struct page *page,
1533 unsigned long blocksize, unsigned long b_state)
1534{
1535 struct buffer_head *bh, *head, *tail;
1536
1537 head = alloc_page_buffers(page, blocksize, 1);
1538 bh = head;
1539 do {
1540 bh->b_state |= b_state;
1541 tail = bh;
1542 bh = bh->b_this_page;
1543 } while (bh);
1544 tail->b_this_page = head;
1545
1546 spin_lock(&page->mapping->private_lock);
1547 if (PageUptodate(page) || PageDirty(page)) {
1548 bh = head;
1549 do {
1550 if (PageDirty(page))
1551 set_buffer_dirty(bh);
1552 if (PageUptodate(page))
1553 set_buffer_uptodate(bh);
1554 bh = bh->b_this_page;
1555 } while (bh != head);
1556 }
1557 attach_page_buffers(page, head);
1558 spin_unlock(&page->mapping->private_lock);
1559}
1560EXPORT_SYMBOL(create_empty_buffers);
1561
1562/*
1563 * We are taking a block for data and we don't want any output from any
1564 * buffer-cache aliases starting from return from that function and
1565 * until the moment when something will explicitly mark the buffer
1566 * dirty (hopefully that will not happen until we will free that block ;-)
1567 * We don't even need to mark it not-uptodate - nobody can expect
1568 * anything from a newly allocated buffer anyway. We used to use
1569 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1570 * don't want to mark the alias unmapped, for example - it would confuse
1571 * anyone who might pick it with bread() afterwards...
1572 *
1573 * Also.. Note that bforget() doesn't lock the buffer. So there can
1574 * be writeout I/O going on against recently-freed buffers. We don't
1575 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1576 * only if we really need to. That happens here.
1577 */
1578void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1579{
1580 struct buffer_head *old_bh;
1581
1582 might_sleep();
1583
385fd4c5 1584 old_bh = __find_get_block_slow(bdev, block);
1585 if (old_bh) {
1586 clear_buffer_dirty(old_bh);
1587 wait_on_buffer(old_bh);
1588 clear_buffer_req(old_bh);
1589 __brelse(old_bh);
1590 }
1591}
1592EXPORT_SYMBOL(unmap_underlying_metadata);
1593
1594/*
1595 * NOTE! All mapped/uptodate combinations are valid:
1596 *
1597 * Mapped Uptodate Meaning
1598 *
1599 * No No "unknown" - must do get_block()
1600 * No Yes "hole" - zero-filled
1601 * Yes No "allocated" - allocated on disk, not read in
1602 * Yes Yes "valid" - allocated and up-to-date in memory.
1603 *
1604 * "Dirty" is valid only with the last case (mapped+uptodate).
1605 */
1606
1607/*
1608 * While block_write_full_page is writing back the dirty buffers under
1609 * the page lock, whoever dirtied the buffers may decide to clean them
1610 * again at any time. We handle that by only looking at the buffer
1611 * state inside lock_buffer().
1612 *
1613 * If block_write_full_page() is called for regular writeback
1614 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1615 * locked buffer. This only can happen if someone has written the buffer
1616 * directly, with submit_bh(). At the address_space level PageWriteback
1617 * prevents this contention from occurring.
1618 *
1619 * If block_write_full_page() is called with wbc->sync_mode ==
1620 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1621 * causes the writes to be flagged as synchronous writes, but the
1622 * block device queue will NOT be unplugged, since usually many pages
1623 * will be pushed out before the higher-level caller actually
1624 * waits for the writes to be completed. The various wait functions,
1625 * such as wait_on_writeback_range() will ultimately call sync_page()
1626 * which will ultimately call blk_run_backing_dev(), which will end up
1627 * unplugging the device queue.
1628 */
1629static int __block_write_full_page(struct inode *inode, struct page *page,
1630 get_block_t *get_block, struct writeback_control *wbc,
1631 bh_end_io_t *handler)
1632{
1633 int err;
1634 sector_t block;
1635 sector_t last_block;
1636 struct buffer_head *bh, *head;
1637 const unsigned blocksize = 1 << inode->i_blkbits;
1638 int nr_underway = 0;
1639 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1640 WRITE_SYNC_PLUG : WRITE);
1641
1642 BUG_ON(!PageLocked(page));
1643
1644 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1645
1646 if (!page_has_buffers(page)) {
b0cf2321 1647 create_empty_buffers(page, blocksize,
1648 (1 << BH_Dirty)|(1 << BH_Uptodate));
1649 }
1650
1651 /*
1652 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1653 * here, and the (potentially unmapped) buffers may become dirty at
1654 * any time. If a buffer becomes dirty here after we've inspected it
1655 * then we just miss that fact, and the page stays dirty.
1656 *
1657 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1658 * handle that here by just cleaning them.
1659 */
1660
54b21a79 1661 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1662 head = page_buffers(page);
1663 bh = head;
1664
1665 /*
1666 * Get all the dirty buffers mapped to disk addresses and
1667 * handle any aliases from the underlying blockdev's mapping.
1668 */
1669 do {
1670 if (block > last_block) {
1671 /*
1672 * mapped buffers outside i_size will occur, because
1673 * this page can be outside i_size when there is a
1674 * truncate in progress.
1675 */
1676 /*
1677 * The buffer was zeroed by block_write_full_page()
1678 */
1679 clear_buffer_dirty(bh);
1680 set_buffer_uptodate(bh);
1681 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1682 buffer_dirty(bh)) {
b0cf2321 1683 WARN_ON(bh->b_size != blocksize);
1684 err = get_block(inode, block, bh, 1);
1685 if (err)
1686 goto recover;
29a814d2 1687 clear_buffer_delay(bh);
1688 if (buffer_new(bh)) {
1689 /* blockdev mappings never come here */
1690 clear_buffer_new(bh);
1691 unmap_underlying_metadata(bh->b_bdev,
1692 bh->b_blocknr);
1693 }
1694 }
1695 bh = bh->b_this_page;
1696 block++;
1697 } while (bh != head);
1698
1699 do {
1700 if (!buffer_mapped(bh))
1701 continue;
1702 /*
1703 * If it's a fully non-blocking write attempt and we cannot
1704 * lock the buffer then redirty the page. Note that this can
1705 * potentially cause a busy-wait loop from writeback threads
1706 * and kswapd activity, but those code paths have their own
1707 * higher-level throttling.
1708 */
1709 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1710 lock_buffer(bh);
ca5de404 1711 } else if (!trylock_buffer(bh)) {
1712 redirty_page_for_writepage(wbc, page);
1713 continue;
1714 }
1715 if (test_clear_buffer_dirty(bh)) {
35c80d5f 1716 mark_buffer_async_write_endio(bh, handler);
1717 } else {
1718 unlock_buffer(bh);
1719 }
1720 } while ((bh = bh->b_this_page) != head);
1721
1722 /*
1723 * The page and its buffers are protected by PageWriteback(), so we can
1724 * drop the bh refcounts early.
1725 */
1726 BUG_ON(PageWriteback(page));
1727 set_page_writeback(page);
1728
1729 do {
1730 struct buffer_head *next = bh->b_this_page;
1731 if (buffer_async_write(bh)) {
a64c8610 1732 submit_bh(write_op, bh);
1733 nr_underway++;
1734 }
1735 bh = next;
1736 } while (bh != head);
05937baa 1737 unlock_page(page);
1da177e4
LT
1738
1739 err = 0;
1740done:
1741 if (nr_underway == 0) {
1742 /*
1743 * The page was marked dirty, but the buffers were
1744 * clean. Someone wrote them back by hand with
1745 * ll_rw_block/submit_bh. A rare case.
1746 */
1da177e4 1747 end_page_writeback(page);
3d67f2d7 1748
1da177e4
LT
1749 /*
1750 * The page and buffer_heads can be released at any time from
1751 * here on.
1752 */
1da177e4
LT
1753 }
1754 return err;
1755
1756recover:
1757 /*
1758 * ENOSPC, or some other error. We may already have added some
1759 * blocks to the file, so we need to write these out to avoid
1760 * exposing stale data.
1761 * The page is currently locked and not marked for writeback
1762 */
1763 bh = head;
1764 /* Recovery: lock and submit the mapped buffers */
1765 do {
29a814d2
AT
1766 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1767 !buffer_delay(bh)) {
1da177e4 1768 lock_buffer(bh);
35c80d5f 1769 mark_buffer_async_write_endio(bh, handler);
1da177e4
LT
1770 } else {
1771 /*
1772 * The buffer may have been set dirty during
1773 * attachment to a dirty page.
1774 */
1775 clear_buffer_dirty(bh);
1776 }
1777 } while ((bh = bh->b_this_page) != head);
1778 SetPageError(page);
1779 BUG_ON(PageWriteback(page));
7e4c3690 1780 mapping_set_error(page->mapping, err);
1da177e4 1781 set_page_writeback(page);
1da177e4
LT
1782 do {
1783 struct buffer_head *next = bh->b_this_page;
1784 if (buffer_async_write(bh)) {
1785 clear_buffer_dirty(bh);
a64c8610 1786 submit_bh(write_op, bh);
1da177e4
LT
1787 nr_underway++;
1788 }
1da177e4
LT
1789 bh = next;
1790 } while (bh != head);
ffda9d30 1791 unlock_page(page);
1da177e4
LT
1792 goto done;
1793}
1794
afddba49
NP
1795/*
1796 * If a page has any new buffers, zero them out here, and mark them uptodate
1797 * and dirty so they'll be written out (in order to prevent uninitialised
1798 * block data from leaking). And clear the new bit.
1799 */
1800void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1801{
1802 unsigned int block_start, block_end;
1803 struct buffer_head *head, *bh;
1804
1805 BUG_ON(!PageLocked(page));
1806 if (!page_has_buffers(page))
1807 return;
1808
1809 bh = head = page_buffers(page);
1810 block_start = 0;
1811 do {
1812 block_end = block_start + bh->b_size;
1813
1814 if (buffer_new(bh)) {
1815 if (block_end > from && block_start < to) {
1816 if (!PageUptodate(page)) {
1817 unsigned start, size;
1818
1819 start = max(from, block_start);
1820 size = min(to, block_end) - start;
1821
eebd2aa3 1822 zero_user(page, start, size);
afddba49
NP
1823 set_buffer_uptodate(bh);
1824 }
1825
1826 clear_buffer_new(bh);
1827 mark_buffer_dirty(bh);
1828 }
1829 }
1830
1831 block_start = block_end;
1832 bh = bh->b_this_page;
1833 } while (bh != head);
1834}
1835EXPORT_SYMBOL(page_zero_new_buffers);
1836
6e1db88d
CH
1837int block_prepare_write(struct page *page, unsigned from, unsigned to,
1838 get_block_t *get_block)
1da177e4 1839{
6e1db88d 1840 struct inode *inode = page->mapping->host;
1da177e4
LT
1841 unsigned block_start, block_end;
1842 sector_t block;
1843 int err = 0;
1844 unsigned blocksize, bbits;
1845 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1846
1847 BUG_ON(!PageLocked(page));
1848 BUG_ON(from > PAGE_CACHE_SIZE);
1849 BUG_ON(to > PAGE_CACHE_SIZE);
1850 BUG_ON(from > to);
1851
1852 blocksize = 1 << inode->i_blkbits;
1853 if (!page_has_buffers(page))
1854 create_empty_buffers(page, blocksize, 0);
1855 head = page_buffers(page);
1856
1857 bbits = inode->i_blkbits;
1858 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1859
1860 for(bh = head, block_start = 0; bh != head || !block_start;
1861 block++, block_start=block_end, bh = bh->b_this_page) {
1862 block_end = block_start + blocksize;
1863 if (block_end <= from || block_start >= to) {
1864 if (PageUptodate(page)) {
1865 if (!buffer_uptodate(bh))
1866 set_buffer_uptodate(bh);
1867 }
1868 continue;
1869 }
1870 if (buffer_new(bh))
1871 clear_buffer_new(bh);
1872 if (!buffer_mapped(bh)) {
b0cf2321 1873 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1874 err = get_block(inode, block, bh, 1);
1875 if (err)
f3ddbdc6 1876 break;
1da177e4 1877 if (buffer_new(bh)) {
1da177e4
LT
1878 unmap_underlying_metadata(bh->b_bdev,
1879 bh->b_blocknr);
1880 if (PageUptodate(page)) {
637aff46 1881 clear_buffer_new(bh);
1da177e4 1882 set_buffer_uptodate(bh);
637aff46 1883 mark_buffer_dirty(bh);
1da177e4
LT
1884 continue;
1885 }
eebd2aa3
CL
1886 if (block_end > to || block_start < from)
1887 zero_user_segments(page,
1888 to, block_end,
1889 block_start, from);
1da177e4
LT
1890 continue;
1891 }
1892 }
1893 if (PageUptodate(page)) {
1894 if (!buffer_uptodate(bh))
1895 set_buffer_uptodate(bh);
1896 continue;
1897 }
1898 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1899 !buffer_unwritten(bh) &&
1da177e4
LT
1900 (block_start < from || block_end > to)) {
1901 ll_rw_block(READ, 1, &bh);
1902 *wait_bh++=bh;
1903 }
1904 }
1905 /*
1906 * If we issued read requests - let them complete.
1907 */
1908 while(wait_bh > wait) {
1909 wait_on_buffer(*--wait_bh);
1910 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1911 err = -EIO;
1da177e4 1912 }
6e1db88d 1913 if (unlikely(err)) {
afddba49 1914 page_zero_new_buffers(page, from, to);
6e1db88d
CH
1915 ClearPageUptodate(page);
1916 }
1da177e4
LT
1917 return err;
1918}
6e1db88d 1919EXPORT_SYMBOL(block_prepare_write);
1da177e4
LT
1920
1921static int __block_commit_write(struct inode *inode, struct page *page,
1922 unsigned from, unsigned to)
1923{
1924 unsigned block_start, block_end;
1925 int partial = 0;
1926 unsigned blocksize;
1927 struct buffer_head *bh, *head;
1928
1929 blocksize = 1 << inode->i_blkbits;
1930
1931 for(bh = head = page_buffers(page), block_start = 0;
1932 bh != head || !block_start;
1933 block_start=block_end, bh = bh->b_this_page) {
1934 block_end = block_start + blocksize;
1935 if (block_end <= from || block_start >= to) {
1936 if (!buffer_uptodate(bh))
1937 partial = 1;
1938 } else {
1939 set_buffer_uptodate(bh);
1940 mark_buffer_dirty(bh);
1941 }
afddba49 1942 clear_buffer_new(bh);
1da177e4
LT
1943 }
1944
1945 /*
1946 * If this is a partial write which happened to make all buffers
1947 * uptodate then we can optimize away a bogus readpage() for
1948 * the next read(). Here we 'discover' whether the page went
1949 * uptodate as a result of this (potentially partial) write.
1950 */
1951 if (!partial)
1952 SetPageUptodate(page);
1953 return 0;
1954}
1955
6e1db88d
CH
1956int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1957 get_block_t *get_block)
1958{
1959 unsigned start = pos & (PAGE_CACHE_SIZE - 1);
1960
1961 return block_prepare_write(page, start, start + len, get_block);
1962}
1963EXPORT_SYMBOL(__block_write_begin);
1964
afddba49 1965/*
155130a4
CH
1966 * block_write_begin takes care of the basic task of block allocation and
1967 * bringing partial write blocks uptodate first.
1968 *
7bb46a67 1969 * The filesystem needs to handle block truncation upon failure.
afddba49 1970 */
155130a4
CH
1971int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1972 unsigned flags, struct page **pagep, get_block_t *get_block)
afddba49 1973{
6e1db88d 1974 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
afddba49 1975 struct page *page;
6e1db88d 1976 int status;
afddba49 1977
6e1db88d
CH
1978 page = grab_cache_page_write_begin(mapping, index, flags);
1979 if (!page)
1980 return -ENOMEM;
afddba49 1981
6e1db88d 1982 status = __block_write_begin(page, pos, len, get_block);
afddba49 1983 if (unlikely(status)) {
6e1db88d
CH
1984 unlock_page(page);
1985 page_cache_release(page);
1986 page = NULL;
afddba49
NP
1987 }
1988
6e1db88d 1989 *pagep = page;
afddba49
NP
1990 return status;
1991}
1992EXPORT_SYMBOL(block_write_begin);
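/*
 * Illustration only, not part of this file: a minimal ->write_begin for a
 * simple block-backed filesystem can delegate straight to block_write_begin().
 * "example_get_block" is a hypothetical get_block_t supplied by the
 * filesystem; per the comment above, the filesystem must still truncate any
 * blocks it allocated if the write later fails.
 */
static int example_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 example_get_block);
}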
1993
1994int block_write_end(struct file *file, struct address_space *mapping,
1995 loff_t pos, unsigned len, unsigned copied,
1996 struct page *page, void *fsdata)
1997{
1998 struct inode *inode = mapping->host;
1999 unsigned start;
2000
2001 start = pos & (PAGE_CACHE_SIZE - 1);
2002
2003 if (unlikely(copied < len)) {
2004 /*
2005 * The buffers that were written will now be uptodate, so we
2006 * don't have to worry about a readpage reading them and
2007 * overwriting a partial write. However if we have encountered
2008 * a short write and only partially written into a buffer, it
2009 * will not be marked uptodate, so a readpage might come in and
2010 * destroy our partial write.
2011 *
2012 * Do the simplest thing, and just treat any short write to a
2013 * non uptodate page as a zero-length write, and force the
2014 * caller to redo the whole thing.
2015 */
2016 if (!PageUptodate(page))
2017 copied = 0;
2018
2019 page_zero_new_buffers(page, start+copied, start+len);
2020 }
2021 flush_dcache_page(page);
2022
2023 /* This could be a short (even 0-length) commit */
2024 __block_commit_write(inode, page, start, start+copied);
2025
2026 return copied;
2027}
2028EXPORT_SYMBOL(block_write_end);
2029
2030int generic_write_end(struct file *file, struct address_space *mapping,
2031 loff_t pos, unsigned len, unsigned copied,
2032 struct page *page, void *fsdata)
2033{
2034 struct inode *inode = mapping->host;
c7d206b3 2035 int i_size_changed = 0;
afddba49
NP
2036
2037 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2038
2039 /*
2040 * No need to use i_size_read() here, the i_size
2041 * cannot change under us because we hold i_mutex.
2042 *
2043 * But it's important to update i_size while still holding page lock:
2044 * page writeout could otherwise come in and zero beyond i_size.
2045 */
2046 if (pos+copied > inode->i_size) {
2047 i_size_write(inode, pos+copied);
c7d206b3 2048 i_size_changed = 1;
afddba49
NP
2049 }
2050
2051 unlock_page(page);
2052 page_cache_release(page);
2053
c7d206b3
JK
2054 /*
 2055 * Don't mark the inode dirty under the page lock. First, it unnecessarily
 2056 * lengthens the time the page lock is held. Second, it forces lock
2057 * ordering of page lock and transaction start for journaling
2058 * filesystems.
2059 */
2060 if (i_size_changed)
2061 mark_inode_dirty(inode);
2062
afddba49
NP
2063 return copied;
2064}
2065EXPORT_SYMBOL(generic_write_end);
2066
8ab22b9a
HH
2067/*
2068 * block_is_partially_uptodate checks whether buffers within a page are
2069 * uptodate or not.
2070 *
2071 * Returns true if all buffers which correspond to a file portion
2072 * we want to read are uptodate.
2073 */
2074int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2075 unsigned long from)
2076{
2077 struct inode *inode = page->mapping->host;
2078 unsigned block_start, block_end, blocksize;
2079 unsigned to;
2080 struct buffer_head *bh, *head;
2081 int ret = 1;
2082
2083 if (!page_has_buffers(page))
2084 return 0;
2085
2086 blocksize = 1 << inode->i_blkbits;
2087 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2088 to = from + to;
2089 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2090 return 0;
2091
2092 head = page_buffers(page);
2093 bh = head;
2094 block_start = 0;
2095 do {
2096 block_end = block_start + blocksize;
2097 if (block_end > from && block_start < to) {
2098 if (!buffer_uptodate(bh)) {
2099 ret = 0;
2100 break;
2101 }
2102 if (block_end >= to)
2103 break;
2104 }
2105 block_start = block_end;
2106 bh = bh->b_this_page;
2107 } while (bh != head);
2108
2109 return ret;
2110}
2111EXPORT_SYMBOL(block_is_partially_uptodate);
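/*
 * Illustration only, not part of this file: block_is_partially_uptodate()
 * is normally wired into the address_space_operations table, where the
 * generic read path uses it to avoid blocking on a locked page whose
 * relevant buffers are already uptodate. The other methods named here
 * ("example_*") are hypothetical.
 */
static const struct address_space_operations example_aops = {
	.readpage		= example_readpage,
	.writepage		= example_writepage,
	.write_begin		= example_write_begin,
	.write_end		= generic_write_end,
	.is_partially_uptodate	= block_is_partially_uptodate,
};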
2112
1da177e4
LT
2113/*
2114 * Generic "read page" function for block devices that have the normal
 2115 * get_block functionality. This covers most block-device-backed filesystems.
2116 * Reads the page asynchronously --- the unlock_buffer() and
2117 * set/clear_buffer_uptodate() functions propagate buffer state into the
2118 * page struct once IO has completed.
2119 */
2120int block_read_full_page(struct page *page, get_block_t *get_block)
2121{
2122 struct inode *inode = page->mapping->host;
2123 sector_t iblock, lblock;
2124 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2125 unsigned int blocksize;
2126 int nr, i;
2127 int fully_mapped = 1;
2128
cd7619d6 2129 BUG_ON(!PageLocked(page));
1da177e4
LT
2130 blocksize = 1 << inode->i_blkbits;
2131 if (!page_has_buffers(page))
2132 create_empty_buffers(page, blocksize, 0);
2133 head = page_buffers(page);
2134
2135 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2136 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2137 bh = head;
2138 nr = 0;
2139 i = 0;
2140
2141 do {
2142 if (buffer_uptodate(bh))
2143 continue;
2144
2145 if (!buffer_mapped(bh)) {
c64610ba
AM
2146 int err = 0;
2147
1da177e4
LT
2148 fully_mapped = 0;
2149 if (iblock < lblock) {
b0cf2321 2150 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2151 err = get_block(inode, iblock, bh, 0);
2152 if (err)
1da177e4
LT
2153 SetPageError(page);
2154 }
2155 if (!buffer_mapped(bh)) {
eebd2aa3 2156 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2157 if (!err)
2158 set_buffer_uptodate(bh);
1da177e4
LT
2159 continue;
2160 }
2161 /*
2162 * get_block() might have updated the buffer
2163 * synchronously
2164 */
2165 if (buffer_uptodate(bh))
2166 continue;
2167 }
2168 arr[nr++] = bh;
2169 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2170
2171 if (fully_mapped)
2172 SetPageMappedToDisk(page);
2173
2174 if (!nr) {
2175 /*
2176 * All buffers are uptodate - we can set the page uptodate
2177 * as well. But not if get_block() returned an error.
2178 */
2179 if (!PageError(page))
2180 SetPageUptodate(page);
2181 unlock_page(page);
2182 return 0;
2183 }
2184
2185 /* Stage two: lock the buffers */
2186 for (i = 0; i < nr; i++) {
2187 bh = arr[i];
2188 lock_buffer(bh);
2189 mark_buffer_async_read(bh);
2190 }
2191
2192 /*
2193 * Stage 3: start the IO. Check for uptodateness
2194 * inside the buffer lock in case another process reading
2195 * the underlying blockdev brought it uptodate (the sct fix).
2196 */
2197 for (i = 0; i < nr; i++) {
2198 bh = arr[i];
2199 if (buffer_uptodate(bh))
2200 end_buffer_async_read(bh, 1);
2201 else
2202 submit_bh(READ, bh);
2203 }
2204 return 0;
2205}
1fe72eaa 2206EXPORT_SYMBOL(block_read_full_page);
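/*
 * Illustration only, not part of this file: the typical ->readpage for a
 * filesystem built on these helpers is a one-line wrapper around
 * block_read_full_page(); "example_get_block" is hypothetical.
 */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}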
1da177e4
LT
2207
2208/* utility function for filesystems that need to do work on expanding
89e10787 2209 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2210 * deal with the hole.
2211 */
89e10787 2212int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2213{
2214 struct address_space *mapping = inode->i_mapping;
2215 struct page *page;
89e10787 2216 void *fsdata;
1da177e4
LT
2217 int err;
2218
c08d3b0e 2219 err = inode_newsize_ok(inode, size);
2220 if (err)
1da177e4
LT
2221 goto out;
2222
89e10787
NP
2223 err = pagecache_write_begin(NULL, mapping, size, 0,
2224 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2225 &page, &fsdata);
2226 if (err)
05eb0b51 2227 goto out;
05eb0b51 2228
89e10787
NP
2229 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2230 BUG_ON(err > 0);
05eb0b51 2231
1da177e4
LT
2232out:
2233 return err;
2234}
1fe72eaa 2235EXPORT_SYMBOL(generic_cont_expand_simple);
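/*
 * Illustration only, not part of this file: an expanding truncate might use
 * generic_cont_expand_simple() to instantiate the new last block before the
 * size change becomes visible. "example_expand" is a hypothetical helper.
 */
static int example_expand(struct inode *inode, loff_t new_size)
{
	if (new_size > i_size_read(inode))
		return generic_cont_expand_simple(inode, new_size);
	return 0;
}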
1da177e4 2236
f1e3af72
AB
2237static int cont_expand_zero(struct file *file, struct address_space *mapping,
2238 loff_t pos, loff_t *bytes)
1da177e4 2239{
1da177e4 2240 struct inode *inode = mapping->host;
1da177e4 2241 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2242 struct page *page;
2243 void *fsdata;
2244 pgoff_t index, curidx;
2245 loff_t curpos;
2246 unsigned zerofrom, offset, len;
2247 int err = 0;
1da177e4 2248
89e10787
NP
2249 index = pos >> PAGE_CACHE_SHIFT;
2250 offset = pos & ~PAGE_CACHE_MASK;
2251
2252 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2253 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2254 if (zerofrom & (blocksize-1)) {
2255 *bytes |= (blocksize-1);
2256 (*bytes)++;
2257 }
89e10787 2258 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2259
89e10787
NP
2260 err = pagecache_write_begin(file, mapping, curpos, len,
2261 AOP_FLAG_UNINTERRUPTIBLE,
2262 &page, &fsdata);
2263 if (err)
2264 goto out;
eebd2aa3 2265 zero_user(page, zerofrom, len);
89e10787
NP
2266 err = pagecache_write_end(file, mapping, curpos, len, len,
2267 page, fsdata);
2268 if (err < 0)
2269 goto out;
2270 BUG_ON(err != len);
2271 err = 0;
061e9746
OH
2272
2273 balance_dirty_pages_ratelimited(mapping);
89e10787 2274 }
1da177e4 2275
89e10787
NP
2276 /* page covers the boundary, find the boundary offset */
2277 if (index == curidx) {
2278 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2279 /* if we expand the file, the last block will be filled */
89e10787
NP
2280 if (offset <= zerofrom) {
2281 goto out;
2282 }
2283 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2284 *bytes |= (blocksize-1);
2285 (*bytes)++;
2286 }
89e10787 2287 len = offset - zerofrom;
1da177e4 2288
89e10787
NP
2289 err = pagecache_write_begin(file, mapping, curpos, len,
2290 AOP_FLAG_UNINTERRUPTIBLE,
2291 &page, &fsdata);
2292 if (err)
2293 goto out;
eebd2aa3 2294 zero_user(page, zerofrom, len);
89e10787
NP
2295 err = pagecache_write_end(file, mapping, curpos, len, len,
2296 page, fsdata);
2297 if (err < 0)
2298 goto out;
2299 BUG_ON(err != len);
2300 err = 0;
1da177e4 2301 }
89e10787
NP
2302out:
2303 return err;
2304}
2305
2306/*
 2307 * For moronic filesystems that do not allow holes in files.
2308 * We may have to extend the file.
2309 */
282dc178 2310int cont_write_begin(struct file *file, struct address_space *mapping,
89e10787
NP
2311 loff_t pos, unsigned len, unsigned flags,
2312 struct page **pagep, void **fsdata,
2313 get_block_t *get_block, loff_t *bytes)
2314{
2315 struct inode *inode = mapping->host;
2316 unsigned blocksize = 1 << inode->i_blkbits;
2317 unsigned zerofrom;
2318 int err;
2319
2320 err = cont_expand_zero(file, mapping, pos, bytes);
2321 if (err)
155130a4 2322 return err;
89e10787
NP
2323
2324 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2325 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2326 *bytes |= (blocksize-1);
2327 (*bytes)++;
1da177e4 2328 }
1da177e4 2329
155130a4 2330 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
1da177e4 2331}
1fe72eaa 2332EXPORT_SYMBOL(cont_write_begin);
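/*
 * Illustration only, not part of this file: a ->write_begin for a filesystem
 * without holes passes a pointer to its "allocated so far" marker (the
 * hypothetical example_i(inode)->mmu_private below) so cont_write_begin() can
 * zero-fill the gap up to the write position first. "example_get_block" and
 * "example_i" are hypothetical.
 */
static int example_cont_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				example_get_block,
				&example_i(mapping->host)->mmu_private);
}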
1da177e4 2333
1da177e4
LT
2334int block_commit_write(struct page *page, unsigned from, unsigned to)
2335{
2336 struct inode *inode = page->mapping->host;
2337 __block_commit_write(inode,page,from,to);
2338 return 0;
2339}
1fe72eaa 2340EXPORT_SYMBOL(block_commit_write);
1da177e4 2341
54171690
DC
2342/*
2343 * block_page_mkwrite() is not allowed to change the file size as it gets
2344 * called from a page fault handler when a page is first dirtied. Hence we must
2345 * be careful to check for EOF conditions here. We set the page up correctly
2346 * for a written page which means we get ENOSPC checking when writing into
2347 * holes and correct delalloc and unwritten extent mapping on filesystems that
2348 * support these features.
2349 *
2350 * We are not allowed to take the i_mutex here so we have to play games to
2351 * protect against truncate races as the page could now be beyond EOF. Because
7bb46a67 2352 * truncate writes the inode size before removing pages, once we have the
54171690
DC
2353 * page lock we can determine safely if the page is beyond EOF. If it is not
2354 * beyond EOF, then the page is guaranteed safe against truncation until we
2355 * unlock the page.
2356 */
2357int
c2ec175c 2358block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
54171690
DC
2359 get_block_t get_block)
2360{
c2ec175c 2361 struct page *page = vmf->page;
54171690
DC
2362 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2363 unsigned long end;
2364 loff_t size;
56a76f82 2365 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
54171690
DC
2366
2367 lock_page(page);
2368 size = i_size_read(inode);
2369 if ((page->mapping != inode->i_mapping) ||
18336338 2370 (page_offset(page) > size)) {
54171690 2371 /* page got truncated out from underneath us */
b827e496
NP
2372 unlock_page(page);
2373 goto out;
54171690
DC
2374 }
2375
2376 /* page is wholly or partially inside EOF */
2377 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2378 end = size & ~PAGE_CACHE_MASK;
2379 else
2380 end = PAGE_CACHE_SIZE;
2381
2382 ret = block_prepare_write(page, 0, end, get_block);
2383 if (!ret)
2384 ret = block_commit_write(page, 0, end);
2385
56a76f82 2386 if (unlikely(ret)) {
b827e496 2387 unlock_page(page);
56a76f82
NP
2388 if (ret == -ENOMEM)
2389 ret = VM_FAULT_OOM;
2390 else /* -ENOSPC, -EIO, etc */
2391 ret = VM_FAULT_SIGBUS;
b827e496
NP
2392 } else
2393 ret = VM_FAULT_LOCKED;
c2ec175c 2394
b827e496 2395out:
54171690
DC
2396 return ret;
2397}
1fe72eaa 2398EXPORT_SYMBOL(block_page_mkwrite);
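/*
 * Illustration only, not part of this file: block_page_mkwrite() is normally
 * exposed to the VM through a filesystem's vm_operations_struct, together
 * with filemap_fault() for the read side. "example_get_block" is hypothetical.
 */
static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, example_get_block);
}

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};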
1da177e4
LT
2399
2400/*
03158cd7 2401 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2402 * immediately, while under the page lock. So it needs a special end_io
2403 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2404 */
2405static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2406{
68671f35 2407 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2408}
2409
03158cd7
NP
2410/*
 2411 * Attach the singly-linked list of buffers created by nobh_write_begin to
 2412 * the page (converting it to a circular linked list and taking care of page
2413 * dirty races).
2414 */
2415static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2416{
2417 struct buffer_head *bh;
2418
2419 BUG_ON(!PageLocked(page));
2420
2421 spin_lock(&page->mapping->private_lock);
2422 bh = head;
2423 do {
2424 if (PageDirty(page))
2425 set_buffer_dirty(bh);
2426 if (!bh->b_this_page)
2427 bh->b_this_page = head;
2428 bh = bh->b_this_page;
2429 } while (bh != head);
2430 attach_page_buffers(page, head);
2431 spin_unlock(&page->mapping->private_lock);
2432}
2433
1da177e4 2434/*
ea0f04e5
CH
 2435 * On entry, the page is not uptodate at all.
2436 * On exit the page is fully uptodate in the areas outside (from,to)
7bb46a67 2437 * The filesystem needs to handle block truncation upon failure.
1da177e4 2438 */
ea0f04e5 2439int nobh_write_begin(struct address_space *mapping,
03158cd7
NP
2440 loff_t pos, unsigned len, unsigned flags,
2441 struct page **pagep, void **fsdata,
1da177e4
LT
2442 get_block_t *get_block)
2443{
03158cd7 2444 struct inode *inode = mapping->host;
1da177e4
LT
2445 const unsigned blkbits = inode->i_blkbits;
2446 const unsigned blocksize = 1 << blkbits;
a4b0672d 2447 struct buffer_head *head, *bh;
03158cd7
NP
2448 struct page *page;
2449 pgoff_t index;
2450 unsigned from, to;
1da177e4 2451 unsigned block_in_page;
a4b0672d 2452 unsigned block_start, block_end;
1da177e4 2453 sector_t block_in_file;
1da177e4 2454 int nr_reads = 0;
1da177e4
LT
2455 int ret = 0;
2456 int is_mapped_to_disk = 1;
1da177e4 2457
03158cd7
NP
2458 index = pos >> PAGE_CACHE_SHIFT;
2459 from = pos & (PAGE_CACHE_SIZE - 1);
2460 to = from + len;
2461
54566b2c 2462 page = grab_cache_page_write_begin(mapping, index, flags);
03158cd7
NP
2463 if (!page)
2464 return -ENOMEM;
2465 *pagep = page;
2466 *fsdata = NULL;
2467
2468 if (page_has_buffers(page)) {
2469 unlock_page(page);
2470 page_cache_release(page);
2471 *pagep = NULL;
155130a4
CH
2472 return block_write_begin(mapping, pos, len, flags, pagep,
2473 get_block);
03158cd7 2474 }
a4b0672d 2475
1da177e4
LT
2476 if (PageMappedToDisk(page))
2477 return 0;
2478
a4b0672d
NP
2479 /*
2480 * Allocate buffers so that we can keep track of state, and potentially
2481 * attach them to the page if an error occurs. In the common case of
2482 * no error, they will just be freed again without ever being attached
2483 * to the page (which is all OK, because we're under the page lock).
2484 *
2485 * Be careful: the buffer linked list is a NULL terminated one, rather
2486 * than the circular one we're used to.
2487 */
2488 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2489 if (!head) {
2490 ret = -ENOMEM;
2491 goto out_release;
2492 }
a4b0672d 2493
1da177e4 2494 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2495
2496 /*
2497 * We loop across all blocks in the page, whether or not they are
2498 * part of the affected region. This is so we can discover if the
2499 * page is fully mapped-to-disk.
2500 */
a4b0672d 2501 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2502 block_start < PAGE_CACHE_SIZE;
a4b0672d 2503 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2504 int create;
2505
a4b0672d
NP
2506 block_end = block_start + blocksize;
2507 bh->b_state = 0;
1da177e4
LT
2508 create = 1;
2509 if (block_start >= to)
2510 create = 0;
2511 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2512 bh, create);
1da177e4
LT
2513 if (ret)
2514 goto failed;
a4b0672d 2515 if (!buffer_mapped(bh))
1da177e4 2516 is_mapped_to_disk = 0;
a4b0672d
NP
2517 if (buffer_new(bh))
2518 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2519 if (PageUptodate(page)) {
2520 set_buffer_uptodate(bh);
1da177e4 2521 continue;
a4b0672d
NP
2522 }
2523 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2524 zero_user_segments(page, block_start, from,
2525 to, block_end);
1da177e4
LT
2526 continue;
2527 }
a4b0672d 2528 if (buffer_uptodate(bh))
1da177e4
LT
2529 continue; /* reiserfs does this */
2530 if (block_start < from || block_end > to) {
a4b0672d
NP
2531 lock_buffer(bh);
2532 bh->b_end_io = end_buffer_read_nobh;
2533 submit_bh(READ, bh);
2534 nr_reads++;
1da177e4
LT
2535 }
2536 }
2537
2538 if (nr_reads) {
1da177e4
LT
2539 /*
2540 * The page is locked, so these buffers are protected from
2541 * any VM or truncate activity. Hence we don't need to care
2542 * for the buffer_head refcounts.
2543 */
a4b0672d 2544 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2545 wait_on_buffer(bh);
2546 if (!buffer_uptodate(bh))
2547 ret = -EIO;
1da177e4
LT
2548 }
2549 if (ret)
2550 goto failed;
2551 }
2552
2553 if (is_mapped_to_disk)
2554 SetPageMappedToDisk(page);
1da177e4 2555
03158cd7 2556 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2557
1da177e4
LT
2558 return 0;
2559
2560failed:
03158cd7 2561 BUG_ON(!ret);
1da177e4 2562 /*
a4b0672d
NP
2563 * Error recovery is a bit difficult. We need to zero out blocks that
2564 * were newly allocated, and dirty them to ensure they get written out.
2565 * Buffers need to be attached to the page at this point, otherwise
2566 * the handling of potential IO errors during writeout would be hard
2567 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2568 */
03158cd7
NP
2569 attach_nobh_buffers(page, head);
2570 page_zero_new_buffers(page, from, to);
a4b0672d 2571
03158cd7
NP
2572out_release:
2573 unlock_page(page);
2574 page_cache_release(page);
2575 *pagep = NULL;
a4b0672d 2576
7bb46a67 2577 return ret;
2578}
03158cd7 2579EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2580
03158cd7
NP
2581int nobh_write_end(struct file *file, struct address_space *mapping,
2582 loff_t pos, unsigned len, unsigned copied,
2583 struct page *page, void *fsdata)
1da177e4
LT
2584{
2585 struct inode *inode = page->mapping->host;
efdc3131 2586 struct buffer_head *head = fsdata;
03158cd7 2587 struct buffer_head *bh;
5b41e74a 2588 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2589
d4cf109f 2590 if (unlikely(copied < len) && head)
5b41e74a
DM
2591 attach_nobh_buffers(page, head);
2592 if (page_has_buffers(page))
2593 return generic_write_end(file, mapping, pos, len,
2594 copied, page, fsdata);
a4b0672d 2595
22c8ca78 2596 SetPageUptodate(page);
1da177e4 2597 set_page_dirty(page);
03158cd7
NP
2598 if (pos+copied > inode->i_size) {
2599 i_size_write(inode, pos+copied);
1da177e4
LT
2600 mark_inode_dirty(inode);
2601 }
03158cd7
NP
2602
2603 unlock_page(page);
2604 page_cache_release(page);
2605
03158cd7
NP
2606 while (head) {
2607 bh = head;
2608 head = head->b_this_page;
2609 free_buffer_head(bh);
2610 }
2611
2612 return copied;
1da177e4 2613}
03158cd7 2614EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2615
2616/*
 2617 * nobh_writepage() - based on block_write_full_page() except
2618 * that it tries to operate without attaching bufferheads to
2619 * the page.
2620 */
2621int nobh_writepage(struct page *page, get_block_t *get_block,
2622 struct writeback_control *wbc)
2623{
2624 struct inode * const inode = page->mapping->host;
2625 loff_t i_size = i_size_read(inode);
2626 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2627 unsigned offset;
1da177e4
LT
2628 int ret;
2629
2630 /* Is the page fully inside i_size? */
2631 if (page->index < end_index)
2632 goto out;
2633
2634 /* Is the page fully outside i_size? (truncate in progress) */
2635 offset = i_size & (PAGE_CACHE_SIZE-1);
2636 if (page->index >= end_index+1 || !offset) {
2637 /*
2638 * The page may have dirty, unmapped buffers. For example,
2639 * they may have been added in ext3_writepage(). Make them
2640 * freeable here, so the page does not leak.
2641 */
2642#if 0
2643 /* Not really sure about this - do we need this ? */
2644 if (page->mapping->a_ops->invalidatepage)
2645 page->mapping->a_ops->invalidatepage(page, offset);
2646#endif
2647 unlock_page(page);
2648 return 0; /* don't care */
2649 }
2650
2651 /*
2652 * The page straddles i_size. It must be zeroed out on each and every
2653 * writepage invocation because it may be mmapped. "A file is mapped
2654 * in multiples of the page size. For a file that is not a multiple of
2655 * the page size, the remaining memory is zeroed when mapped, and
2656 * writes to that region are not written out to the file."
2657 */
eebd2aa3 2658 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2659out:
2660 ret = mpage_writepage(page, get_block, wbc);
2661 if (ret == -EAGAIN)
35c80d5f
CM
2662 ret = __block_write_full_page(inode, page, get_block, wbc,
2663 end_buffer_async_write);
1da177e4
LT
2664 return ret;
2665}
2666EXPORT_SYMBOL(nobh_writepage);
2667
03158cd7
NP
2668int nobh_truncate_page(struct address_space *mapping,
2669 loff_t from, get_block_t *get_block)
1da177e4 2670{
1da177e4
LT
2671 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2672 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2673 unsigned blocksize;
2674 sector_t iblock;
2675 unsigned length, pos;
2676 struct inode *inode = mapping->host;
1da177e4 2677 struct page *page;
03158cd7
NP
2678 struct buffer_head map_bh;
2679 int err;
1da177e4 2680
03158cd7
NP
2681 blocksize = 1 << inode->i_blkbits;
2682 length = offset & (blocksize - 1);
2683
2684 /* Block boundary? Nothing to do */
2685 if (!length)
2686 return 0;
2687
2688 length = blocksize - length;
2689 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2690
1da177e4 2691 page = grab_cache_page(mapping, index);
03158cd7 2692 err = -ENOMEM;
1da177e4
LT
2693 if (!page)
2694 goto out;
2695
03158cd7
NP
2696 if (page_has_buffers(page)) {
2697has_buffers:
2698 unlock_page(page);
2699 page_cache_release(page);
2700 return block_truncate_page(mapping, from, get_block);
2701 }
2702
2703 /* Find the buffer that contains "offset" */
2704 pos = blocksize;
2705 while (offset >= pos) {
2706 iblock++;
2707 pos += blocksize;
2708 }
2709
460bcf57
TT
2710 map_bh.b_size = blocksize;
2711 map_bh.b_state = 0;
03158cd7
NP
2712 err = get_block(inode, iblock, &map_bh, 0);
2713 if (err)
2714 goto unlock;
2715 /* unmapped? It's a hole - nothing to do */
2716 if (!buffer_mapped(&map_bh))
2717 goto unlock;
2718
2719 /* Ok, it's mapped. Make sure it's up-to-date */
2720 if (!PageUptodate(page)) {
2721 err = mapping->a_ops->readpage(NULL, page);
2722 if (err) {
2723 page_cache_release(page);
2724 goto out;
2725 }
2726 lock_page(page);
2727 if (!PageUptodate(page)) {
2728 err = -EIO;
2729 goto unlock;
2730 }
2731 if (page_has_buffers(page))
2732 goto has_buffers;
1da177e4 2733 }
eebd2aa3 2734 zero_user(page, offset, length);
03158cd7
NP
2735 set_page_dirty(page);
2736 err = 0;
2737
2738unlock:
1da177e4
LT
2739 unlock_page(page);
2740 page_cache_release(page);
2741out:
03158cd7 2742 return err;
1da177e4
LT
2743}
2744EXPORT_SYMBOL(nobh_truncate_page);
2745
2746int block_truncate_page(struct address_space *mapping,
2747 loff_t from, get_block_t *get_block)
2748{
2749 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2750 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2751 unsigned blocksize;
54b21a79 2752 sector_t iblock;
1da177e4
LT
2753 unsigned length, pos;
2754 struct inode *inode = mapping->host;
2755 struct page *page;
2756 struct buffer_head *bh;
1da177e4
LT
2757 int err;
2758
2759 blocksize = 1 << inode->i_blkbits;
2760 length = offset & (blocksize - 1);
2761
2762 /* Block boundary? Nothing to do */
2763 if (!length)
2764 return 0;
2765
2766 length = blocksize - length;
54b21a79 2767 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2768
2769 page = grab_cache_page(mapping, index);
2770 err = -ENOMEM;
2771 if (!page)
2772 goto out;
2773
2774 if (!page_has_buffers(page))
2775 create_empty_buffers(page, blocksize, 0);
2776
2777 /* Find the buffer that contains "offset" */
2778 bh = page_buffers(page);
2779 pos = blocksize;
2780 while (offset >= pos) {
2781 bh = bh->b_this_page;
2782 iblock++;
2783 pos += blocksize;
2784 }
2785
2786 err = 0;
2787 if (!buffer_mapped(bh)) {
b0cf2321 2788 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2789 err = get_block(inode, iblock, bh, 0);
2790 if (err)
2791 goto unlock;
2792 /* unmapped? It's a hole - nothing to do */
2793 if (!buffer_mapped(bh))
2794 goto unlock;
2795 }
2796
2797 /* Ok, it's mapped. Make sure it's up-to-date */
2798 if (PageUptodate(page))
2799 set_buffer_uptodate(bh);
2800
33a266dd 2801 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2802 err = -EIO;
2803 ll_rw_block(READ, 1, &bh);
2804 wait_on_buffer(bh);
2805 /* Uhhuh. Read error. Complain and punt. */
2806 if (!buffer_uptodate(bh))
2807 goto unlock;
2808 }
2809
eebd2aa3 2810 zero_user(page, offset, length);
1da177e4
LT
2811 mark_buffer_dirty(bh);
2812 err = 0;
2813
2814unlock:
2815 unlock_page(page);
2816 page_cache_release(page);
2817out:
2818 return err;
2819}
1fe72eaa 2820EXPORT_SYMBOL(block_truncate_page);
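/*
 * Illustration only, not part of this file: a truncate path typically calls
 * block_truncate_page() to zero the tail of a partial last block before the
 * block mappings beyond the new size are released. "example_get_block" is
 * hypothetical.
 */
static int example_truncate_tail(struct inode *inode, loff_t new_size)
{
	return block_truncate_page(inode->i_mapping, new_size,
				   example_get_block);
}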
1da177e4
LT
2821
2822/*
2823 * The generic ->writepage function for buffer-backed address_spaces
35c80d5f 2824 * this form passes in the end_io handler used to finish the IO.
1da177e4 2825 */
35c80d5f
CM
2826int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2827 struct writeback_control *wbc, bh_end_io_t *handler)
1da177e4
LT
2828{
2829 struct inode * const inode = page->mapping->host;
2830 loff_t i_size = i_size_read(inode);
2831 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2832 unsigned offset;
1da177e4
LT
2833
2834 /* Is the page fully inside i_size? */
2835 if (page->index < end_index)
35c80d5f
CM
2836 return __block_write_full_page(inode, page, get_block, wbc,
2837 handler);
1da177e4
LT
2838
2839 /* Is the page fully outside i_size? (truncate in progress) */
2840 offset = i_size & (PAGE_CACHE_SIZE-1);
2841 if (page->index >= end_index+1 || !offset) {
2842 /*
2843 * The page may have dirty, unmapped buffers. For example,
2844 * they may have been added in ext3_writepage(). Make them
2845 * freeable here, so the page does not leak.
2846 */
aaa4059b 2847 do_invalidatepage(page, 0);
1da177e4
LT
2848 unlock_page(page);
2849 return 0; /* don't care */
2850 }
2851
2852 /*
2853 * The page straddles i_size. It must be zeroed out on each and every
2a61aa40 2854 * writepage invocation because it may be mmapped. "A file is mapped
1da177e4
LT
2855 * in multiples of the page size. For a file that is not a multiple of
2856 * the page size, the remaining memory is zeroed when mapped, and
2857 * writes to that region are not written out to the file."
2858 */
eebd2aa3 2859 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
35c80d5f 2860 return __block_write_full_page(inode, page, get_block, wbc, handler);
1da177e4 2861}
1fe72eaa 2862EXPORT_SYMBOL(block_write_full_page_endio);
1da177e4 2863
35c80d5f
CM
2864/*
2865 * The generic ->writepage function for buffer-backed address_spaces
2866 */
2867int block_write_full_page(struct page *page, get_block_t *get_block,
2868 struct writeback_control *wbc)
2869{
2870 return block_write_full_page_endio(page, get_block, wbc,
2871 end_buffer_async_write);
2872}
1fe72eaa 2873EXPORT_SYMBOL(block_write_full_page);
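/*
 * Illustration only, not part of this file: the usual ->writepage wrapper for
 * filesystems that rely on this helper; "example_get_block" is hypothetical.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}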
35c80d5f 2874
1da177e4
LT
2875sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2876 get_block_t *get_block)
2877{
2878 struct buffer_head tmp;
2879 struct inode *inode = mapping->host;
2880 tmp.b_state = 0;
2881 tmp.b_blocknr = 0;
b0cf2321 2882 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2883 get_block(inode, block, &tmp, 0);
2884 return tmp.b_blocknr;
2885}
1fe72eaa 2886EXPORT_SYMBOL(generic_block_bmap);
1da177e4 2887
6712ecf8 2888static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2889{
2890 struct buffer_head *bh = bio->bi_private;
2891
1da177e4
LT
2892 if (err == -EOPNOTSUPP) {
2893 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2894 set_bit(BH_Eopnotsupp, &bh->b_state);
2895 }
2896
08bafc03
KM
2897 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2898 set_bit(BH_Quiet, &bh->b_state);
2899
1da177e4
LT
2900 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2901 bio_put(bio);
1da177e4
LT
2902}
2903
2904int submit_bh(int rw, struct buffer_head * bh)
2905{
2906 struct bio *bio;
2907 int ret = 0;
2908
2909 BUG_ON(!buffer_locked(bh));
2910 BUG_ON(!buffer_mapped(bh));
2911 BUG_ON(!bh->b_end_io);
8fb0e342
AK
2912 BUG_ON(buffer_delay(bh));
2913 BUG_ON(buffer_unwritten(bh));
1da177e4 2914
1da177e4 2915 /*
48fd4f93 2916 * Only clear out a write error when rewriting
1da177e4 2917 */
48fd4f93 2918 if (test_set_buffer_req(bh) && (rw & WRITE))
1da177e4
LT
2919 clear_buffer_write_io_error(bh);
2920
2921 /*
2922 * from here on down, it's all bio -- do the initial mapping,
2923 * submit_bio -> generic_make_request may further map this bio around
2924 */
2925 bio = bio_alloc(GFP_NOIO, 1);
2926
2927 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2928 bio->bi_bdev = bh->b_bdev;
2929 bio->bi_io_vec[0].bv_page = bh->b_page;
2930 bio->bi_io_vec[0].bv_len = bh->b_size;
2931 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2932
2933 bio->bi_vcnt = 1;
2934 bio->bi_idx = 0;
2935 bio->bi_size = bh->b_size;
2936
2937 bio->bi_end_io = end_bio_bh_io_sync;
2938 bio->bi_private = bh;
2939
2940 bio_get(bio);
2941 submit_bio(rw, bio);
2942
2943 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2944 ret = -EOPNOTSUPP;
2945
2946 bio_put(bio);
2947 return ret;
2948}
1fe72eaa 2949EXPORT_SYMBOL(submit_bh);
1da177e4
LT
2950
2951/**
2952 * ll_rw_block: low-level access to block devices (DEPRECATED)
9cb569d6 2953 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
1da177e4
LT
2954 * @nr: number of &struct buffer_heads in the array
2955 * @bhs: array of pointers to &struct buffer_head
2956 *
a7662236
JK
2957 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2958 * requests an I/O operation on them, either a %READ or a %WRITE. The third
9cb569d6
CH
2959 * %READA option is described in the documentation for generic_make_request()
2960 * which ll_rw_block() calls.
1da177e4
LT
2961 *
2962 * This function drops any buffer that it cannot get a lock on (with the
9cb569d6
CH
2963 * BH_Lock state bit), any buffer that appears to be clean when doing a write
 2964 * request, and any buffer that appears to be up-to-date when doing a read
 2965 * request. Further, it marks as clean the buffers that are processed for
2966 * writing (the buffer cache won't assume that they are actually clean
2967 * until the buffer gets unlocked).
1da177e4
LT
2968 *
2969 * ll_rw_block sets b_end_io to simple completion handler that marks
 2970 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2971 * any waiters.
2972 *
2973 * All of the buffers must be for the same device, and must also be a
2974 * multiple of the current approved size for the device.
2975 */
2976void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2977{
2978 int i;
2979
2980 for (i = 0; i < nr; i++) {
2981 struct buffer_head *bh = bhs[i];
2982
9cb569d6 2983 if (!trylock_buffer(bh))
1da177e4 2984 continue;
9cb569d6 2985 if (rw == WRITE) {
1da177e4 2986 if (test_clear_buffer_dirty(bh)) {
76c3073a 2987 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2988 get_bh(bh);
9cb569d6 2989 submit_bh(WRITE, bh);
1da177e4
LT
2990 continue;
2991 }
2992 } else {
1da177e4 2993 if (!buffer_uptodate(bh)) {
76c3073a 2994 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2995 get_bh(bh);
1da177e4
LT
2996 submit_bh(rw, bh);
2997 continue;
2998 }
2999 }
3000 unlock_buffer(bh);
1da177e4
LT
3001 }
3002}
1fe72eaa 3003EXPORT_SYMBOL(ll_rw_block);
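/*
 * Illustration only, not part of this file: a caller in the style the
 * kernel-doc above describes, starting reads on a batch of buffer_heads and
 * then waiting for each of them. The caller is assumed to hold a reference
 * on every bh in the array.
 */
static int example_read_buffers(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}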
1da177e4 3004
9cb569d6
CH
3005void write_dirty_buffer(struct buffer_head *bh, int rw)
3006{
3007 lock_buffer(bh);
3008 if (!test_clear_buffer_dirty(bh)) {
3009 unlock_buffer(bh);
3010 return;
3011 }
3012 bh->b_end_io = end_buffer_write_sync;
3013 get_bh(bh);
3014 submit_bh(rw, bh);
3015}
3016EXPORT_SYMBOL(write_dirty_buffer);
3017
1da177e4
LT
3018/*
3019 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3020 * and then start new I/O and then wait upon it. The caller must have a ref on
3021 * the buffer_head.
3022 */
87e99511 3023int __sync_dirty_buffer(struct buffer_head *bh, int rw)
1da177e4
LT
3024{
3025 int ret = 0;
3026
3027 WARN_ON(atomic_read(&bh->b_count) < 1);
3028 lock_buffer(bh);
3029 if (test_clear_buffer_dirty(bh)) {
3030 get_bh(bh);
3031 bh->b_end_io = end_buffer_write_sync;
87e99511 3032 ret = submit_bh(rw, bh);
1da177e4
LT
3033 wait_on_buffer(bh);
3034 if (buffer_eopnotsupp(bh)) {
3035 clear_buffer_eopnotsupp(bh);
3036 ret = -EOPNOTSUPP;
3037 }
3038 if (!ret && !buffer_uptodate(bh))
3039 ret = -EIO;
3040 } else {
3041 unlock_buffer(bh);
3042 }
3043 return ret;
3044}
87e99511
CH
3045EXPORT_SYMBOL(__sync_dirty_buffer);
3046
3047int sync_dirty_buffer(struct buffer_head *bh)
3048{
3049 return __sync_dirty_buffer(bh, WRITE_SYNC);
3050}
1fe72eaa 3051EXPORT_SYMBOL(sync_dirty_buffer);
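/*
 * Illustration only, not part of this file: a synchronous metadata update.
 * The caller already holds a reference on bh (for example from sb_bread());
 * sync_dirty_buffer() then provides the data-integrity semantics described
 * in the comment above.
 */
static int example_update_block_sync(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0, -EIO or -EOPNOTSUPP */
}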
1da177e4
LT
3052
3053/*
3054 * try_to_free_buffers() checks if all the buffers on this particular page
3055 * are unused, and releases them if so.
3056 *
3057 * Exclusion against try_to_free_buffers may be obtained by either
3058 * locking the page or by holding its mapping's private_lock.
3059 *
3060 * If the page is dirty but all the buffers are clean then we need to
3061 * be sure to mark the page clean as well. This is because the page
3062 * may be against a block device, and a later reattachment of buffers
3063 * to a dirty page will set *all* buffers dirty. Which would corrupt
3064 * filesystem data on the same device.
3065 *
3066 * The same applies to regular filesystem pages: if all the buffers are
3067 * clean then we set the page clean and proceed. To do that, we require
3068 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3069 * private_lock.
3070 *
3071 * try_to_free_buffers() is non-blocking.
3072 */
3073static inline int buffer_busy(struct buffer_head *bh)
3074{
3075 return atomic_read(&bh->b_count) |
3076 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3077}
3078
3079static int
3080drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3081{
3082 struct buffer_head *head = page_buffers(page);
3083 struct buffer_head *bh;
3084
3085 bh = head;
3086 do {
de7d5a3b 3087 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3088 set_bit(AS_EIO, &page->mapping->flags);
3089 if (buffer_busy(bh))
3090 goto failed;
3091 bh = bh->b_this_page;
3092 } while (bh != head);
3093
3094 do {
3095 struct buffer_head *next = bh->b_this_page;
3096
535ee2fb 3097 if (bh->b_assoc_map)
1da177e4
LT
3098 __remove_assoc_queue(bh);
3099 bh = next;
3100 } while (bh != head);
3101 *buffers_to_free = head;
3102 __clear_page_buffers(page);
3103 return 1;
3104failed:
3105 return 0;
3106}
3107
3108int try_to_free_buffers(struct page *page)
3109{
3110 struct address_space * const mapping = page->mapping;
3111 struct buffer_head *buffers_to_free = NULL;
3112 int ret = 0;
3113
3114 BUG_ON(!PageLocked(page));
ecdfc978 3115 if (PageWriteback(page))
1da177e4
LT
3116 return 0;
3117
3118 if (mapping == NULL) { /* can this still happen? */
3119 ret = drop_buffers(page, &buffers_to_free);
3120 goto out;
3121 }
3122
3123 spin_lock(&mapping->private_lock);
3124 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3125
3126 /*
3127 * If the filesystem writes its buffers by hand (eg ext3)
3128 * then we can have clean buffers against a dirty page. We
3129 * clean the page here; otherwise the VM will never notice
3130 * that the filesystem did any IO at all.
3131 *
3132 * Also, during truncate, discard_buffer will have marked all
3133 * the page's buffers clean. We discover that here and clean
3134 * the page also.
87df7241
NP
3135 *
3136 * private_lock must be held over this entire operation in order
3137 * to synchronise against __set_page_dirty_buffers and prevent the
3138 * dirty bit from being lost.
ecdfc978
LT
3139 */
3140 if (ret)
3141 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3142 spin_unlock(&mapping->private_lock);
1da177e4
LT
3143out:
3144 if (buffers_to_free) {
3145 struct buffer_head *bh = buffers_to_free;
3146
3147 do {
3148 struct buffer_head *next = bh->b_this_page;
3149 free_buffer_head(bh);
3150 bh = next;
3151 } while (bh != buffers_to_free);
3152 }
3153 return ret;
3154}
3155EXPORT_SYMBOL(try_to_free_buffers);
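/*
 * Illustration only, not part of this file: the common ->releasepage for a
 * buffer-backed address_space simply defers to try_to_free_buffers(), which
 * is also what try_to_release_page() falls back to when no method is set.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}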
3156
3978d717 3157void block_sync_page(struct page *page)
1da177e4
LT
3158{
3159 struct address_space *mapping;
3160
3161 smp_mb();
3162 mapping = page_mapping(page);
3163 if (mapping)
3164 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4 3165}
1fe72eaa 3166EXPORT_SYMBOL(block_sync_page);
1da177e4
LT
3167
3168/*
3169 * There are no bdflush tunables left. But distributions are
3170 * still running obsolete flush daemons, so we terminate them here.
3171 *
3172 * Use of bdflush() is deprecated and will be removed in a future kernel.
5b0830cb 3173 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
1da177e4 3174 */
bdc480e3 3175SYSCALL_DEFINE2(bdflush, int, func, long, data)
1da177e4
LT
3176{
3177 static int msg_count;
3178
3179 if (!capable(CAP_SYS_ADMIN))
3180 return -EPERM;
3181
3182 if (msg_count < 5) {
3183 msg_count++;
3184 printk(KERN_INFO
3185 "warning: process `%s' used the obsolete bdflush"
3186 " system call\n", current->comm);
3187 printk(KERN_INFO "Fix your initscripts?\n");
3188 }
3189
3190 if (func == 1)
3191 do_exit(0);
3192 return 0;
3193}
3194
3195/*
3196 * Buffer-head allocation
3197 */
e18b890b 3198static struct kmem_cache *bh_cachep;
1da177e4
LT
3199
3200/*
3201 * Once the number of bh's in the machine exceeds this level, we start
3202 * stripping them in writeback.
3203 */
3204static int max_buffer_heads;
3205
3206int buffer_heads_over_limit;
3207
3208struct bh_accounting {
3209 int nr; /* Number of live bh's */
3210 int ratelimit; /* Limit cacheline bouncing */
3211};
3212
3213static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3214
3215static void recalc_bh_state(void)
3216{
3217 int i;
3218 int tot = 0;
3219
3220 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3221 return;
3222 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3223 for_each_online_cpu(i)
1da177e4
LT
3224 tot += per_cpu(bh_accounting, i).nr;
3225 buffer_heads_over_limit = (tot > max_buffer_heads);
3226}
3227
dd0fc66f 3228struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3229{
019b4d12 3230 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
1da177e4 3231 if (ret) {
a35afb83 3232 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3233 get_cpu_var(bh_accounting).nr++;
1da177e4 3234 recalc_bh_state();
736c7b80 3235 put_cpu_var(bh_accounting);
1da177e4
LT
3236 }
3237 return ret;
3238}
3239EXPORT_SYMBOL(alloc_buffer_head);
3240
3241void free_buffer_head(struct buffer_head *bh)
3242{
3243 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3244 kmem_cache_free(bh_cachep, bh);
736c7b80 3245 get_cpu_var(bh_accounting).nr--;
1da177e4 3246 recalc_bh_state();
736c7b80 3247 put_cpu_var(bh_accounting);
1da177e4
LT
3248}
3249EXPORT_SYMBOL(free_buffer_head);
3250
1da177e4
LT
3251static void buffer_exit_cpu(int cpu)
3252{
3253 int i;
3254 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3255
3256 for (i = 0; i < BH_LRU_SIZE; i++) {
3257 brelse(b->bhs[i]);
3258 b->bhs[i] = NULL;
3259 }
8a143426
ED
3260 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3261 per_cpu(bh_accounting, cpu).nr = 0;
3262 put_cpu_var(bh_accounting);
1da177e4
LT
3263}
3264
3265static int buffer_cpu_notify(struct notifier_block *self,
3266 unsigned long action, void *hcpu)
3267{
8bb78442 3268 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3269 buffer_exit_cpu((unsigned long)hcpu);
3270 return NOTIFY_OK;
3271}
1da177e4 3272
389d1b08 3273/**
a6b91919 3274 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3275 * @bh: struct buffer_head
3276 *
 3277 * Return true if the buffer is up-to-date; otherwise return
 3278 * false with the buffer locked.
3279 */
3280int bh_uptodate_or_lock(struct buffer_head *bh)
3281{
3282 if (!buffer_uptodate(bh)) {
3283 lock_buffer(bh);
3284 if (!buffer_uptodate(bh))
3285 return 0;
3286 unlock_buffer(bh);
3287 }
3288 return 1;
3289}
3290EXPORT_SYMBOL(bh_uptodate_or_lock);
3291
3292/**
a6b91919 3293 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3294 * @bh: struct buffer_head
3295 *
3296 * Returns zero on success and -EIO on error.
3297 */
3298int bh_submit_read(struct buffer_head *bh)
3299{
3300 BUG_ON(!buffer_locked(bh));
3301
3302 if (buffer_uptodate(bh)) {
3303 unlock_buffer(bh);
3304 return 0;
3305 }
3306
3307 get_bh(bh);
3308 bh->b_end_io = end_buffer_read_sync;
3309 submit_bh(READ, bh);
3310 wait_on_buffer(bh);
3311 if (buffer_uptodate(bh))
3312 return 0;
3313 return -EIO;
3314}
3315EXPORT_SYMBOL(bh_submit_read);
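/*
 * Illustration only, not part of this file: the intended pairing of the two
 * helpers above. If the buffer is already uptodate no I/O is issued;
 * otherwise bh_uptodate_or_lock() returns with the buffer locked, which is
 * exactly the state bh_submit_read() expects.
 */
static int example_read_buffer(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;
	return bh_submit_read(bh);	/* submits READ and waits */
}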
3316
1da177e4
LT
3317void __init buffer_init(void)
3318{
3319 int nrpages;
3320
b98938c3
CL
3321 bh_cachep = kmem_cache_create("buffer_head",
3322 sizeof(struct buffer_head), 0,
3323 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3324 SLAB_MEM_SPREAD),
019b4d12 3325 NULL);
1da177e4
LT
3326
3327 /*
3328 * Limit the bh occupancy to 10% of ZONE_NORMAL
3329 */
3330 nrpages = (nr_free_buffer_pages() * 10) / 100;
3331 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3332 hotcpu_notifier(buffer_cpu_notify, 0);
3333}