[net-next-2.6.git] / fs/buffer.c
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 static void invalidate_bh_lrus(void);
47
48 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
49
50 inline void
51 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 {
53         bh->b_end_io = handler;
54         bh->b_private = private;
55 }
56
57 static int sync_buffer(void *word)
58 {
59         struct block_device *bd;
60         struct buffer_head *bh
61                 = container_of(word, struct buffer_head, b_state);
62
63         smp_mb();
64         bd = bh->b_bdev;
65         if (bd)
66                 blk_run_address_space(bd->bd_inode->i_mapping);
67         io_schedule();
68         return 0;
69 }
70
71 void fastcall __lock_buffer(struct buffer_head *bh)
72 {
73         wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74                                                         TASK_UNINTERRUPTIBLE);
75 }
76 EXPORT_SYMBOL(__lock_buffer);
77
78 void fastcall unlock_buffer(struct buffer_head *bh)
79 {
80         clear_buffer_locked(bh);
81         smp_mb__after_clear_bit();
82         wake_up_bit(&bh->b_state, BH_Lock);
83 }
84
85 /*
86  * Block until a buffer comes unlocked.  This doesn't stop it
87  * from becoming locked again - you have to lock it yourself
88  * if you want to preserve its state.
89  */
90 void __wait_on_buffer(struct buffer_head * bh)
91 {
92         wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93 }
94
95 static void
96 __clear_page_buffers(struct page *page)
97 {
98         ClearPagePrivate(page);
99         set_page_private(page, 0);
100         page_cache_release(page);
101 }
102
103 static void buffer_io_error(struct buffer_head *bh)
104 {
105         char b[BDEVNAME_SIZE];
106
107         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108                         bdevname(bh->b_bdev, b),
109                         (unsigned long long)bh->b_blocknr);
110 }
111
112 /*
113  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
114  * unlock the buffer. This is what ll_rw_block uses too.
115  */
116 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
117 {
118         if (uptodate) {
119                 set_buffer_uptodate(bh);
120         } else {
121                 /* This happens, due to failed READA attempts. */
122                 clear_buffer_uptodate(bh);
123         }
124         unlock_buffer(bh);
125         put_bh(bh);
126 }
127
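/*
 * Editor's note: a minimal sketch (not part of the original file) of how a
 * synchronous read is typically driven with end_buffer_read_sync().  It
 * mirrors the pattern used by __bread_slow() further down; the function
 * name is hypothetical.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);                     /* dropped by end_buffer_read_sync() */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);             /* end_io handler unlocks the buffer */
        return buffer_uptodate(bh) ? 0 : -EIO;
}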
128 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
129 {
130         char b[BDEVNAME_SIZE];
131
132         if (uptodate) {
133                 set_buffer_uptodate(bh);
134         } else {
135                 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
136                         buffer_io_error(bh);
137                         printk(KERN_WARNING "lost page write due to "
138                                         "I/O error on %s\n",
139                                        bdevname(bh->b_bdev, b));
140                 }
141                 set_buffer_write_io_error(bh);
142                 clear_buffer_uptodate(bh);
143         }
144         unlock_buffer(bh);
145         put_bh(bh);
146 }
147
148 /*
149  * Write out and wait upon all the dirty data associated with a block
150  * device via its mapping.  Does not take the superblock lock.
151  */
152 int sync_blockdev(struct block_device *bdev)
153 {
154         int ret = 0;
155
156         if (bdev) {
157                 int err;
158
159                 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
160                 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
161                 if (!ret)
162                         ret = err;
163         }
164         return ret;
165 }
166 EXPORT_SYMBOL(sync_blockdev);
167
168 /*
169  * Write out and wait upon all dirty data associated with this
170  * superblock.  Filesystem data as well as the underlying block
171  * device.  Takes the superblock lock.
172  */
173 int fsync_super(struct super_block *sb)
174 {
175         sync_inodes_sb(sb, 0);
176         DQUOT_SYNC(sb);
177         lock_super(sb);
178         if (sb->s_dirt && sb->s_op->write_super)
179                 sb->s_op->write_super(sb);
180         unlock_super(sb);
181         if (sb->s_op->sync_fs)
182                 sb->s_op->sync_fs(sb, 1);
183         sync_blockdev(sb->s_bdev);
184         sync_inodes_sb(sb, 1);
185
186         return sync_blockdev(sb->s_bdev);
187 }
188
189 /*
190  * Write out and wait upon all dirty data associated with this
191  * device.   Filesystem data as well as the underlying block
192  * device.  Takes the superblock lock.
193  */
194 int fsync_bdev(struct block_device *bdev)
195 {
196         struct super_block *sb = get_super(bdev);
197         if (sb) {
198                 int res = fsync_super(sb);
199                 drop_super(sb);
200                 return res;
201         }
202         return sync_blockdev(bdev);
203 }
204
205 /**
206  * freeze_bdev  --  lock a filesystem and force it into a consistent state
207  * @bdev:       blockdevice to lock
208  *
209  * This takes the block device bd_mount_sem to make sure no new mounts
210  * happen on bdev until thaw_bdev() is called.
211  * If a superblock is found on this device, we take the s_umount semaphore
212  * on it to make sure nobody unmounts until the snapshot creation is done.
213  */
214 struct super_block *freeze_bdev(struct block_device *bdev)
215 {
216         struct super_block *sb;
217
218         down(&bdev->bd_mount_sem);
219         sb = get_super(bdev);
220         if (sb && !(sb->s_flags & MS_RDONLY)) {
221                 sb->s_frozen = SB_FREEZE_WRITE;
222                 smp_wmb();
223
224                 sync_inodes_sb(sb, 0);
225                 DQUOT_SYNC(sb);
226
227                 lock_super(sb);
228                 if (sb->s_dirt && sb->s_op->write_super)
229                         sb->s_op->write_super(sb);
230                 unlock_super(sb);
231
232                 if (sb->s_op->sync_fs)
233                         sb->s_op->sync_fs(sb, 1);
234
235                 sync_blockdev(sb->s_bdev);
236                 sync_inodes_sb(sb, 1);
237
238                 sb->s_frozen = SB_FREEZE_TRANS;
239                 smp_wmb();
240
241                 sync_blockdev(sb->s_bdev);
242
243                 if (sb->s_op->write_super_lockfs)
244                         sb->s_op->write_super_lockfs(sb);
245         }
246
247         sync_blockdev(bdev);
248         return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
249 }
250 EXPORT_SYMBOL(freeze_bdev);
251
252 /**
253  * thaw_bdev  -- unlock filesystem
254  * @bdev:       blockdevice to unlock
255  * @sb:         associated superblock
256  *
257  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
258  */
259 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
260 {
261         if (sb) {
262                 BUG_ON(sb->s_bdev != bdev);
263
264                 if (sb->s_op->unlockfs)
265                         sb->s_op->unlockfs(sb);
266                 sb->s_frozen = SB_UNFROZEN;
267                 smp_wmb();
268                 wake_up(&sb->s_wait_unfrozen);
269                 drop_super(sb);
270         }
271
272         up(&bdev->bd_mount_sem);
273 }
274 EXPORT_SYMBOL(thaw_bdev);
275
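/*
 * Editor's note: an illustrative caller of the freeze/thaw pair above, of
 * the kind a snapshotting driver might use.  The helper name and the
 * snapshot step are assumptions, not taken from this file.
 */
static int example_snapshot_bdev(struct block_device *bdev)
{
        struct super_block *sb;

        sb = freeze_bdev(bdev);         /* blocks new writers, flushes dirty data */
        /* ... take the block-level snapshot here ... */
        thaw_bdev(bdev, sb);            /* sb may be NULL if nothing was mounted */
        return 0;
}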
276 /*
277  * sync everything.  Start out by waking pdflush, because that writes back
278  * all queues in parallel.
279  */
280 static void do_sync(unsigned long wait)
281 {
282         wakeup_pdflush(0);
283         sync_inodes(0);         /* All mappings, inodes and their blockdevs */
284         DQUOT_SYNC(NULL);
285         sync_supers();          /* Write the superblocks */
286         sync_filesystems(0);    /* Start syncing the filesystems */
287         sync_filesystems(wait); /* Waitingly sync the filesystems */
288         sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
289         if (!wait)
290                 printk("Emergency Sync complete\n");
291         if (unlikely(laptop_mode))
292                 laptop_sync_completion();
293 }
294
295 asmlinkage long sys_sync(void)
296 {
297         do_sync(1);
298         return 0;
299 }
300
301 void emergency_sync(void)
302 {
303         pdflush_operation(do_sync, 0);
304 }
305
306 /*
307  * Generic function to fsync a file.
308  *
309  * filp may be NULL if called via the msync of a vma.
310  */
311  
312 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
313 {
314         struct inode * inode = dentry->d_inode;
315         struct super_block * sb;
316         int ret, err;
317
318         /* sync the inode to buffers */
319         ret = write_inode_now(inode, 0);
320
321         /* sync the superblock to buffers */
322         sb = inode->i_sb;
323         lock_super(sb);
324         if (sb->s_op->write_super)
325                 sb->s_op->write_super(sb);
326         unlock_super(sb);
327
328         /* .. finally sync the buffers to disk */
329         err = sync_blockdev(sb->s_bdev);
330         if (!ret)
331                 ret = err;
332         return ret;
333 }
334
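/*
 * Editor's note: file_fsync() is intended to be wired straight into a
 * simple filesystem's file_operations.  A hedged sketch; the "examplefs"
 * name is an assumption and only ->fsync is the point of the example.
 */
static struct file_operations examplefs_file_operations = {
        .read           = generic_file_read,
        .write          = generic_file_write,
        .mmap           = generic_file_mmap,
        .fsync          = file_fsync,   /* inode, superblock, then blockdev */
};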
335 static long do_fsync(unsigned int fd, int datasync)
336 {
337         struct file * file;
338         struct address_space *mapping;
339         int ret, err;
340
341         ret = -EBADF;
342         file = fget(fd);
343         if (!file)
344                 goto out;
345
346         ret = -EINVAL;
347         if (!file->f_op || !file->f_op->fsync) {
348                 /* Why?  We can still call filemap_fdatawrite */
349                 goto out_putf;
350         }
351
352         mapping = file->f_mapping;
353
354         current->flags |= PF_SYNCWRITE;
355         ret = filemap_fdatawrite(mapping);
356
357         /*
358          * We need to protect against concurrent writers,
359          * which could cause livelocks in fsync_buffers_list
360          */
361         down(&mapping->host->i_sem);
362         err = file->f_op->fsync(file, file->f_dentry, datasync);
363         if (!ret)
364                 ret = err;
365         up(&mapping->host->i_sem);
366         err = filemap_fdatawait(mapping);
367         if (!ret)
368                 ret = err;
369         current->flags &= ~PF_SYNCWRITE;
370
371 out_putf:
372         fput(file);
373 out:
374         return ret;
375 }
376
377 asmlinkage long sys_fsync(unsigned int fd)
378 {
379         return do_fsync(fd, 0);
380 }
381
382 asmlinkage long sys_fdatasync(unsigned int fd)
383 {
384         return do_fsync(fd, 1);
385 }
386
387 /*
388  * Various filesystems appear to want __find_get_block to be non-blocking.
389  * But it's the page lock which protects the buffers.  To get around this,
390  * we get exclusion from try_to_free_buffers with the blockdev mapping's
391  * private_lock.
392  *
393  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
394  * may be quite high.  This code could TryLock the page, and if that
395  * succeeds, there is no need to take private_lock. (But if
396  * private_lock is contended then so is mapping->tree_lock).
397  */
398 static struct buffer_head *
399 __find_get_block_slow(struct block_device *bdev, sector_t block)
400 {
401         struct inode *bd_inode = bdev->bd_inode;
402         struct address_space *bd_mapping = bd_inode->i_mapping;
403         struct buffer_head *ret = NULL;
404         pgoff_t index;
405         struct buffer_head *bh;
406         struct buffer_head *head;
407         struct page *page;
408         int all_mapped = 1;
409
410         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
411         page = find_get_page(bd_mapping, index);
412         if (!page)
413                 goto out;
414
415         spin_lock(&bd_mapping->private_lock);
416         if (!page_has_buffers(page))
417                 goto out_unlock;
418         head = page_buffers(page);
419         bh = head;
420         do {
421                 if (bh->b_blocknr == block) {
422                         ret = bh;
423                         get_bh(bh);
424                         goto out_unlock;
425                 }
426                 if (!buffer_mapped(bh))
427                         all_mapped = 0;
428                 bh = bh->b_this_page;
429         } while (bh != head);
430
431         /* we might be here because some of the buffers on this page are
432          * not mapped.  This is due to various races between
433          * file io on the block device and getblk.  It gets dealt with
434          * elsewhere, don't buffer_error if we had some unmapped buffers
435          */
436         if (all_mapped) {
437                 printk("__find_get_block_slow() failed. "
438                         "block=%llu, b_blocknr=%llu\n",
439                         (unsigned long long)block, (unsigned long long)bh->b_blocknr);
440                 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
441                 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
442         }
443 out_unlock:
444         spin_unlock(&bd_mapping->private_lock);
445         page_cache_release(page);
446 out:
447         return ret;
448 }
449
450 /* If invalidate_buffers() will trash dirty buffers, it means some kind
451    of fs corruption is going on. Trashing dirty data always implies losing
452    information that the user expected to have been stored on the physical
453    layer.
454
455    Thus invalidate_buffers() in general usage is not allowed to trash
456    dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
457    be preserved.  These buffers are simply skipped.
458
459    We also skip buffers which are still in use.  For example, this can
460    happen if a userspace program is reading the block device.
461
462    NOTE: if the user removes a removable-media disk while there is still
463    dirty data that has not been synced to disk (due to a bug in the device
464    driver, or to user error), then by not destroying the dirty buffers we
465    could also corrupt the next medium inserted; a parameter is therefore
466    needed to handle this case as safely as possible (trying not to corrupt
467    the newly inserted disk with data belonging to the old, now-corrupted
468    one). For a ramdisk, on the other hand, the natural way to release the
469    ramdisk memory is to destroy its dirty buffers.
470
471    These are two special cases. Normal usage implies that the device driver
472    issues a sync on the device (without waiting for I/O completion) and
473    then calls invalidate_buffers() in a way that does not trash dirty buffers.
474
475    For handling cache coherency with the blkdev pagecache, the 'update' case
476    has been introduced. It is needed to re-read any pinned buffer from disk.
477    NOTE: re-reading from disk is destructive, so we can do it only when
478    we assume nobody is changing the buffercache under our I/O and when
479    we think the disk contains more recent information than the buffercache.
480    The update == 1 pass marks the buffers we need to update; the update == 2
481    pass does the actual I/O. */
482 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
483 {
484         invalidate_bh_lrus();
485         /*
486          * FIXME: what about destroy_dirty_buffers?
487          * We really want to use invalidate_inode_pages2() for
488          * that, but not until that's cleaned up.
489          */
490         invalidate_inode_pages(bdev->bd_inode->i_mapping);
491 }
492
493 /*
494  * Kick pdflush then try to free up some ZONE_NORMAL memory.
495  */
496 static void free_more_memory(void)
497 {
498         struct zone **zones;
499         pg_data_t *pgdat;
500
501         wakeup_pdflush(1024);
502         yield();
503
504         for_each_pgdat(pgdat) {
505                 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
506                 if (*zones)
507                         try_to_free_pages(zones, GFP_NOFS);
508         }
509 }
510
511 /*
512  * I/O completion handler for block_read_full_page() - pages
513  * which come unlocked at the end of I/O.
514  */
515 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
516 {
517         unsigned long flags;
518         struct buffer_head *first;
519         struct buffer_head *tmp;
520         struct page *page;
521         int page_uptodate = 1;
522
523         BUG_ON(!buffer_async_read(bh));
524
525         page = bh->b_page;
526         if (uptodate) {
527                 set_buffer_uptodate(bh);
528         } else {
529                 clear_buffer_uptodate(bh);
530                 if (printk_ratelimit())
531                         buffer_io_error(bh);
532                 SetPageError(page);
533         }
534
535         /*
536          * Be _very_ careful from here on. Bad things can happen if
537          * two buffer heads end IO at almost the same time and both
538          * decide that the page is now completely done.
539          */
540         first = page_buffers(page);
541         local_irq_save(flags);
542         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
543         clear_buffer_async_read(bh);
544         unlock_buffer(bh);
545         tmp = bh;
546         do {
547                 if (!buffer_uptodate(tmp))
548                         page_uptodate = 0;
549                 if (buffer_async_read(tmp)) {
550                         BUG_ON(!buffer_locked(tmp));
551                         goto still_busy;
552                 }
553                 tmp = tmp->b_this_page;
554         } while (tmp != bh);
555         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
556         local_irq_restore(flags);
557
558         /*
559          * If none of the buffers had errors and they are all
560          * uptodate then we can set the page uptodate.
561          */
562         if (page_uptodate && !PageError(page))
563                 SetPageUptodate(page);
564         unlock_page(page);
565         return;
566
567 still_busy:
568         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
569         local_irq_restore(flags);
570         return;
571 }
572
573 /*
574  * Completion handler for block_write_full_page() - pages which are unlocked
575  * during I/O, and which have PageWriteback cleared upon I/O completion.
576  */
577 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
578 {
579         char b[BDEVNAME_SIZE];
580         unsigned long flags;
581         struct buffer_head *first;
582         struct buffer_head *tmp;
583         struct page *page;
584
585         BUG_ON(!buffer_async_write(bh));
586
587         page = bh->b_page;
588         if (uptodate) {
589                 set_buffer_uptodate(bh);
590         } else {
591                 if (printk_ratelimit()) {
592                         buffer_io_error(bh);
593                         printk(KERN_WARNING "lost page write due to "
594                                         "I/O error on %s\n",
595                                bdevname(bh->b_bdev, b));
596                 }
597                 set_bit(AS_EIO, &page->mapping->flags);
598                 clear_buffer_uptodate(bh);
599                 SetPageError(page);
600         }
601
602         first = page_buffers(page);
603         local_irq_save(flags);
604         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
605
606         clear_buffer_async_write(bh);
607         unlock_buffer(bh);
608         tmp = bh->b_this_page;
609         while (tmp != bh) {
610                 if (buffer_async_write(tmp)) {
611                         BUG_ON(!buffer_locked(tmp));
612                         goto still_busy;
613                 }
614                 tmp = tmp->b_this_page;
615         }
616         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
617         local_irq_restore(flags);
618         end_page_writeback(page);
619         return;
620
621 still_busy:
622         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
623         local_irq_restore(flags);
624         return;
625 }
626
627 /*
628  * If a page's buffers are under async read-in (end_buffer_async_read
629  * completion) then there is a possibility that another thread of
630  * control could lock one of the buffers after it has completed
631  * but while some of the other buffers have not completed.  This
632  * locked buffer would confuse end_buffer_async_read() into not unlocking
633  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
634  * that this buffer is not under async I/O.
635  *
636  * The page comes unlocked when it has no locked buffer_async buffers
637  * left.
638  *
639  * PageLocked prevents anyone from starting new async I/O against any of
640  * the buffers.
641  *
642  * PageWriteback is used to prevent simultaneous writeout of the same
643  * page.
644  *
645  * PageLocked prevents anyone from starting writeback of a page which is
646  * under read I/O (PageWriteback is only ever set against a locked page).
647  */
648 static void mark_buffer_async_read(struct buffer_head *bh)
649 {
650         bh->b_end_io = end_buffer_async_read;
651         set_buffer_async_read(bh);
652 }
653
654 void mark_buffer_async_write(struct buffer_head *bh)
655 {
656         bh->b_end_io = end_buffer_async_write;
657         set_buffer_async_write(bh);
658 }
659 EXPORT_SYMBOL(mark_buffer_async_write);
660
661
662 /*
663  * fs/buffer.c contains helper functions for buffer-backed address space's
664  * fsync functions.  A common requirement for buffer-based filesystems is
665  * that certain data from the backing blockdev needs to be written out for
666  * a successful fsync().  For example, ext2 indirect blocks need to be
667  * written back and waited upon before fsync() returns.
668  *
669  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
670  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
671  * management of a list of dependent buffers at ->i_mapping->private_list.
672  *
673  * Locking is a little subtle: try_to_free_buffers() will remove buffers
674  * from their controlling inode's queue when they are being freed.  But
675  * try_to_free_buffers() will be operating against the *blockdev* mapping
676  * at the time, not against the S_ISREG file which depends on those buffers.
677  * So the locking for private_list is via the private_lock in the address_space
678  * which backs the buffers.  Which is different from the address_space 
679  * against which the buffers are listed.  So for a particular address_space,
680  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
681  * mapping->private_list will always be protected by the backing blockdev's
682  * ->private_lock.
683  *
684  * Which introduces a requirement: all buffers on an address_space's
685  * ->private_list must be from the same address_space: the blockdev's.
686  *
687  * address_spaces which do not place buffers at ->private_list via these
688  * utility functions are free to use private_lock and private_list for
689  * whatever they want.  The only requirement is that list_empty(private_list)
690  * be true at clear_inode() time.
691  *
692  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
693  * filesystems should do that.  invalidate_inode_buffers() should just go
694  * BUG_ON(!list_empty).
695  *
696  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
697  * take an address_space, not an inode.  And it should be called
698  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
699  * queued up.
700  *
701  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
702  * list if it is already on a list.  Because if the buffer is on a list,
703  * it *must* already be on the right one.  If not, the filesystem is being
704  * silly.  This will save a ton of locking.  But first we have to ensure
705  * that buffers are taken *off* the old inode's list when they are freed
706  * (presumably in truncate).  That requires careful auditing of all
707  * filesystems (do it inside bforget()).  It could also be done by bringing
708  * b_inode back.
709  */
710
711 /*
712  * The buffer's backing address_space's private_lock must be held
713  */
714 static inline void __remove_assoc_queue(struct buffer_head *bh)
715 {
716         list_del_init(&bh->b_assoc_buffers);
717 }
718
719 int inode_has_buffers(struct inode *inode)
720 {
721         return !list_empty(&inode->i_data.private_list);
722 }
723
724 /*
725  * osync is designed to support O_SYNC io.  It waits synchronously for
726  * all already-submitted IO to complete, but does not queue any new
727  * writes to the disk.
728  *
729  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
730  * you dirty the buffers, and then use osync_inode_buffers to wait for
731  * completion.  Any other dirty buffers which are not yet queued for
732  * write will not be flushed to disk by the osync.
733  */
734 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
735 {
736         struct buffer_head *bh;
737         struct list_head *p;
738         int err = 0;
739
740         spin_lock(lock);
741 repeat:
742         list_for_each_prev(p, list) {
743                 bh = BH_ENTRY(p);
744                 if (buffer_locked(bh)) {
745                         get_bh(bh);
746                         spin_unlock(lock);
747                         wait_on_buffer(bh);
748                         if (!buffer_uptodate(bh))
749                                 err = -EIO;
750                         brelse(bh);
751                         spin_lock(lock);
752                         goto repeat;
753                 }
754         }
755         spin_unlock(lock);
756         return err;
757 }
758
759 /**
760  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
761  *                        buffers
762  * @mapping: the mapping which wants those buffers written
763  *
764  * Starts I/O against the buffers at mapping->private_list, and waits upon
765  * that I/O.
766  *
767  * Basically, this is a convenience function for fsync().
768  * @mapping is a file or directory which needs those buffers to be written for
769  * a successful fsync().
770  */
771 int sync_mapping_buffers(struct address_space *mapping)
772 {
773         struct address_space *buffer_mapping = mapping->assoc_mapping;
774
775         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
776                 return 0;
777
778         return fsync_buffers_list(&buffer_mapping->private_lock,
779                                         &mapping->private_list);
780 }
781 EXPORT_SYMBOL(sync_mapping_buffers);
782
783 /*
784  * Called when we've recently written block `bblock', and it is known that
785  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
786  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
787  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
788  */
789 void write_boundary_block(struct block_device *bdev,
790                         sector_t bblock, unsigned blocksize)
791 {
792         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
793         if (bh) {
794                 if (buffer_dirty(bh))
795                         ll_rw_block(WRITE, 1, &bh);
796                 put_bh(bh);
797         }
798 }
799
800 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
801 {
802         struct address_space *mapping = inode->i_mapping;
803         struct address_space *buffer_mapping = bh->b_page->mapping;
804
805         mark_buffer_dirty(bh);
806         if (!mapping->assoc_mapping) {
807                 mapping->assoc_mapping = buffer_mapping;
808         } else {
809                 if (mapping->assoc_mapping != buffer_mapping)
810                         BUG();
811         }
812         if (list_empty(&bh->b_assoc_buffers)) {
813                 spin_lock(&buffer_mapping->private_lock);
814                 list_move_tail(&bh->b_assoc_buffers,
815                                 &mapping->private_list);
816                 spin_unlock(&buffer_mapping->private_lock);
817         }
818 }
819 EXPORT_SYMBOL(mark_buffer_dirty_inode);
820
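/*
 * Editor's note: a hedged sketch of the pattern described in the large
 * comment above (ext2-style).  When a filesystem dirties blockdev metadata
 * (e.g. an indirect block) on behalf of a regular file it queues the buffer
 * on that file's mapping with mark_buffer_dirty_inode(); its fsync then
 * only needs to flush that private list.  The "examplefs" names are
 * assumptions.
 */
static void examplefs_dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
        mark_buffer_dirty_inode(bh, inode);     /* dirty + queue on ->private_list */
}

static int examplefs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct inode *inode = dentry->d_inode;

        return sync_mapping_buffers(inode->i_mapping);  /* write + wait on the list */
}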
821 /*
822  * Add a page to the dirty page list.
823  *
824  * It is a sad fact of life that this function is called from several places
825  * deeply under spinlocking.  It may not sleep.
826  *
827  * If the page has buffers, the uptodate buffers are set dirty, to preserve
828  * dirty-state coherency between the page and the buffers.  If the page does
829  * not have buffers then when they are later attached they will all be set
830  * dirty.
831  *
832  * The buffers are dirtied before the page is dirtied.  There's a small race
833  * window in which a writepage caller may see the page cleanness but not the
834  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
835  * before the buffers, a concurrent writepage caller could clear the page dirty
836  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
837  * page on the dirty page list.
838  *
839  * We use private_lock to lock against try_to_free_buffers while using the
840  * page's buffer list.  Also use this to protect against clean buffers being
841  * added to the page after it was set dirty.
842  *
843  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
844  * address_space though.
845  */
846 int __set_page_dirty_buffers(struct page *page)
847 {
848         struct address_space * const mapping = page->mapping;
849
850         spin_lock(&mapping->private_lock);
851         if (page_has_buffers(page)) {
852                 struct buffer_head *head = page_buffers(page);
853                 struct buffer_head *bh = head;
854
855                 do {
856                         set_buffer_dirty(bh);
857                         bh = bh->b_this_page;
858                 } while (bh != head);
859         }
860         spin_unlock(&mapping->private_lock);
861
862         if (!TestSetPageDirty(page)) {
863                 write_lock_irq(&mapping->tree_lock);
864                 if (page->mapping) {    /* Race with truncate? */
865                         if (mapping_cap_account_dirty(mapping))
866                                 inc_page_state(nr_dirty);
867                         radix_tree_tag_set(&mapping->page_tree,
868                                                 page_index(page),
869                                                 PAGECACHE_TAG_DIRTY);
870                 }
871                 write_unlock_irq(&mapping->tree_lock);
872                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
873         }
874         
875         return 0;
876 }
877 EXPORT_SYMBOL(__set_page_dirty_buffers);
878
879 /*
880  * Write out and wait upon a list of buffers.
881  *
882  * We have conflicting pressures: we want to make sure that all
883  * initially dirty buffers get waited on, but that any subsequently
884  * dirtied buffers don't.  After all, we don't want fsync to last
885  * forever if somebody is actively writing to the file.
886  *
887  * Do this in two main stages: first we copy dirty buffers to a
888  * temporary inode list, queueing the writes as we go.  Then we clean
889  * up, waiting for those writes to complete.
890  * 
891  * During this second stage, any subsequent updates to the file may end
892  * up refiling the buffer on the original inode's dirty list again, so
893  * there is a chance we will end up with a buffer queued for write but
894  * not yet completed on that list.  So, as a final cleanup we go through
895  * the osync code to catch these locked, dirty buffers without requeuing
896  * any newly dirty buffers for write.
897  */
898 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
899 {
900         struct buffer_head *bh;
901         struct list_head tmp;
902         int err = 0, err2;
903
904         INIT_LIST_HEAD(&tmp);
905
906         spin_lock(lock);
907         while (!list_empty(list)) {
908                 bh = BH_ENTRY(list->next);
909                 list_del_init(&bh->b_assoc_buffers);
910                 if (buffer_dirty(bh) || buffer_locked(bh)) {
911                         list_add(&bh->b_assoc_buffers, &tmp);
912                         if (buffer_dirty(bh)) {
913                                 get_bh(bh);
914                                 spin_unlock(lock);
915                                 /*
916                                  * Ensure any pending I/O completes so that
917                                  * ll_rw_block() actually writes the current
918                                  * contents - it is a noop if I/O is still in
919                                  * flight on potentially older contents.
920                                  */
921                                 ll_rw_block(SWRITE, 1, &bh);
922                                 brelse(bh);
923                                 spin_lock(lock);
924                         }
925                 }
926         }
927
928         while (!list_empty(&tmp)) {
929                 bh = BH_ENTRY(tmp.prev);
930                 __remove_assoc_queue(bh);
931                 get_bh(bh);
932                 spin_unlock(lock);
933                 wait_on_buffer(bh);
934                 if (!buffer_uptodate(bh))
935                         err = -EIO;
936                 brelse(bh);
937                 spin_lock(lock);
938         }
939         
940         spin_unlock(lock);
941         err2 = osync_buffers_list(lock, list);
942         if (err)
943                 return err;
944         else
945                 return err2;
946 }
947
948 /*
949  * Invalidate any and all dirty buffers on a given inode.  We are
950  * probably unmounting the fs, but that doesn't mean we have already
951  * done a sync().  Just drop the buffers from the inode list.
952  *
953  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
954  * assumes that all the buffers are against the blockdev.  Not true
955  * for reiserfs.
956  */
957 void invalidate_inode_buffers(struct inode *inode)
958 {
959         if (inode_has_buffers(inode)) {
960                 struct address_space *mapping = &inode->i_data;
961                 struct list_head *list = &mapping->private_list;
962                 struct address_space *buffer_mapping = mapping->assoc_mapping;
963
964                 spin_lock(&buffer_mapping->private_lock);
965                 while (!list_empty(list))
966                         __remove_assoc_queue(BH_ENTRY(list->next));
967                 spin_unlock(&buffer_mapping->private_lock);
968         }
969 }
970
971 /*
972  * Remove any clean buffers from the inode's buffer list.  This is called
973  * when we're trying to free the inode itself.  Those buffers can pin it.
974  *
975  * Returns true if all buffers were removed.
976  */
977 int remove_inode_buffers(struct inode *inode)
978 {
979         int ret = 1;
980
981         if (inode_has_buffers(inode)) {
982                 struct address_space *mapping = &inode->i_data;
983                 struct list_head *list = &mapping->private_list;
984                 struct address_space *buffer_mapping = mapping->assoc_mapping;
985
986                 spin_lock(&buffer_mapping->private_lock);
987                 while (!list_empty(list)) {
988                         struct buffer_head *bh = BH_ENTRY(list->next);
989                         if (buffer_dirty(bh)) {
990                                 ret = 0;
991                                 break;
992                         }
993                         __remove_assoc_queue(bh);
994                 }
995                 spin_unlock(&buffer_mapping->private_lock);
996         }
997         return ret;
998 }
999
1000 /*
1001  * Create the appropriate buffers when given a page for data area and
1002  * the size of each buffer.. Use the bh->b_this_page linked list to
1003  * follow the buffers created.  Return NULL if unable to create more
1004  * buffers.
1005  *
1006  * The retry flag is used to differentiate async IO (paging, swapping)
1007  * which may not fail from ordinary buffer allocations.
1008  */
1009 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1010                 int retry)
1011 {
1012         struct buffer_head *bh, *head;
1013         long offset;
1014
1015 try_again:
1016         head = NULL;
1017         offset = PAGE_SIZE;
1018         while ((offset -= size) >= 0) {
1019                 bh = alloc_buffer_head(GFP_NOFS);
1020                 if (!bh)
1021                         goto no_grow;
1022
1023                 bh->b_bdev = NULL;
1024                 bh->b_this_page = head;
1025                 bh->b_blocknr = -1;
1026                 head = bh;
1027
1028                 bh->b_state = 0;
1029                 atomic_set(&bh->b_count, 0);
1030                 bh->b_size = size;
1031
1032                 /* Link the buffer to its page */
1033                 set_bh_page(bh, page, offset);
1034
1035                 bh->b_end_io = NULL;
1036         }
1037         return head;
1038 /*
1039  * In case anything failed, we just free everything we got.
1040  */
1041 no_grow:
1042         if (head) {
1043                 do {
1044                         bh = head;
1045                         head = head->b_this_page;
1046                         free_buffer_head(bh);
1047                 } while (head);
1048         }
1049
1050         /*
1051          * Return failure for non-async IO requests.  Async IO requests
1052          * are not allowed to fail, so we have to wait until buffer heads
1053          * become available.  But we don't want tasks sleeping with 
1054          * partially complete buffers, so all were released above.
1055          */
1056         if (!retry)
1057                 return NULL;
1058
1059         /* We're _really_ low on memory. Now we just
1060          * wait for old buffer heads to become free due to
1061          * finishing IO.  Since this is an async request and
1062          * the reserve list is empty, we're sure there are 
1063          * async buffer heads in use.
1064          */
1065         free_more_memory();
1066         goto try_again;
1067 }
1068 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1069
1070 static inline void
1071 link_dev_buffers(struct page *page, struct buffer_head *head)
1072 {
1073         struct buffer_head *bh, *tail;
1074
1075         bh = head;
1076         do {
1077                 tail = bh;
1078                 bh = bh->b_this_page;
1079         } while (bh);
1080         tail->b_this_page = head;
1081         attach_page_buffers(page, head);
1082 }
1083
1084 /*
1085  * Initialise the state of a blockdev page's buffers.
1086  */ 
1087 static void
1088 init_page_buffers(struct page *page, struct block_device *bdev,
1089                         sector_t block, int size)
1090 {
1091         struct buffer_head *head = page_buffers(page);
1092         struct buffer_head *bh = head;
1093         int uptodate = PageUptodate(page);
1094
1095         do {
1096                 if (!buffer_mapped(bh)) {
1097                         init_buffer(bh, NULL, NULL);
1098                         bh->b_bdev = bdev;
1099                         bh->b_blocknr = block;
1100                         if (uptodate)
1101                                 set_buffer_uptodate(bh);
1102                         set_buffer_mapped(bh);
1103                 }
1104                 block++;
1105                 bh = bh->b_this_page;
1106         } while (bh != head);
1107 }
1108
1109 /*
1110  * Create the page-cache page that contains the requested block.
1111  *
1112  * This is used purely for blockdev mappings.
1113  */
1114 static struct page *
1115 grow_dev_page(struct block_device *bdev, sector_t block,
1116                 pgoff_t index, int size)
1117 {
1118         struct inode *inode = bdev->bd_inode;
1119         struct page *page;
1120         struct buffer_head *bh;
1121
1122         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1123         if (!page)
1124                 return NULL;
1125
1126         if (!PageLocked(page))
1127                 BUG();
1128
1129         if (page_has_buffers(page)) {
1130                 bh = page_buffers(page);
1131                 if (bh->b_size == size) {
1132                         init_page_buffers(page, bdev, block, size);
1133                         return page;
1134                 }
1135                 if (!try_to_free_buffers(page))
1136                         goto failed;
1137         }
1138
1139         /*
1140          * Allocate some buffers for this page
1141          */
1142         bh = alloc_page_buffers(page, size, 0);
1143         if (!bh)
1144                 goto failed;
1145
1146         /*
1147          * Link the page to the buffers and initialise them.  Take the
1148          * lock to be atomic wrt __find_get_block(), which does not
1149          * run under the page lock.
1150          */
1151         spin_lock(&inode->i_mapping->private_lock);
1152         link_dev_buffers(page, bh);
1153         init_page_buffers(page, bdev, block, size);
1154         spin_unlock(&inode->i_mapping->private_lock);
1155         return page;
1156
1157 failed:
1158         BUG();
1159         unlock_page(page);
1160         page_cache_release(page);
1161         return NULL;
1162 }
1163
1164 /*
1165  * Create buffers for the specified block device block's page.  If
1166  * that page was dirty, the buffers are set dirty also.
1167  *
1168  * Except that's a bug.  Attaching dirty buffers to a dirty
1169  * blockdev's page can result in filesystem corruption, because
1170  * some of those buffers may be aliases of filesystem data.
1171  * grow_dev_page() will go BUG() if this happens.
1172  */
1173 static inline int
1174 grow_buffers(struct block_device *bdev, sector_t block, int size)
1175 {
1176         struct page *page;
1177         pgoff_t index;
1178         int sizebits;
1179
1180         sizebits = -1;
1181         do {
1182                 sizebits++;
1183         } while ((size << sizebits) < PAGE_SIZE);
1184
1185         index = block >> sizebits;
1186         block = index << sizebits;
1187
1188         /* Create a page with the proper size buffers.. */
1189         page = grow_dev_page(bdev, block, index, size);
1190         if (!page)
1191                 return 0;
1192         unlock_page(page);
1193         page_cache_release(page);
1194         return 1;
1195 }
1196
1197 static struct buffer_head *
1198 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1199 {
1200         /* Size must be multiple of hard sectorsize */
1201         if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1202                         (size < 512 || size > PAGE_SIZE))) {
1203                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1204                                         size);
1205                 printk(KERN_ERR "hardsect size: %d\n",
1206                                         bdev_hardsect_size(bdev));
1207
1208                 dump_stack();
1209                 return NULL;
1210         }
1211
1212         for (;;) {
1213                 struct buffer_head * bh;
1214
1215                 bh = __find_get_block(bdev, block, size);
1216                 if (bh)
1217                         return bh;
1218
1219                 if (!grow_buffers(bdev, block, size))
1220                         free_more_memory();
1221         }
1222 }
1223
1224 /*
1225  * The relationship between dirty buffers and dirty pages:
1226  *
1227  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1228  * the page is tagged dirty in its radix tree.
1229  *
1230  * At all times, the dirtiness of the buffers represents the dirtiness of
1231  * subsections of the page.  If the page has buffers, the page dirty bit is
1232  * merely a hint about the true dirty state.
1233  *
1234  * When a page is set dirty in its entirety, all its buffers are marked dirty
1235  * (if the page has buffers).
1236  *
1237  * When a buffer is marked dirty, its page is dirtied, but the page's other
1238  * buffers are not.
1239  *
1240  * Also.  When blockdev buffers are explicitly read with bread(), they
1241  * individually become uptodate.  But their backing page remains not
1242  * uptodate - even if all of its buffers are uptodate.  A subsequent
1243  * block_read_full_page() against that page will discover all the uptodate
1244  * buffers, will set the page uptodate and will perform no I/O.
1245  */
1246
1247 /**
1248  * mark_buffer_dirty - mark a buffer_head as needing writeout
1249  * @bh: the buffer_head to mark dirty
1250  *
1251  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1252  * backing page dirty, then tag the page as dirty in its address_space's radix
1253  * tree and then attach the address_space's inode to its superblock's dirty
1254  * inode list.
1255  *
1256  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1257  * mapping->tree_lock and the global inode_lock.
1258  */
1259 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1260 {
1261         if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1262                 __set_page_dirty_nobuffers(bh->b_page);
1263 }
1264
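/*
 * Editor's note: the usual read-modify-dirty sequence for blockdev
 * metadata, shown as a hedged sketch (the names, and the assumption that
 * len <= bh->b_size, are illustrative).  Writeback of the dirtied buffer
 * happens later via the normal VM paths unless the caller syncs.
 */
static int example_update_block(struct super_block *sb, sector_t block,
                                const void *data, size_t len)
{
        struct buffer_head *bh;

        bh = sb_bread(sb, block);
        if (!bh)
                return -EIO;
        memcpy(bh->b_data, data, len);
        mark_buffer_dirty(bh);          /* buffer, page, radix tag, inode list */
        brelse(bh);
        return 0;
}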
1265 /*
1266  * Decrement a buffer_head's reference count.  If all buffers against a page
1267  * have zero reference count, are clean and unlocked, and if the page is clean
1268  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1269  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1270  * a page but it ends up not being freed, and buffers may later be reattached).
1271  */
1272 void __brelse(struct buffer_head * buf)
1273 {
1274         if (atomic_read(&buf->b_count)) {
1275                 put_bh(buf);
1276                 return;
1277         }
1278         printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1279         WARN_ON(1);
1280 }
1281
1282 /*
1283  * bforget() is like brelse(), except it discards any
1284  * potentially dirty data.
1285  */
1286 void __bforget(struct buffer_head *bh)
1287 {
1288         clear_buffer_dirty(bh);
1289         if (!list_empty(&bh->b_assoc_buffers)) {
1290                 struct address_space *buffer_mapping = bh->b_page->mapping;
1291
1292                 spin_lock(&buffer_mapping->private_lock);
1293                 list_del_init(&bh->b_assoc_buffers);
1294                 spin_unlock(&buffer_mapping->private_lock);
1295         }
1296         __brelse(bh);
1297 }
1298
1299 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1300 {
1301         lock_buffer(bh);
1302         if (buffer_uptodate(bh)) {
1303                 unlock_buffer(bh);
1304                 return bh;
1305         } else {
1306                 get_bh(bh);
1307                 bh->b_end_io = end_buffer_read_sync;
1308                 submit_bh(READ, bh);
1309                 wait_on_buffer(bh);
1310                 if (buffer_uptodate(bh))
1311                         return bh;
1312         }
1313         brelse(bh);
1314         return NULL;
1315 }
1316
1317 /*
1318  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1319  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1320  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1321  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1322  * CPU's LRUs at the same time.
1323  *
1324  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1325  * sb_find_get_block().
1326  *
1327  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1328  * a local interrupt disable for that.
1329  */
1330
1331 #define BH_LRU_SIZE     8
1332
1333 struct bh_lru {
1334         struct buffer_head *bhs[BH_LRU_SIZE];
1335 };
1336
1337 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1338
1339 #ifdef CONFIG_SMP
1340 #define bh_lru_lock()   local_irq_disable()
1341 #define bh_lru_unlock() local_irq_enable()
1342 #else
1343 #define bh_lru_lock()   preempt_disable()
1344 #define bh_lru_unlock() preempt_enable()
1345 #endif
1346
1347 static inline void check_irqs_on(void)
1348 {
1349 #ifdef irqs_disabled
1350         BUG_ON(irqs_disabled());
1351 #endif
1352 }
1353
1354 /*
1355  * The LRU management algorithm is dopey-but-simple.  Sorry.
1356  */
1357 static void bh_lru_install(struct buffer_head *bh)
1358 {
1359         struct buffer_head *evictee = NULL;
1360         struct bh_lru *lru;
1361
1362         check_irqs_on();
1363         bh_lru_lock();
1364         lru = &__get_cpu_var(bh_lrus);
1365         if (lru->bhs[0] != bh) {
1366                 struct buffer_head *bhs[BH_LRU_SIZE];
1367                 int in;
1368                 int out = 0;
1369
1370                 get_bh(bh);
1371                 bhs[out++] = bh;
1372                 for (in = 0; in < BH_LRU_SIZE; in++) {
1373                         struct buffer_head *bh2 = lru->bhs[in];
1374
1375                         if (bh2 == bh) {
1376                                 __brelse(bh2);
1377                         } else {
1378                                 if (out >= BH_LRU_SIZE) {
1379                                         BUG_ON(evictee != NULL);
1380                                         evictee = bh2;
1381                                 } else {
1382                                         bhs[out++] = bh2;
1383                                 }
1384                         }
1385                 }
1386                 while (out < BH_LRU_SIZE)
1387                         bhs[out++] = NULL;
1388                 memcpy(lru->bhs, bhs, sizeof(bhs));
1389         }
1390         bh_lru_unlock();
1391
1392         if (evictee)
1393                 __brelse(evictee);
1394 }
1395
1396 /*
1397  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1398  */
1399 static inline struct buffer_head *
1400 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1401 {
1402         struct buffer_head *ret = NULL;
1403         struct bh_lru *lru;
1404         int i;
1405
1406         check_irqs_on();
1407         bh_lru_lock();
1408         lru = &__get_cpu_var(bh_lrus);
1409         for (i = 0; i < BH_LRU_SIZE; i++) {
1410                 struct buffer_head *bh = lru->bhs[i];
1411
1412                 if (bh && bh->b_bdev == bdev &&
1413                                 bh->b_blocknr == block && bh->b_size == size) {
1414                         if (i) {
1415                                 while (i) {
1416                                         lru->bhs[i] = lru->bhs[i - 1];
1417                                         i--;
1418                                 }
1419                                 lru->bhs[0] = bh;
1420                         }
1421                         get_bh(bh);
1422                         ret = bh;
1423                         break;
1424                 }
1425         }
1426         bh_lru_unlock();
1427         return ret;
1428 }
1429
1430 /*
1431  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1432  * it in the LRU and mark it as accessed.  If it is not present then return
1433  * NULL
1434  */
1435 struct buffer_head *
1436 __find_get_block(struct block_device *bdev, sector_t block, int size)
1437 {
1438         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1439
1440         if (bh == NULL) {
1441                 bh = __find_get_block_slow(bdev, block);
1442                 if (bh)
1443                         bh_lru_install(bh);
1444         }
1445         if (bh)
1446                 touch_buffer(bh);
1447         return bh;
1448 }
1449 EXPORT_SYMBOL(__find_get_block);
1450
1451 /*
1452  * __getblk will locate (and, if necessary, create) the buffer_head
1453  * which corresponds to the passed block_device, block and size. The
1454  * returned buffer has its reference count incremented.
1455  *
1456  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1457  * illegal block number, __getblk() will happily return a buffer_head
1458  * which represents the non-existent block.  Very weird.
1459  *
1460  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1461  * attempt is failing.  FIXME, perhaps?
1462  */
1463 struct buffer_head *
1464 __getblk(struct block_device *bdev, sector_t block, int size)
1465 {
1466         struct buffer_head *bh = __find_get_block(bdev, block, size);
1467
1468         might_sleep();
1469         if (bh == NULL)
1470                 bh = __getblk_slow(bdev, block, size);
1471         return bh;
1472 }
1473 EXPORT_SYMBOL(__getblk);
1474
1475 /*
1476  * Do async read-ahead on a buffer..
1477  */
1478 void __breadahead(struct block_device *bdev, sector_t block, int size)
1479 {
1480         struct buffer_head *bh = __getblk(bdev, block, size);
1481         if (likely(bh)) {
1482                 ll_rw_block(READA, 1, &bh);
1483                 brelse(bh);
1484         }
1485 }
1486 EXPORT_SYMBOL(__breadahead);
1487
1488 /**
1489  *  __bread() - reads a specified block and returns the bh
1490  *  @bdev: the block_device to read from
1491  *  @block: number of block
1492  *  @size: size (in bytes) to read
1493  * 
1494  *  Reads a specified block, and returns buffer head that contains it.
1495  *  It returns NULL if the block was unreadable.
1496  */
1497 struct buffer_head *
1498 __bread(struct block_device *bdev, sector_t block, int size)
1499 {
1500         struct buffer_head *bh = __getblk(bdev, block, size);
1501
1502         if (likely(bh) && !buffer_uptodate(bh))
1503                 bh = __bread_slow(bh);
1504         return bh;
1505 }
1506 EXPORT_SYMBOL(__bread);
1507
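/*
 * Editor's note: a minimal, hypothetical __bread() caller with error
 * handling; a NULL return means the block could not be read.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
                              int size, void *buf)
{
        struct buffer_head *bh;

        bh = __bread(bdev, block, size);
        if (!bh)
                return -EIO;
        memcpy(buf, bh->b_data, size);
        brelse(bh);
        return 0;
}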
1508 /*
1509  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1510  * This doesn't race because it runs in each cpu either in irq
1511  * or with preempt disabled.
1512  */
1513 static void invalidate_bh_lru(void *arg)
1514 {
1515         struct bh_lru *b = &get_cpu_var(bh_lrus);
1516         int i;
1517
1518         for (i = 0; i < BH_LRU_SIZE; i++) {
1519                 brelse(b->bhs[i]);
1520                 b->bhs[i] = NULL;
1521         }
1522         put_cpu_var(bh_lrus);
1523 }
1524         
1525 static void invalidate_bh_lrus(void)
1526 {
1527         on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1528 }
1529
1530 void set_bh_page(struct buffer_head *bh,
1531                 struct page *page, unsigned long offset)
1532 {
1533         bh->b_page = page;
1534         if (offset >= PAGE_SIZE)
1535                 BUG();
1536         if (PageHighMem(page))
1537                 /*
1538                  * This catches illegal uses and preserves the offset:
1539                  */
1540                 bh->b_data = (char *)(0 + offset);
1541         else
1542                 bh->b_data = page_address(page) + offset;
1543 }
1544 EXPORT_SYMBOL(set_bh_page);
1545
1546 /*
1547  * Called when truncating a buffer on a page completely.
1548  */
1549 static inline void discard_buffer(struct buffer_head * bh)
1550 {
1551         lock_buffer(bh);
1552         clear_buffer_dirty(bh);
1553         bh->b_bdev = NULL;
1554         clear_buffer_mapped(bh);
1555         clear_buffer_req(bh);
1556         clear_buffer_new(bh);
1557         clear_buffer_delay(bh);
1558         unlock_buffer(bh);
1559 }
1560
1561 /**
1562  * try_to_release_page() - release old fs-specific metadata on a page
1563  *
1564  * @page: the page which the kernel is trying to free
1565  * @gfp_mask: memory allocation flags (and I/O mode)
1566  *
1567  * The address_space is asked to try to release any data held against the
1568  * page (presumably at page->private).  If the release was successful,
1569  * return `1'.  Otherwise return zero.
1570  *
1571  * The @gfp_mask argument specifies whether I/O may be performed to release
1572  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1573  *
1574  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1575  */
1576 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1577 {
1578         struct address_space * const mapping = page->mapping;
1579
1580         BUG_ON(!PageLocked(page));
1581         if (PageWriteback(page))
1582                 return 0;
1583         
1584         if (mapping && mapping->a_ops->releasepage)
1585                 return mapping->a_ops->releasepage(page, gfp_mask);
1586         return try_to_free_buffers(page);
1587 }
1588 EXPORT_SYMBOL(try_to_release_page);
1589
1590 /**
1591  * block_invalidatepage - invalidate part or all of a buffer-backed page
1592  *
1593  * @page: the page which is affected
1594  * @offset: the index of the truncation point
1595  *
1596  * block_invalidatepage() is called when all or part of the page has become
1597  * invalidated by a truncate operation.
1598  *
1599  * block_invalidatepage() does not have to release all buffers, but it must
1600  * ensure that no dirty buffer is left outside @offset and that no I/O
1601  * is underway against any of the blocks which are outside the truncation
1602  * point, because the caller is about to free (and possibly reuse) those
1603  * blocks on-disk.
1604  */
1605 int block_invalidatepage(struct page *page, unsigned long offset)
1606 {
1607         struct buffer_head *head, *bh, *next;
1608         unsigned int curr_off = 0;
1609         int ret = 1;
1610
1611         BUG_ON(!PageLocked(page));
1612         if (!page_has_buffers(page))
1613                 goto out;
1614
1615         head = page_buffers(page);
1616         bh = head;
1617         do {
1618                 unsigned int next_off = curr_off + bh->b_size;
1619                 next = bh->b_this_page;
1620
1621                 /*
1622                  * is this block fully invalidated?
1623                  */
1624                 if (offset <= curr_off)
1625                         discard_buffer(bh);
1626                 curr_off = next_off;
1627                 bh = next;
1628         } while (bh != head);
1629
1630         /*
1631          * We release buffers only if the entire page is being invalidated.
1632          * The get_block cached value has been unconditionally invalidated,
1633          * so real IO is not possible anymore.
1634          */
1635         if (offset == 0)
1636                 ret = try_to_release_page(page, 0);
1637 out:
1638         return ret;
1639 }
1640 EXPORT_SYMBOL(block_invalidatepage);
1641
1642 int do_invalidatepage(struct page *page, unsigned long offset)
1643 {
1644         int (*invalidatepage)(struct page *, unsigned long);
1645         invalidatepage = page->mapping->a_ops->invalidatepage;
1646         if (invalidatepage == NULL)
1647                 invalidatepage = block_invalidatepage;
1648         return (*invalidatepage)(page, offset);
1649 }
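
/*
 * Illustrative wiring (hypothetical filesystem "foo"): a buffer-backed
 * filesystem typically just points its address_space_operations at the
 * generic helper, which is also what do_invalidatepage() falls back to:
 *
 *      static struct address_space_operations foo_aops = {
 *              .readpage       = foo_readpage,
 *              .writepage      = foo_writepage,
 *              .invalidatepage = block_invalidatepage,
 *      };
 */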
1650
1651 /*
1652  * We attach and possibly dirty the buffers atomically wrt
1653  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1654  * is already excluded via the page lock.
1655  */
1656 void create_empty_buffers(struct page *page,
1657                         unsigned long blocksize, unsigned long b_state)
1658 {
1659         struct buffer_head *bh, *head, *tail;
1660
1661         head = alloc_page_buffers(page, blocksize, 1);
1662         bh = head;
1663         do {
1664                 bh->b_state |= b_state;
1665                 tail = bh;
1666                 bh = bh->b_this_page;
1667         } while (bh);
1668         tail->b_this_page = head;
1669
1670         spin_lock(&page->mapping->private_lock);
1671         if (PageUptodate(page) || PageDirty(page)) {
1672                 bh = head;
1673                 do {
1674                         if (PageDirty(page))
1675                                 set_buffer_dirty(bh);
1676                         if (PageUptodate(page))
1677                                 set_buffer_uptodate(bh);
1678                         bh = bh->b_this_page;
1679                 } while (bh != head);
1680         }
1681         attach_page_buffers(page, head);
1682         spin_unlock(&page->mapping->private_lock);
1683 }
1684 EXPORT_SYMBOL(create_empty_buffers);
1685
1686 /*
1687  * We are taking a block for data and we don't want any output from any
1688  * buffer-cache aliases from the moment this function returns until
1689  * something explicitly marks the buffer dirty (hopefully that will not
1690  * happen until we free that block ;-)
1691  * We don't even need to mark it not-uptodate - nobody can expect
1692  * anything from a newly allocated buffer anyway. We used to use
1693  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1694  * don't want to mark the alias unmapped, for example - it would confuse
1695  * anyone who might pick it with bread() afterwards...
1696  *
1697  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1698  * be writeout I/O going on against recently-freed buffers.  We don't
1699  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1700  * only if we really need to.  That happens here.
1701  */
1702 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1703 {
1704         struct buffer_head *old_bh;
1705
1706         might_sleep();
1707
1708         old_bh = __find_get_block_slow(bdev, block);
1709         if (old_bh) {
1710                 clear_buffer_dirty(old_bh);
1711                 wait_on_buffer(old_bh);
1712                 clear_buffer_req(old_bh);
1713                 __brelse(old_bh);
1714         }
1715 }
1716 EXPORT_SYMBOL(unmap_underlying_metadata);
1717
1718 /*
1719  * NOTE! All mapped/uptodate combinations are valid:
1720  *
1721  *      Mapped  Uptodate        Meaning
1722  *
1723  *      No      No              "unknown" - must do get_block()
1724  *      No      Yes             "hole" - zero-filled
1725  *      Yes     No              "allocated" - allocated on disk, not read in
1726  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1727  *
1728  * "Dirty" is valid only with the last case (mapped+uptodate).
1729  */
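
/*
 * A sketch of how a reader acts on the table above (caller-side logic,
 * assumed for illustration; error handling elided):
 *
 *      if (!buffer_mapped(bh))
 *              get_block(inode, block, bh, 0);    - resolve "unknown"
 *      if (buffer_mapped(bh) && !buffer_uptodate(bh))
 *              ll_rw_block(READ, 1, &bh);         - read "allocated" in
 *      if (!buffer_mapped(bh) && buffer_uptodate(bh))
 *              memset(data, 0, size);             - "hole": all zeroes
 */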
1730
1731 /*
1732  * While block_write_full_page is writing back the dirty buffers under
1733  * the page lock, whoever dirtied the buffers may decide to clean them
1734  * again at any time.  We handle that by only looking at the buffer
1735  * state inside lock_buffer().
1736  *
1737  * If block_write_full_page() is called for regular writeback
1738  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1739  * locked buffer.   This only can happen if someone has written the buffer
1740  * directly, with submit_bh().  At the address_space level PageWriteback
1741  * prevents this contention from occurring.
1742  */
1743 static int __block_write_full_page(struct inode *inode, struct page *page,
1744                         get_block_t *get_block, struct writeback_control *wbc)
1745 {
1746         int err;
1747         sector_t block;
1748         sector_t last_block;
1749         struct buffer_head *bh, *head;
1750         int nr_underway = 0;
1751
1752         BUG_ON(!PageLocked(page));
1753
1754         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1755
1756         if (!page_has_buffers(page)) {
1757                 create_empty_buffers(page, 1 << inode->i_blkbits,
1758                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1759         }
1760
1761         /*
1762          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1763          * here, and the (potentially unmapped) buffers may become dirty at
1764          * any time.  If a buffer becomes dirty here after we've inspected it
1765          * then we just miss that fact, and the page stays dirty.
1766          *
1767          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1768          * handle that here by just cleaning them.
1769          */
1770
1771         block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1772         head = page_buffers(page);
1773         bh = head;
1774
1775         /*
1776          * Get all the dirty buffers mapped to disk addresses and
1777          * handle any aliases from the underlying blockdev's mapping.
1778          */
1779         do {
1780                 if (block > last_block) {
1781                         /*
1782                          * mapped buffers outside i_size will occur, because
1783                          * this page can be outside i_size when there is a
1784                          * truncate in progress.
1785                          */
1786                         /*
1787                          * The buffer was zeroed by block_write_full_page()
1788                          */
1789                         clear_buffer_dirty(bh);
1790                         set_buffer_uptodate(bh);
1791                 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1792                         err = get_block(inode, block, bh, 1);
1793                         if (err)
1794                                 goto recover;
1795                         if (buffer_new(bh)) {
1796                                 /* blockdev mappings never come here */
1797                                 clear_buffer_new(bh);
1798                                 unmap_underlying_metadata(bh->b_bdev,
1799                                                         bh->b_blocknr);
1800                         }
1801                 }
1802                 bh = bh->b_this_page;
1803                 block++;
1804         } while (bh != head);
1805
1806         do {
1807                 if (!buffer_mapped(bh))
1808                         continue;
1809                 /*
1810                  * If it's a fully non-blocking write attempt and we cannot
1811                  * lock the buffer then redirty the page.  Note that this can
1812                  * potentially cause a busy-wait loop from pdflush and kswapd
1813                  * activity, but those code paths have their own higher-level
1814                  * throttling.
1815                  */
1816                 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1817                         lock_buffer(bh);
1818                 } else if (test_set_buffer_locked(bh)) {
1819                         redirty_page_for_writepage(wbc, page);
1820                         continue;
1821                 }
1822                 if (test_clear_buffer_dirty(bh)) {
1823                         mark_buffer_async_write(bh);
1824                 } else {
1825                         unlock_buffer(bh);
1826                 }
1827         } while ((bh = bh->b_this_page) != head);
1828
1829         /*
1830          * The page and its buffers are protected by PageWriteback(), so we can
1831          * drop the bh refcounts early.
1832          */
1833         BUG_ON(PageWriteback(page));
1834         set_page_writeback(page);
1835
1836         do {
1837                 struct buffer_head *next = bh->b_this_page;
1838                 if (buffer_async_write(bh)) {
1839                         submit_bh(WRITE, bh);
1840                         nr_underway++;
1841                 }
1842                 bh = next;
1843         } while (bh != head);
1844         unlock_page(page);
1845
1846         err = 0;
1847 done:
1848         if (nr_underway == 0) {
1849                 /*
1850                  * The page was marked dirty, but the buffers were
1851                  * clean.  Someone wrote them back by hand with
1852                  * ll_rw_block/submit_bh.  A rare case.
1853                  */
1854                 int uptodate = 1;
1855                 do {
1856                         if (!buffer_uptodate(bh)) {
1857                                 uptodate = 0;
1858                                 break;
1859                         }
1860                         bh = bh->b_this_page;
1861                 } while (bh != head);
1862                 if (uptodate)
1863                         SetPageUptodate(page);
1864                 end_page_writeback(page);
1865                 /*
1866                  * The page and buffer_heads can be released at any time from
1867                  * here on.
1868                  */
1869                 wbc->pages_skipped++;   /* We didn't write this page */
1870         }
1871         return err;
1872
1873 recover:
1874         /*
1875          * ENOSPC, or some other error.  We may already have added some
1876          * blocks to the file, so we need to write these out to avoid
1877          * exposing stale data.
1878          * The page is currently locked and not marked for writeback
1879          */
1880         bh = head;
1881         /* Recovery: lock and submit the mapped buffers */
1882         do {
1883                 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1884                         lock_buffer(bh);
1885                         mark_buffer_async_write(bh);
1886                 } else {
1887                         /*
1888                          * The buffer may have been set dirty during
1889                          * attachment to a dirty page.
1890                          */
1891                         clear_buffer_dirty(bh);
1892                 }
1893         } while ((bh = bh->b_this_page) != head);
1894         SetPageError(page);
1895         BUG_ON(PageWriteback(page));
1896         set_page_writeback(page);
1897         unlock_page(page);
1898         do {
1899                 struct buffer_head *next = bh->b_this_page;
1900                 if (buffer_async_write(bh)) {
1901                         clear_buffer_dirty(bh);
1902                         submit_bh(WRITE, bh);
1903                         nr_underway++;
1904                 }
1905                 bh = next;
1906         } while (bh != head);
1907         goto done;
1908 }
1909
1910 static int __block_prepare_write(struct inode *inode, struct page *page,
1911                 unsigned from, unsigned to, get_block_t *get_block)
1912 {
1913         unsigned block_start, block_end;
1914         sector_t block;
1915         int err = 0;
1916         unsigned blocksize, bbits;
1917         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1918
1919         BUG_ON(!PageLocked(page));
1920         BUG_ON(from > PAGE_CACHE_SIZE);
1921         BUG_ON(to > PAGE_CACHE_SIZE);
1922         BUG_ON(from > to);
1923
1924         blocksize = 1 << inode->i_blkbits;
1925         if (!page_has_buffers(page))
1926                 create_empty_buffers(page, blocksize, 0);
1927         head = page_buffers(page);
1928
1929         bbits = inode->i_blkbits;
1930         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1931
1932         for(bh = head, block_start = 0; bh != head || !block_start;
1933             block++, block_start=block_end, bh = bh->b_this_page) {
1934                 block_end = block_start + blocksize;
1935                 if (block_end <= from || block_start >= to) {
1936                         if (PageUptodate(page)) {
1937                                 if (!buffer_uptodate(bh))
1938                                         set_buffer_uptodate(bh);
1939                         }
1940                         continue;
1941                 }
1942                 if (buffer_new(bh))
1943                         clear_buffer_new(bh);
1944                 if (!buffer_mapped(bh)) {
1945                         err = get_block(inode, block, bh, 1);
1946                         if (err)
1947                                 break;
1948                         if (buffer_new(bh)) {
1949                                 unmap_underlying_metadata(bh->b_bdev,
1950                                                         bh->b_blocknr);
1951                                 if (PageUptodate(page)) {
1952                                         set_buffer_uptodate(bh);
1953                                         continue;
1954                                 }
1955                                 if (block_end > to || block_start < from) {
1956                                         void *kaddr;
1957
1958                                         kaddr = kmap_atomic(page, KM_USER0);
1959                                         if (block_end > to)
1960                                                 memset(kaddr+to, 0,
1961                                                         block_end-to);
1962                                         if (block_start < from)
1963                                                 memset(kaddr+block_start,
1964                                                         0, from-block_start);
1965                                         flush_dcache_page(page);
1966                                         kunmap_atomic(kaddr, KM_USER0);
1967                                 }
1968                                 continue;
1969                         }
1970                 }
1971                 if (PageUptodate(page)) {
1972                         if (!buffer_uptodate(bh))
1973                                 set_buffer_uptodate(bh);
1974                         continue; 
1975                 }
1976                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1977                      (block_start < from || block_end > to)) {
1978                         ll_rw_block(READ, 1, &bh);
1979                         *wait_bh++=bh;
1980                 }
1981         }
1982         /*
1983          * If we issued read requests - let them complete.
1984          */
1985         while(wait_bh > wait) {
1986                 wait_on_buffer(*--wait_bh);
1987                 if (!buffer_uptodate(*wait_bh))
1988                         err = -EIO;
1989         }
1990         if (!err) {
1991                 bh = head;
1992                 do {
1993                         if (buffer_new(bh))
1994                                 clear_buffer_new(bh);
1995                 } while ((bh = bh->b_this_page) != head);
1996                 return 0;
1997         }
1998         /* Error case: */
1999         /*
2000          * Zero out any newly allocated blocks to avoid exposing stale
2001          * data.  If BH_New is set, we know that the block was newly
2002          * allocated in the above loop.
2003          */
2004         bh = head;
2005         block_start = 0;
2006         do {
2007                 block_end = block_start+blocksize;
2008                 if (block_end <= from)
2009                         goto next_bh;
2010                 if (block_start >= to)
2011                         break;
2012                 if (buffer_new(bh)) {
2013                         void *kaddr;
2014
2015                         clear_buffer_new(bh);
2016                         kaddr = kmap_atomic(page, KM_USER0);
2017                         memset(kaddr+block_start, 0, bh->b_size);
2018                         kunmap_atomic(kaddr, KM_USER0);
2019                         set_buffer_uptodate(bh);
2020                         mark_buffer_dirty(bh);
2021                 }
2022 next_bh:
2023                 block_start = block_end;
2024                 bh = bh->b_this_page;
2025         } while (bh != head);
2026         return err;
2027 }
2028
2029 static int __block_commit_write(struct inode *inode, struct page *page,
2030                 unsigned from, unsigned to)
2031 {
2032         unsigned block_start, block_end;
2033         int partial = 0;
2034         unsigned blocksize;
2035         struct buffer_head *bh, *head;
2036
2037         blocksize = 1 << inode->i_blkbits;
2038
2039         for(bh = head = page_buffers(page), block_start = 0;
2040             bh != head || !block_start;
2041             block_start=block_end, bh = bh->b_this_page) {
2042                 block_end = block_start + blocksize;
2043                 if (block_end <= from || block_start >= to) {
2044                         if (!buffer_uptodate(bh))
2045                                 partial = 1;
2046                 } else {
2047                         set_buffer_uptodate(bh);
2048                         mark_buffer_dirty(bh);
2049                 }
2050         }
2051
2052         /*
2053          * If this is a partial write which happened to make all buffers
2054          * uptodate then we can optimize away a bogus readpage() for
2055          * the next read(). Here we 'discover' whether the page went
2056          * uptodate as a result of this (potentially partial) write.
2057          */
2058         if (!partial)
2059                 SetPageUptodate(page);
2060         return 0;
2061 }
2062
2063 /*
2064  * Generic "read page" function for block devices that have the normal
2065  * get_block functionality. This is most of the block device filesystems.
2066  * Reads the page asynchronously --- the unlock_buffer() and
2067  * set/clear_buffer_uptodate() functions propagate buffer state into the
2068  * page struct once IO has completed.
2069  */
2070 int block_read_full_page(struct page *page, get_block_t *get_block)
2071 {
2072         struct inode *inode = page->mapping->host;
2073         sector_t iblock, lblock;
2074         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2075         unsigned int blocksize;
2076         int nr, i;
2077         int fully_mapped = 1;
2078
2079         BUG_ON(!PageLocked(page));
2080         blocksize = 1 << inode->i_blkbits;
2081         if (!page_has_buffers(page))
2082                 create_empty_buffers(page, blocksize, 0);
2083         head = page_buffers(page);
2084
2085         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2086         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2087         bh = head;
2088         nr = 0;
2089         i = 0;
2090
2091         do {
2092                 if (buffer_uptodate(bh))
2093                         continue;
2094
2095                 if (!buffer_mapped(bh)) {
2096                         int err = 0;
2097
2098                         fully_mapped = 0;
2099                         if (iblock < lblock) {
2100                                 err = get_block(inode, iblock, bh, 0);
2101                                 if (err)
2102                                         SetPageError(page);
2103                         }
2104                         if (!buffer_mapped(bh)) {
2105                                 void *kaddr = kmap_atomic(page, KM_USER0);
2106                                 memset(kaddr + i * blocksize, 0, blocksize);
2107                                 flush_dcache_page(page);
2108                                 kunmap_atomic(kaddr, KM_USER0);
2109                                 if (!err)
2110                                         set_buffer_uptodate(bh);
2111                                 continue;
2112                         }
2113                         /*
2114                          * get_block() might have updated the buffer
2115                          * synchronously
2116                          */
2117                         if (buffer_uptodate(bh))
2118                                 continue;
2119                 }
2120                 arr[nr++] = bh;
2121         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2122
2123         if (fully_mapped)
2124                 SetPageMappedToDisk(page);
2125
2126         if (!nr) {
2127                 /*
2128                  * All buffers are uptodate - we can set the page uptodate
2129                  * as well. But not if get_block() returned an error.
2130                  */
2131                 if (!PageError(page))
2132                         SetPageUptodate(page);
2133                 unlock_page(page);
2134                 return 0;
2135         }
2136
2137         /* Stage two: lock the buffers */
2138         for (i = 0; i < nr; i++) {
2139                 bh = arr[i];
2140                 lock_buffer(bh);
2141                 mark_buffer_async_read(bh);
2142         }
2143
2144         /*
2145          * Stage 3: start the IO.  Check for uptodateness
2146          * inside the buffer lock in case another process reading
2147          * the underlying blockdev brought it uptodate (the sct fix).
2148          */
2149         for (i = 0; i < nr; i++) {
2150                 bh = arr[i];
2151                 if (buffer_uptodate(bh))
2152                         end_buffer_async_read(bh, 1);
2153                 else
2154                         submit_bh(READ, bh);
2155         }
2156         return 0;
2157 }
2158
2159 /* utility function for filesystems that need to do work on expanding
2160  * truncates.  Uses prepare/commit_write to allow the filesystem to
2161  * deal with the hole.  
2162  */
2163 static int __generic_cont_expand(struct inode *inode, loff_t size,
2164                                  pgoff_t index, unsigned int offset)
2165 {
2166         struct address_space *mapping = inode->i_mapping;
2167         struct page *page;
2168         unsigned long limit;
2169         int err;
2170
2171         err = -EFBIG;
2172         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2173         if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2174                 send_sig(SIGXFSZ, current, 0);
2175                 goto out;
2176         }
2177         if (size > inode->i_sb->s_maxbytes)
2178                 goto out;
2179
2180         err = -ENOMEM;
2181         page = grab_cache_page(mapping, index);
2182         if (!page)
2183                 goto out;
2184         err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2185         if (err) {
2186                 /*
2187                  * ->prepare_write() may have instantiated a few blocks
2188                  * outside i_size.  Trim these off again.
2189                  */
2190                 unlock_page(page);
2191                 page_cache_release(page);
2192                 vmtruncate(inode, inode->i_size);
2193                 goto out;
2194         }
2195
2196         err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2197
2198         unlock_page(page);
2199         page_cache_release(page);
2200         if (err > 0)
2201                 err = 0;
2202 out:
2203         return err;
2204 }
2205
2206 int generic_cont_expand(struct inode *inode, loff_t size)
2207 {
2208         pgoff_t index;
2209         unsigned int offset;
2210
2211         offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2212
2213         /* ugh.  in prepare/commit_write, if from==to==start of block, we
2214          * skip the prepare.  make sure we never send an offset for the start
2215          * of a block
2216          */
2217         if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2218                 /* caller must handle this extra byte. */
2219                 offset++;
2220         }
2221         index = size >> PAGE_CACHE_SHIFT;
2222
2223         return __generic_cont_expand(inode, size, index, offset);
2224 }
2225
2226 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2227 {
2228         loff_t pos = size - 1;
2229         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2230         unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2231
2232         /* prepare/commit_write can handle even if from==to==start of block. */
2233         return __generic_cont_expand(inode, size, index, offset);
2234 }
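
/*
 * Hedged example (modelled on what an expanding truncate in a cont-style
 * filesystem might do; names are illustrative): a ->truncate/->setattr
 * path growing the file would call
 *
 *      if (new_size > inode->i_size) {
 *              err = generic_cont_expand(inode, new_size);
 *              if (err)
 *                      return err;
 *      }
 *
 * so that the region up to the new EOF is instantiated and zeroed via
 * prepare/commit_write.
 */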
2235
2236 /*
2237  * For moronic filesystems that do not allow holes in files.
2238  * We may have to extend the file.
2239  */
2240
2241 int cont_prepare_write(struct page *page, unsigned offset,
2242                 unsigned to, get_block_t *get_block, loff_t *bytes)
2243 {
2244         struct address_space *mapping = page->mapping;
2245         struct inode *inode = mapping->host;
2246         struct page *new_page;
2247         pgoff_t pgpos;
2248         long status;
2249         unsigned zerofrom;
2250         unsigned blocksize = 1 << inode->i_blkbits;
2251         void *kaddr;
2252
2253         while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2254                 status = -ENOMEM;
2255                 new_page = grab_cache_page(mapping, pgpos);
2256                 if (!new_page)
2257                         goto out;
2258                 /* we might sleep */
2259                 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2260                         unlock_page(new_page);
2261                         page_cache_release(new_page);
2262                         continue;
2263                 }
2264                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2265                 if (zerofrom & (blocksize-1)) {
2266                         *bytes |= (blocksize-1);
2267                         (*bytes)++;
2268                 }
2269                 status = __block_prepare_write(inode, new_page, zerofrom,
2270                                                 PAGE_CACHE_SIZE, get_block);
2271                 if (status)
2272                         goto out_unmap;
2273                 kaddr = kmap_atomic(new_page, KM_USER0);
2274                 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2275                 flush_dcache_page(new_page);
2276                 kunmap_atomic(kaddr, KM_USER0);
2277                 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2278                 unlock_page(new_page);
2279                 page_cache_release(new_page);
2280         }
2281
2282         if (page->index < pgpos) {
2283                 /* completely inside the area */
2284                 zerofrom = offset;
2285         } else {
2286                 /* page covers the boundary, find the boundary offset */
2287                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2288
2289                 /* if we will expand the thing last block will be filled */
2290                 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2291                         *bytes |= (blocksize-1);
2292                         (*bytes)++;
2293                 }
2294
2295                 /* starting below the boundary? Nothing to zero out */
2296                 if (offset <= zerofrom)
2297                         zerofrom = offset;
2298         }
2299         status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2300         if (status)
2301                 goto out1;
2302         if (zerofrom < offset) {
2303                 kaddr = kmap_atomic(page, KM_USER0);
2304                 memset(kaddr+zerofrom, 0, offset-zerofrom);
2305                 flush_dcache_page(page);
2306                 kunmap_atomic(kaddr, KM_USER0);
2307                 __block_commit_write(inode, page, zerofrom, offset);
2308         }
2309         return 0;
2310 out1:
2311         ClearPageUptodate(page);
2312         return status;
2313
2314 out_unmap:
2315         ClearPageUptodate(new_page);
2316         unlock_page(new_page);
2317         page_cache_release(new_page);
2318 out:
2319         return status;
2320 }
2321
2322 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2323                         get_block_t *get_block)
2324 {
2325         struct inode *inode = page->mapping->host;
2326         int err = __block_prepare_write(inode, page, from, to, get_block);
2327         if (err)
2328                 ClearPageUptodate(page);
2329         return err;
2330 }
2331
2332 int block_commit_write(struct page *page, unsigned from, unsigned to)
2333 {
2334         struct inode *inode = page->mapping->host;
2335         __block_commit_write(inode,page,from,to);
2336         return 0;
2337 }
2338
2339 int generic_commit_write(struct file *file, struct page *page,
2340                 unsigned from, unsigned to)
2341 {
2342         struct inode *inode = page->mapping->host;
2343         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2344         __block_commit_write(inode,page,from,to);
2345         /*
2346          * No need to use i_size_read() here, the i_size
2347          * cannot change under us because we hold i_sem.
2348          */
2349         if (pos > inode->i_size) {
2350                 i_size_write(inode, pos);
2351                 mark_inode_dirty(inode);
2352         }
2353         return 0;
2354 }
2355
2356
2357 /*
2358  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2359  * immediately, while under the page lock.  So it needs a special end_io
2360  * handler which does not touch the bh after unlocking it.
2361  *
2362  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2363  * a race there is benign: unlock_buffer() only uses the bh's address for
2364  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2365  * itself.
2366  */
2367 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2368 {
2369         if (uptodate) {
2370                 set_buffer_uptodate(bh);
2371         } else {
2372                 /* This happens, due to failed READA attempts. */
2373                 clear_buffer_uptodate(bh);
2374         }
2375         unlock_buffer(bh);
2376 }
2377
2378 /*
2379  * On entry, the page is fully not uptodate.
2380  * On exit the page is fully uptodate in the areas outside (from,to)
2381  */
2382 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2383                         get_block_t *get_block)
2384 {
2385         struct inode *inode = page->mapping->host;
2386         const unsigned blkbits = inode->i_blkbits;
2387         const unsigned blocksize = 1 << blkbits;
2388         struct buffer_head map_bh;
2389         struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2390         unsigned block_in_page;
2391         unsigned block_start;
2392         sector_t block_in_file;
2393         char *kaddr;
2394         int nr_reads = 0;
2395         int i;
2396         int ret = 0;
2397         int is_mapped_to_disk = 1;
2398         int dirtied_it = 0;
2399
2400         if (PageMappedToDisk(page))
2401                 return 0;
2402
2403         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2404         map_bh.b_page = page;
2405
2406         /*
2407          * We loop across all blocks in the page, whether or not they are
2408          * part of the affected region.  This is so we can discover if the
2409          * page is fully mapped-to-disk.
2410          */
2411         for (block_start = 0, block_in_page = 0;
2412                   block_start < PAGE_CACHE_SIZE;
2413                   block_in_page++, block_start += blocksize) {
2414                 unsigned block_end = block_start + blocksize;
2415                 int create;
2416
2417                 map_bh.b_state = 0;
2418                 create = 1;
2419                 if (block_start >= to)
2420                         create = 0;
2421                 ret = get_block(inode, block_in_file + block_in_page,
2422                                         &map_bh, create);
2423                 if (ret)
2424                         goto failed;
2425                 if (!buffer_mapped(&map_bh))
2426                         is_mapped_to_disk = 0;
2427                 if (buffer_new(&map_bh))
2428                         unmap_underlying_metadata(map_bh.b_bdev,
2429                                                         map_bh.b_blocknr);
2430                 if (PageUptodate(page))
2431                         continue;
2432                 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2433                         kaddr = kmap_atomic(page, KM_USER0);
2434                         if (block_start < from) {
2435                                 memset(kaddr+block_start, 0, from-block_start);
2436                                 dirtied_it = 1;
2437                         }
2438                         if (block_end > to) {
2439                                 memset(kaddr + to, 0, block_end - to);
2440                                 dirtied_it = 1;
2441                         }
2442                         flush_dcache_page(page);
2443                         kunmap_atomic(kaddr, KM_USER0);
2444                         continue;
2445                 }
2446                 if (buffer_uptodate(&map_bh))
2447                         continue;       /* reiserfs does this */
2448                 if (block_start < from || block_end > to) {
2449                         struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2450
2451                         if (!bh) {
2452                                 ret = -ENOMEM;
2453                                 goto failed;
2454                         }
2455                         bh->b_state = map_bh.b_state;
2456                         atomic_set(&bh->b_count, 0);
2457                         bh->b_this_page = NULL;
2458                         bh->b_page = page;
2459                         bh->b_blocknr = map_bh.b_blocknr;
2460                         bh->b_size = blocksize;
2461                         bh->b_data = (char *)(long)block_start;
2462                         bh->b_bdev = map_bh.b_bdev;
2463                         bh->b_private = NULL;
2464                         read_bh[nr_reads++] = bh;
2465                 }
2466         }
2467
2468         if (nr_reads) {
2469                 struct buffer_head *bh;
2470
2471                 /*
2472                  * The page is locked, so these buffers are protected from
2473                  * any VM or truncate activity.  Hence we don't need to care
2474                  * for the buffer_head refcounts.
2475                  */
2476                 for (i = 0; i < nr_reads; i++) {
2477                         bh = read_bh[i];
2478                         lock_buffer(bh);
2479                         bh->b_end_io = end_buffer_read_nobh;
2480                         submit_bh(READ, bh);
2481                 }
2482                 for (i = 0; i < nr_reads; i++) {
2483                         bh = read_bh[i];
2484                         wait_on_buffer(bh);
2485                         if (!buffer_uptodate(bh))
2486                                 ret = -EIO;
2487                         free_buffer_head(bh);
2488                         read_bh[i] = NULL;
2489                 }
2490                 if (ret)
2491                         goto failed;
2492         }
2493
2494         if (is_mapped_to_disk)
2495                 SetPageMappedToDisk(page);
2496         SetPageUptodate(page);
2497
2498         /*
2499          * Setting the page dirty here isn't necessary for the prepare_write
2500          * function - commit_write will do that.  But if/when this function is
2501          * used within the pagefault handler to ensure that all mmapped pages
2502          * have backing space in the filesystem, we will need to dirty the page
2503          * if its contents were altered.
2504          */
2505         if (dirtied_it)
2506                 set_page_dirty(page);
2507
2508         return 0;
2509
2510 failed:
2511         for (i = 0; i < nr_reads; i++) {
2512                 if (read_bh[i])
2513                         free_buffer_head(read_bh[i]);
2514         }
2515
2516         /*
2517          * Error recovery is pretty slack.  Clear the page and mark it dirty
2518          * so we'll later zero out any blocks which _were_ allocated.
2519          */
2520         kaddr = kmap_atomic(page, KM_USER0);
2521         memset(kaddr, 0, PAGE_CACHE_SIZE);
2522         kunmap_atomic(kaddr, KM_USER0);
2523         SetPageUptodate(page);
2524         set_page_dirty(page);
2525         return ret;
2526 }
2527 EXPORT_SYMBOL(nobh_prepare_write);
2528
2529 int nobh_commit_write(struct file *file, struct page *page,
2530                 unsigned from, unsigned to)
2531 {
2532         struct inode *inode = page->mapping->host;
2533         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2534
2535         set_page_dirty(page);
2536         if (pos > inode->i_size) {
2537                 i_size_write(inode, pos);
2538                 mark_inode_dirty(inode);
2539         }
2540         return 0;
2541 }
2542 EXPORT_SYMBOL(nobh_commit_write);
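
/*
 * Illustrative aop wiring (hypothetical filesystem "foo"): the nobh
 * variants slot in where the buffered helpers would go.  prepare_write
 * needs a small wrapper to supply the filesystem's get_block:
 *
 *      static int foo_prepare_write(struct file *file, struct page *page,
 *                                      unsigned from, unsigned to)
 *      {
 *              return nobh_prepare_write(page, from, to, foo_get_block);
 *      }
 *
 *      static struct address_space_operations foo_aops = {
 *              .prepare_write  = foo_prepare_write,
 *              .commit_write   = nobh_commit_write,
 *      };
 */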
2543
2544 /*
2545  * nobh_writepage() - based on block_write_full_page() except
2546  * that it tries to operate without attaching bufferheads to
2547  * the page.
2548  */
2549 int nobh_writepage(struct page *page, get_block_t *get_block,
2550                         struct writeback_control *wbc)
2551 {
2552         struct inode * const inode = page->mapping->host;
2553         loff_t i_size = i_size_read(inode);
2554         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2555         unsigned offset;
2556         void *kaddr;
2557         int ret;
2558
2559         /* Is the page fully inside i_size? */
2560         if (page->index < end_index)
2561                 goto out;
2562
2563         /* Is the page fully outside i_size? (truncate in progress) */
2564         offset = i_size & (PAGE_CACHE_SIZE-1);
2565         if (page->index >= end_index+1 || !offset) {
2566                 /*
2567                  * The page may have dirty, unmapped buffers.  For example,
2568                  * they may have been added in ext3_writepage().  Make them
2569                  * freeable here, so the page does not leak.
2570                  */
2571 #if 0
2572                 /* Not really sure about this  - do we need this ? */
2573                 if (page->mapping->a_ops->invalidatepage)
2574                         page->mapping->a_ops->invalidatepage(page, offset);
2575 #endif
2576                 unlock_page(page);
2577                 return 0; /* don't care */
2578         }
2579
2580         /*
2581          * The page straddles i_size.  It must be zeroed out on each and every
2582          * writepage invocation because it may be mmapped.  "A file is mapped
2583          * in multiples of the page size.  For a file that is not a multiple of
2584          * the  page size, the remaining memory is zeroed when mapped, and
2585          * writes to that region are not written out to the file."
2586          */
2587         kaddr = kmap_atomic(page, KM_USER0);
2588         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2589         flush_dcache_page(page);
2590         kunmap_atomic(kaddr, KM_USER0);
2591 out:
2592         ret = mpage_writepage(page, get_block, wbc);
2593         if (ret == -EAGAIN)
2594                 ret = __block_write_full_page(inode, page, get_block, wbc);
2595         return ret;
2596 }
2597 EXPORT_SYMBOL(nobh_writepage);
2598
2599 /*
2600  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2601  */
2602 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2603 {
2604         struct inode *inode = mapping->host;
2605         unsigned blocksize = 1 << inode->i_blkbits;
2606         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2607         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2608         unsigned to;
2609         struct page *page;
2610         struct address_space_operations *a_ops = mapping->a_ops;
2611         char *kaddr;
2612         int ret = 0;
2613
2614         if ((offset & (blocksize - 1)) == 0)
2615                 goto out;
2616
2617         ret = -ENOMEM;
2618         page = grab_cache_page(mapping, index);
2619         if (!page)
2620                 goto out;
2621
2622         to = (offset + blocksize) & ~(blocksize - 1);
2623         ret = a_ops->prepare_write(NULL, page, offset, to);
2624         if (ret == 0) {
2625                 kaddr = kmap_atomic(page, KM_USER0);
2626                 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2627                 flush_dcache_page(page);
2628                 kunmap_atomic(kaddr, KM_USER0);
2629                 set_page_dirty(page);
2630         }
2631         unlock_page(page);
2632         page_cache_release(page);
2633 out:
2634         return ret;
2635 }
2636 EXPORT_SYMBOL(nobh_truncate_page);
2637
2638 int block_truncate_page(struct address_space *mapping,
2639                         loff_t from, get_block_t *get_block)
2640 {
2641         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2642         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2643         unsigned blocksize;
2644         pgoff_t iblock;
2645         unsigned length, pos;
2646         struct inode *inode = mapping->host;
2647         struct page *page;
2648         struct buffer_head *bh;
2649         void *kaddr;
2650         int err;
2651
2652         blocksize = 1 << inode->i_blkbits;
2653         length = offset & (blocksize - 1);
2654
2655         /* Block boundary? Nothing to do */
2656         if (!length)
2657                 return 0;
2658
2659         length = blocksize - length;
2660         iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2661         
2662         page = grab_cache_page(mapping, index);
2663         err = -ENOMEM;
2664         if (!page)
2665                 goto out;
2666
2667         if (!page_has_buffers(page))
2668                 create_empty_buffers(page, blocksize, 0);
2669
2670         /* Find the buffer that contains "offset" */
2671         bh = page_buffers(page);
2672         pos = blocksize;
2673         while (offset >= pos) {
2674                 bh = bh->b_this_page;
2675                 iblock++;
2676                 pos += blocksize;
2677         }
2678
2679         err = 0;
2680         if (!buffer_mapped(bh)) {
2681                 err = get_block(inode, iblock, bh, 0);
2682                 if (err)
2683                         goto unlock;
2684                 /* unmapped? It's a hole - nothing to do */
2685                 if (!buffer_mapped(bh))
2686                         goto unlock;
2687         }
2688
2689         /* Ok, it's mapped. Make sure it's up-to-date */
2690         if (PageUptodate(page))
2691                 set_buffer_uptodate(bh);
2692
2693         if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2694                 err = -EIO;
2695                 ll_rw_block(READ, 1, &bh);
2696                 wait_on_buffer(bh);
2697                 /* Uhhuh. Read error. Complain and punt. */
2698                 if (!buffer_uptodate(bh))
2699                         goto unlock;
2700         }
2701
2702         kaddr = kmap_atomic(page, KM_USER0);
2703         memset(kaddr + offset, 0, length);
2704         flush_dcache_page(page);
2705         kunmap_atomic(kaddr, KM_USER0);
2706
2707         mark_buffer_dirty(bh);
2708         err = 0;
2709
2710 unlock:
2711         unlock_page(page);
2712         page_cache_release(page);
2713 out:
2714         return err;
2715 }
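
/*
 * Usage sketch (hypothetical ->truncate implementation): zero out the
 * tail of the last partial block once i_size has been cut down:
 *
 *      block_truncate_page(inode->i_mapping, inode->i_size, foo_get_block);
 *
 * where foo_get_block is the filesystem's get_block_t.
 */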
2716
2717 /*
2718  * The generic ->writepage function for buffer-backed address_spaces
2719  */
2720 int block_write_full_page(struct page *page, get_block_t *get_block,
2721                         struct writeback_control *wbc)
2722 {
2723         struct inode * const inode = page->mapping->host;
2724         loff_t i_size = i_size_read(inode);
2725         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2726         unsigned offset;
2727         void *kaddr;
2728
2729         /* Is the page fully inside i_size? */
2730         if (page->index < end_index)
2731                 return __block_write_full_page(inode, page, get_block, wbc);
2732
2733         /* Is the page fully outside i_size? (truncate in progress) */
2734         offset = i_size & (PAGE_CACHE_SIZE-1);
2735         if (page->index >= end_index+1 || !offset) {
2736                 /*
2737                  * The page may have dirty, unmapped buffers.  For example,
2738                  * they may have been added in ext3_writepage().  Make them
2739                  * freeable here, so the page does not leak.
2740                  */
2741                 do_invalidatepage(page, 0);
2742                 unlock_page(page);
2743                 return 0; /* don't care */
2744         }
2745
2746         /*
2747          * The page straddles i_size.  It must be zeroed out on each and every
2748  * writepage invocation because it may be mmapped.  "A file is mapped
2749          * in multiples of the page size.  For a file that is not a multiple of
2750          * the  page size, the remaining memory is zeroed when mapped, and
2751          * writes to that region are not written out to the file."
2752          */
2753         kaddr = kmap_atomic(page, KM_USER0);
2754         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2755         flush_dcache_page(page);
2756         kunmap_atomic(kaddr, KM_USER0);
2757         return __block_write_full_page(inode, page, get_block, wbc);
2758 }
2759
2760 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2761                             get_block_t *get_block)
2762 {
2763         struct buffer_head tmp;
2764         struct inode *inode = mapping->host;
2765         tmp.b_state = 0;
2766         tmp.b_blocknr = 0;
2767         get_block(inode, block, &tmp, 0);
2768         return tmp.b_blocknr;
2769 }
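
/*
 * Sketch of the usual ->bmap wrapper (hypothetical filesystem "foo"):
 *
 *      static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *      {
 *              return generic_block_bmap(mapping, block, foo_get_block);
 *      }
 *
 * A zero return means the block is unmapped (a hole): tmp.b_blocknr
 * starts out zero and get_block() is called without create.
 */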
2770
2771 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2772 {
2773         struct buffer_head *bh = bio->bi_private;
2774
2775         if (bio->bi_size)
2776                 return 1;
2777
2778         if (err == -EOPNOTSUPP) {
2779                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2780                 set_bit(BH_Eopnotsupp, &bh->b_state);
2781         }
2782
2783         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2784         bio_put(bio);
2785         return 0;
2786 }
2787
2788 int submit_bh(int rw, struct buffer_head * bh)
2789 {
2790         struct bio *bio;
2791         int ret = 0;
2792
2793         BUG_ON(!buffer_locked(bh));
2794         BUG_ON(!buffer_mapped(bh));
2795         BUG_ON(!bh->b_end_io);
2796
2797         if (buffer_ordered(bh) && (rw == WRITE))
2798                 rw = WRITE_BARRIER;
2799
2800         /*
2801          * Only clear out a write error when rewriting; should this
2802          * include WRITE_SYNC as well?
2803          */
2804         if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2805                 clear_buffer_write_io_error(bh);
2806
2807         /*
2808          * from here on down, it's all bio -- do the initial mapping,
2809          * submit_bio -> generic_make_request may further map this bio around
2810          */
2811         bio = bio_alloc(GFP_NOIO, 1);
2812
2813         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2814         bio->bi_bdev = bh->b_bdev;
2815         bio->bi_io_vec[0].bv_page = bh->b_page;
2816         bio->bi_io_vec[0].bv_len = bh->b_size;
2817         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2818
2819         bio->bi_vcnt = 1;
2820         bio->bi_idx = 0;
2821         bio->bi_size = bh->b_size;
2822
2823         bio->bi_end_io = end_bio_bh_io_sync;
2824         bio->bi_private = bh;
2825
2826         bio_get(bio);
2827         submit_bio(rw, bio);
2828
2829         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2830                 ret = -EOPNOTSUPP;
2831
2832         bio_put(bio);
2833         return ret;
2834 }
2835
2836 /**
2837  * ll_rw_block: low-level access to block devices (DEPRECATED)
2838  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2839  * @nr: number of &struct buffer_heads in the array
2840  * @bhs: array of pointers to &struct buffer_head
2841  *
2842  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2843  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2844  * %SWRITE is like %WRITE, only we make sure that the *current* data in the
2845  * buffers is sent to disk. The fourth %READA option is described in the documentation
2846  * for generic_make_request() which ll_rw_block() calls.
2847  *
2848  * This function drops any buffer that it cannot get a lock on (with the
2849  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2850  * clean when doing a write request, and any buffer that appears to be
2851  * up-to-date when doing read request.  Further it marks as clean buffers that
2852  * are processed for writing (the buffer cache won't assume that they are
2853  * actually clean until the buffer gets unlocked).
2854  *
2855  * ll_rw_block sets b_end_io to simple completion handler that marks
2856  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2857  * any waiters. 
2858  *
2859  * All of the buffers must be for the same device, and must also be a
2860  * multiple of the current approved size for the device.
2861  */
2862 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2863 {
2864         int i;
2865
2866         for (i = 0; i < nr; i++) {
2867                 struct buffer_head *bh = bhs[i];
2868
2869                 if (rw == SWRITE)
2870                         lock_buffer(bh);
2871                 else if (test_set_buffer_locked(bh))
2872                         continue;
2873
2874                 get_bh(bh);
2875                 if (rw == WRITE || rw == SWRITE) {
2876                         if (test_clear_buffer_dirty(bh)) {
2877                                 bh->b_end_io = end_buffer_write_sync;
2878                                 submit_bh(WRITE, bh);
2879                                 continue;
2880                         }
2881                 } else {
2882                         if (!buffer_uptodate(bh)) {
2883                                 bh->b_end_io = end_buffer_read_sync;
2884                                 submit_bh(rw, bh);
2885                                 continue;
2886                         }
2887                 }
2888                 unlock_buffer(bh);
2889                 put_bh(bh);
2890         }
2891 }
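
/*
 * Usage sketch (hypothetical caller): start reads for a batch of buffers
 * and then wait for the ones we actually need:
 *
 *      ll_rw_block(READ, nr, bhs);
 *      for (i = 0; i < nr; i++) {
 *              wait_on_buffer(bhs[i]);
 *              if (!buffer_uptodate(bhs[i]))
 *                      goto io_error;
 *      }
 */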
2892
2893 /*
2894  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2895  * and then start new I/O and wait upon it.  The caller must have a ref on
2896  * the buffer_head.
2897  */
2898 int sync_dirty_buffer(struct buffer_head *bh)
2899 {
2900         int ret = 0;
2901
2902         WARN_ON(atomic_read(&bh->b_count) < 1);
2903         lock_buffer(bh);
2904         if (test_clear_buffer_dirty(bh)) {
2905                 get_bh(bh);
2906                 bh->b_end_io = end_buffer_write_sync;
2907                 ret = submit_bh(WRITE, bh);
2908                 wait_on_buffer(bh);
2909                 if (buffer_eopnotsupp(bh)) {
2910                         clear_buffer_eopnotsupp(bh);
2911                         ret = -EOPNOTSUPP;
2912                 }
2913                 if (!ret && !buffer_uptodate(bh))
2914                         ret = -EIO;
2915         } else {
2916                 unlock_buffer(bh);
2917         }
2918         return ret;
2919 }
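
/*
 * Usage sketch (hypothetical metadata update): modify a buffer, then
 * force it to disk before depending on it.  The caller holds a ref:
 *
 *      memcpy(bh->b_data + off, data, len);
 *      mark_buffer_dirty(bh);
 *      err = sync_dirty_buffer(bh);
 *      if (err)
 *              goto io_error;
 */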
2920
2921 /*
2922  * try_to_free_buffers() checks if all the buffers on this particular page
2923  * are unused, and releases them if so.
2924  *
2925  * Exclusion against try_to_free_buffers may be obtained by either
2926  * locking the page or by holding its mapping's private_lock.
2927  *
2928  * If the page is dirty but all the buffers are clean then we need to
2929  * be sure to mark the page clean as well.  This is because the page
2930  * may be against a block device, and a later reattachment of buffers
2931  * to a dirty page will set *all* buffers dirty.  Which would corrupt
2932  * filesystem data on the same device.
2933  *
2934  * The same applies to regular filesystem pages: if all the buffers are
2935  * clean then we set the page clean and proceed.  To do that, we require
2936  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2937  * private_lock.
2938  *
2939  * try_to_free_buffers() is non-blocking.
2940  */
2941 static inline int buffer_busy(struct buffer_head *bh)
2942 {
2943         return atomic_read(&bh->b_count) |
2944                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2945 }
2946
2947 static int
2948 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2949 {
2950         struct buffer_head *head = page_buffers(page);
2951         struct buffer_head *bh;
2952
2953         bh = head;
2954         do {
2955                 if (buffer_write_io_error(bh) && page->mapping)
2956                         set_bit(AS_EIO, &page->mapping->flags);
2957                 if (buffer_busy(bh))
2958                         goto failed;
2959                 bh = bh->b_this_page;
2960         } while (bh != head);
2961
2962         do {
2963                 struct buffer_head *next = bh->b_this_page;
2964
2965                 if (!list_empty(&bh->b_assoc_buffers))
2966                         __remove_assoc_queue(bh);
2967                 bh = next;
2968         } while (bh != head);
2969         *buffers_to_free = head;
2970         __clear_page_buffers(page);
2971         return 1;
2972 failed:
2973         return 0;
2974 }
2975
2976 int try_to_free_buffers(struct page *page)
2977 {
2978         struct address_space * const mapping = page->mapping;
2979         struct buffer_head *buffers_to_free = NULL;
2980         int ret = 0;
2981
2982         BUG_ON(!PageLocked(page));
2983         if (PageWriteback(page))
2984                 return 0;
2985
2986         if (mapping == NULL) {          /* can this still happen? */
2987                 ret = drop_buffers(page, &buffers_to_free);
2988                 goto out;
2989         }
2990
2991         spin_lock(&mapping->private_lock);
2992         ret = drop_buffers(page, &buffers_to_free);
2993         if (ret) {
2994                 /*
2995                  * If the filesystem writes its buffers by hand (eg ext3)
2996                  * then we can have clean buffers against a dirty page.  We
2997                  * clean the page here; otherwise later reattachment of buffers
2998                  * could encounter a non-uptodate page, which is unresolvable.
2999                  * This only applies in the rare case where try_to_free_buffers
3000                  * succeeds but the page is not freed.
3001                  */
3002                 clear_page_dirty(page);
3003         }
3004         spin_unlock(&mapping->private_lock);
3005 out:
3006         if (buffers_to_free) {
3007                 struct buffer_head *bh = buffers_to_free;
3008
3009                 do {
3010                         struct buffer_head *next = bh->b_this_page;
3011                         free_buffer_head(bh);
3012                         bh = next;
3013                 } while (bh != buffers_to_free);
3014         }
3015         return ret;
3016 }
3017 EXPORT_SYMBOL(try_to_free_buffers);
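/*
 * Illustrative sketch (hypothetical ->releasepage()-style caller, not part
 * of this file): the page lock taken by the VM before calling
 * ->releasepage() provides the exclusion described above.  A real
 * filesystem would normally also check its own private state (journalling,
 * ordered data, ...) before letting the buffers go.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
        BUG_ON(!PageLocked(page));
        if (PageWriteback(page))
                return 0;                       /* I/O still in flight */
        if (!page_has_buffers(page))
                return 1;                       /* nothing to free */
        return try_to_free_buffers(page);
}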
3018
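/*
 * block_sync_page() is the default ->sync_page() address_space operation
 * for block-backed mappings: unplug the queue behind the page's mapping so
 * that I/O already submitted against it is actually dispatched to the
 * device.
 */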
3019 int block_sync_page(struct page *page)
3020 {
3021         struct address_space *mapping;
3022
3023         smp_mb();
3024         mapping = page_mapping(page);
3025         if (mapping)
3026                 blk_run_backing_dev(mapping->backing_dev_info, page);
3027         return 0;
3028 }
3029
3030 /*
3031  * There are no bdflush tunables left.  But distributions are
3032  * still running obsolete flush daemons, so we terminate them here.
3033  *
3034  * Use of bdflush() is deprecated and will be removed in a future kernel.
3035  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3036  */
3037 asmlinkage long sys_bdflush(int func, long data)
3038 {
3039         static int msg_count;
3040
3041         if (!capable(CAP_SYS_ADMIN))
3042                 return -EPERM;
3043
3044         if (msg_count < 5) {
3045                 msg_count++;
3046                 printk(KERN_INFO
3047                         "warning: process `%s' used the obsolete bdflush"
3048                         " system call\n", current->comm);
3049                 printk(KERN_INFO "Fix your initscripts?\n");
3050         }
3051
3052         if (func == 1)
3053                 do_exit(0);
3054         return 0;
3055 }
3056
3057 /*
3058  * Buffer-head allocation
3059  */
3060 static kmem_cache_t *bh_cachep;
3061
3062 /*
3063  * Once the number of bh's in the machine exceeds this level, we start
3064  * stripping them in writeback.
3065  */
3066 static int max_buffer_heads;
3067
3068 int buffer_heads_over_limit;
3069
3070 struct bh_accounting {
3071         int nr;                 /* Number of live bh's */
3072         int ratelimit;          /* Limit cacheline bouncing */
3073 };
3074
3075 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3076
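/*
 * Recompute buffer_heads_over_limit from the per-cpu counters.  The sum is
 * only taken once per 4096 allocate/free operations on each CPU, so the
 * global flag does not become a cross-CPU cacheline-bouncing hot spot.
 */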
3077 static void recalc_bh_state(void)
3078 {
3079         int i;
3080         int tot = 0;
3081
3082         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3083                 return;
3084         __get_cpu_var(bh_accounting).ratelimit = 0;
3085         for_each_cpu(i)
3086                 tot += per_cpu(bh_accounting, i).nr;
3087         buffer_heads_over_limit = (tot > max_buffer_heads);
3088 }
3089
3090 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3091 {
3092         struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3093         if (ret) {
3094                 get_cpu_var(bh_accounting).nr++;
3095                 recalc_bh_state();
3096                 put_cpu_var(bh_accounting);
3097         }
3098         return ret;
3099 }
3100 EXPORT_SYMBOL(alloc_buffer_head);
3101
3102 void free_buffer_head(struct buffer_head *bh)
3103 {
3104         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3105         kmem_cache_free(bh_cachep, bh);
3106         get_cpu_var(bh_accounting).nr--;
3107         recalc_bh_state();
3108         put_cpu_var(bh_accounting);
3109 }
3110 EXPORT_SYMBOL(free_buffer_head);
3111
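/*
 * Slab constructor: run when an object is first set up in the cache
 * (SLAB_CTOR_CONSTRUCTOR without SLAB_CTOR_VERIFY).  Zeroing the object
 * and initialising b_assoc_buffers here keeps the list_empty() assertion
 * in free_buffer_head() valid for never-used buffer heads.
 */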
3112 static void
3113 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3114 {
3115         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3116                             SLAB_CTOR_CONSTRUCTOR) {
3117                 struct buffer_head * bh = (struct buffer_head *)data;
3118
3119                 memset(bh, 0, sizeof(*bh));
3120                 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3121         }
3122 }
3123
3124 #ifdef CONFIG_HOTPLUG_CPU
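/*
 * When a CPU goes away, release the buffer_head references cached in its
 * per-cpu bh_lru so they do not pin buffers (and their pages) forever.
 */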
3125 static void buffer_exit_cpu(int cpu)
3126 {
3127         int i;
3128         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3129
3130         for (i = 0; i < BH_LRU_SIZE; i++) {
3131                 brelse(b->bhs[i]);
3132                 b->bhs[i] = NULL;
3133         }
3134 }
3135
3136 static int buffer_cpu_notify(struct notifier_block *self,
3137                               unsigned long action, void *hcpu)
3138 {
3139         if (action == CPU_DEAD)
3140                 buffer_exit_cpu((unsigned long)hcpu);
3141         return NOTIFY_OK;
3142 }
3143 #endif /* CONFIG_HOTPLUG_CPU */
3144
3145 void __init buffer_init(void)
3146 {
3147         int nrpages;
3148
3149         bh_cachep = kmem_cache_create("buffer_head",
3150                         sizeof(struct buffer_head), 0,
3151                         SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
3152
3153         /*
3154          * Limit the bh occupancy to 10% of ZONE_NORMAL
3155          */
3156         nrpages = (nr_free_buffer_pages() * 10) / 100;
3157         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3158         hotcpu_notifier(buffer_cpu_notify, 0);
3159 }
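/*
 * Rough worked example of the limit above (illustrative numbers only, not
 * measured values): with 4KiB pages, roughly 100 bytes per buffer_head and
 * 1,000,000 free buffer pages, nrpages is 100,000 and each page's worth of
 * memory holds about 40 buffer heads, so max_buffer_heads comes out around
 * 4,000,000: the point at which buffer heads themselves would occupy
 * roughly 10% of ZONE_NORMAL.
 */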
3160
3161 EXPORT_SYMBOL(__bforget);
3162 EXPORT_SYMBOL(__brelse);
3163 EXPORT_SYMBOL(__wait_on_buffer);
3164 EXPORT_SYMBOL(block_commit_write);
3165 EXPORT_SYMBOL(block_prepare_write);
3166 EXPORT_SYMBOL(block_read_full_page);
3167 EXPORT_SYMBOL(block_sync_page);
3168 EXPORT_SYMBOL(block_truncate_page);
3169 EXPORT_SYMBOL(block_write_full_page);
3170 EXPORT_SYMBOL(cont_prepare_write);
3171 EXPORT_SYMBOL(end_buffer_async_write);
3172 EXPORT_SYMBOL(end_buffer_read_sync);
3173 EXPORT_SYMBOL(end_buffer_write_sync);
3174 EXPORT_SYMBOL(file_fsync);
3175 EXPORT_SYMBOL(fsync_bdev);
3176 EXPORT_SYMBOL(generic_block_bmap);
3177 EXPORT_SYMBOL(generic_commit_write);
3178 EXPORT_SYMBOL(generic_cont_expand);
3179 EXPORT_SYMBOL(generic_cont_expand_simple);
3180 EXPORT_SYMBOL(init_buffer);
3181 EXPORT_SYMBOL(invalidate_bdev);
3182 EXPORT_SYMBOL(ll_rw_block);
3183 EXPORT_SYMBOL(mark_buffer_dirty);
3184 EXPORT_SYMBOL(submit_bh);
3185 EXPORT_SYMBOL(sync_dirty_buffer);
3186 EXPORT_SYMBOL(unlock_buffer);