[PATCH] Make address_space_operations->sync_page return void
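
Besides the headline change, this blobdiff of fs/buffer.c picks up several
neighboring changes: the factoring of the sync sequence into __fsync_super(),
the conversion of bd_mount_sem to bd_mount_mutex, a split of do_fsync() so it
takes a struct file * directly, CPU-hotplug fixes for the per-CPU buffer_head
accounting, and SLAB_MEM_SPREAD for the buffer_head cache.

As for the headline change itself: block_sync_page() only ever returned 0, so
the return value carried no information.  In include/linux/fs.h, which is
outside this blobdiff, the corresponding member of struct
address_space_operations would change along these lines:

-       int (*sync_page)(struct page *);
+       void (*sync_page)(struct page *);
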
diff --git a/fs/buffer.c b/fs/buffer.c
index 1d3683d496f8b0dbab6e3251511532c9904b5236..0b9456fd074f67c1b87861c444a9bfc20a4caf54 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -160,12 +160,7 @@ int sync_blockdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(sync_blockdev);
 
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock.  Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
+static void __fsync_super(struct super_block *sb)
 {
        sync_inodes_sb(sb, 0);
        DQUOT_SYNC(sb);
@@ -177,7 +172,16 @@ int fsync_super(struct super_block *sb)
                sb->s_op->sync_fs(sb, 1);
        sync_blockdev(sb->s_bdev);
        sync_inodes_sb(sb, 1);
+}
 
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock.  Filesystem data as well as the underlying block
+ * device.  Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+       __fsync_super(sb);
        return sync_blockdev(sb->s_bdev);
 }
 
@@ -201,7 +205,7 @@ int fsync_bdev(struct block_device *bdev)
  * freeze_bdev  --  lock a filesystem and force it into a consistent state
  * @bdev:      blockdevice to lock
  *
- * This takes the block device bd_mount_sem to make sure no new mounts
+ * This takes the block device bd_mount_mutex to make sure no new mounts
  * happen on bdev until thaw_bdev() is called.
  * If a superblock is found on this device, we take the s_umount semaphore
  * on it to make sure nobody unmounts until the snapshot creation is done.
@@ -210,25 +214,13 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 {
        struct super_block *sb;
 
-       down(&bdev->bd_mount_sem);
+       mutex_lock(&bdev->bd_mount_mutex);
        sb = get_super(bdev);
        if (sb && !(sb->s_flags & MS_RDONLY)) {
                sb->s_frozen = SB_FREEZE_WRITE;
                smp_wmb();
 
-               sync_inodes_sb(sb, 0);
-               DQUOT_SYNC(sb);
-
-               lock_super(sb);
-               if (sb->s_dirt && sb->s_op->write_super)
-                       sb->s_op->write_super(sb);
-               unlock_super(sb);
-
-               if (sb->s_op->sync_fs)
-                       sb->s_op->sync_fs(sb, 1);
-
-               sync_blockdev(sb->s_bdev);
-               sync_inodes_sb(sb, 1);
+               __fsync_super(sb);
 
                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();
@@ -264,7 +256,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
                drop_super(sb);
        }
 
-       up(&bdev->bd_mount_sem);
+       mutex_unlock(&bdev->bd_mount_mutex);
 }
 EXPORT_SYMBOL(thaw_bdev);
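
As context for the mutex conversion, a caller of the freeze/thaw pair looks
roughly like the sketch below; snapshot_bdev() and do_take_snapshot() are
hypothetical names, not part of this patch:

/*
 * Hypothetical snapshot-style caller.  freeze_bdev() takes
 * bd_mount_mutex and returns the frozen superblock (or NULL if
 * nothing is mounted on bdev); the same pointer is handed back to
 * thaw_bdev(), which releases the mutex.
 */
static void snapshot_bdev(struct block_device *bdev)
{
        struct super_block *sb;

        sb = freeze_bdev(bdev);         /* writes blocked, fs synced */
        do_take_snapshot(bdev);         /* hypothetical snapshot work */
        thaw_bdev(bdev, sb);            /* writes resume */
}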
 
@@ -327,31 +319,24 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
        return ret;
 }
 
-static long do_fsync(unsigned int fd, int datasync)
+long do_fsync(struct file *file, int datasync)
 {
-       struct file * file;
-       struct address_space *mapping;
-       int ret, err;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
+       int ret;
+       int err;
+       struct address_space *mapping = file->f_mapping;
 
-       ret = -EINVAL;
        if (!file->f_op || !file->f_op->fsync) {
                /* Why?  We can still call filemap_fdatawrite */
-               goto out_putf;
+               ret = -EINVAL;
+               goto out;
        }
 
-       mapping = file->f_mapping;
-
        current->flags |= PF_SYNCWRITE;
        ret = filemap_fdatawrite(mapping);
 
        /*
-        * We need to protect against concurrent writers,
-        * which could cause livelocks in fsync_buffers_list
+        * We need to protect against concurrent writers, which could cause
+        * livelocks in fsync_buffers_list().
         */
        mutex_lock(&mapping->host->i_mutex);
        err = file->f_op->fsync(file, file->f_dentry, datasync);
@@ -362,21 +347,31 @@ static long do_fsync(unsigned int fd, int datasync)
        if (!ret)
                ret = err;
        current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
-       fput(file);
 out:
        return ret;
 }
 
+static long __do_fsync(unsigned int fd, int datasync)
+{
+       struct file *file;
+       int ret = -EBADF;
+
+       file = fget(fd);
+       if (file) {
+               ret = do_fsync(file, datasync);
+               fput(file);
+       }
+       return ret;
+}
+
 asmlinkage long sys_fsync(unsigned int fd)
 {
-       return do_fsync(fd, 0);
+       return __do_fsync(fd, 0);
 }
 
 asmlinkage long sys_fdatasync(unsigned int fd)
 {
-       return do_fsync(fd, 1);
+       return __do_fsync(fd, 1);
 }
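
The do_fsync() split means in-kernel code that already holds an open struct
file can sync it without going through a file descriptor; a minimal
hypothetical caller:

/*
 * Hypothetical in-kernel caller of the reworked do_fsync(): 'log' is
 * an already-open struct file; datasync == 1 skips non-essential
 * metadata, exactly as sys_fdatasync() does via __do_fsync().
 */
static long flush_log_file(struct file *log)
{
        return do_fsync(log, 1);
}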
 
 /*
@@ -865,8 +860,8 @@ int __set_page_dirty_buffers(struct page *page)
                }
                write_unlock_irq(&mapping->tree_lock);
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+               return 1;
        }
-       
        return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
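
With this fix, __set_page_dirty_buffers() behaves like other ->set_page_dirty
implementations and returns 1 only when it newly dirtied the page.  It is
normally wired into an address_space_operations table; the structure below is
illustrative, not taken from this patch:

/*
 * Illustrative hookup: filesystems that track dirty state in
 * buffer_heads point ->set_page_dirty at __set_page_dirty_buffers,
 * and typically pair it with block_sync_page (which, after this
 * patch, returns void).
 */
static struct address_space_operations example_aops = {
        .set_page_dirty = __set_page_dirty_buffers,
        .sync_page      = block_sync_page,
};
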
@@ -3012,7 +3007,7 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
 {
        struct address_space *mapping;
 
@@ -3020,7 +3015,6 @@ int block_sync_page(struct page *page)
        mapping = page_mapping(page);
        if (mapping)
                blk_run_backing_dev(mapping->backing_dev_info, page);
-       return 0;
 }
 
 /*
@@ -3078,7 +3072,7 @@ static void recalc_bh_state(void)
        if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
                return;
        __get_cpu_var(bh_accounting).ratelimit = 0;
-       for_each_cpu(i)
+       for_each_online_cpu(i)
                tot += per_cpu(bh_accounting, i).nr;
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
@@ -3127,6 +3121,9 @@ static void buffer_exit_cpu(int cpu)
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
+       get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+       per_cpu(bh_accounting, cpu).nr = 0;
+       put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
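
The two CPU-hotplug hunks work together: recalc_bh_state() now sums only
online CPUs, so buffer_exit_cpu() must fold the dead CPU's bh_accounting.nr
into the current CPU's counter or those buffer_heads would vanish from the
total.  buffer_cpu_notify(), whose body falls outside this diff, presumably
dispatches to it as sketched here:

/* Sketch (assumed, not shown in this diff): fold accounting on CPU_DEAD. */
static int buffer_cpu_notify(struct notifier_block *self,
                             unsigned long action, void *hcpu)
{
        if (action == CPU_DEAD)
                buffer_exit_cpu((unsigned long)hcpu);
        return NOTIFY_OK;
}
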
@@ -3143,8 +3140,11 @@ void __init buffer_init(void)
        int nrpages;
 
        bh_cachep = kmem_cache_create("buffer_head",
-                       sizeof(struct buffer_head), 0,
-                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
+                                       sizeof(struct buffer_head), 0,
+                                       (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                                       SLAB_MEM_SPREAD),
+                                       init_buffer_head,
+                                       NULL);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL