Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 9 Apr 2010 18:50:29 +0000 (11:50 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 9 Apr 2010 18:50:29 +0000 (11:50 -0700)
* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (34 commits)
  cfq-iosched: Fix the incorrect timeslice accounting with forced_dispatch
  loop: Update mtime when writing using aops
  block: expose the statistics in blkio.time and blkio.sectors for the root cgroup
  backing-dev: Handle class_create() failure
  Block: Fix block/elevator.c elevator_get() off-by-one error
  drbd: lc_element_by_index() never returns NULL
  cciss: unlock on error path
  cfq-iosched: Do not merge queues of BE and IDLE classes
  cfq-iosched: Add additional blktrace log messages in CFQ for easier debugging
  i2o: Remove the dangerous kobj_to_i2o_device macro
  block: remove 16 bytes of padding from struct request on 64bits
  cfq-iosched: fix a kbuild regression
  block: make CONFIG_BLK_CGROUP visible
  Remove GENHD_FL_DRIVERFS
  block: Export max number of segments and max segment size in sysfs
  block: Finalize conversion of block limits functions
  block: Fix overrun in lcm() and move it to lib
  vfs: improve writeback_inodes_wb()
  paride: fix off-by-one test
  drbd: fix al-to-on-disk-bitmap for 4k logical_block_size
  ...

block/blk-settings.c
block/blk-sysfs.c
block/cfq-iosched.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/loop.c
drivers/block/virtio_blk.c
drivers/scsi/sd.c
fs/fs-writeback.c

diff --combined block/blk-settings.c
index d9a9db5f0a2bddfbdd6e5dbfe8a8c0c7a73b24b0,4c4700dca56a0962326a3e3bc202b9c91ec67c6e..f5ed5a1187ba8564527b70f682328b31eea4549a
@@@ -8,8 -8,8 +8,9 @@@
  #include <linux/blkdev.h>
  #include <linux/bootmem.h>    /* for max_pfn/max_low_pfn */
  #include <linux/gcd.h>
+ #include <linux/lcm.h>
  #include <linux/jiffies.h>
 +#include <linux/gfp.h>
  
  #include "blk.h"
  
@@@ -462,16 -462,6 +463,6 @@@ void blk_queue_stack_limits(struct requ
  }
  EXPORT_SYMBOL(blk_queue_stack_limits);
  
- static unsigned int lcm(unsigned int a, unsigned int b)
- {
-       if (a && b)
-               return (a * b) / gcd(a, b);
-       else if (b)
-               return b;
-       return a;
- }
  /**
   * blk_stack_limits - adjust queue_limits for stacked devices
   * @t:        the stacking driver limits (top device)
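The open-coded helper deleted above was promoted to lib/ by the shortlog's
"block: Fix overrun in lcm() and move it to lib". A userspace sketch of the
overflow-safe form (hedged reconstruction; the in-tree copy lives in
lib/lcm.c): dividing by gcd() before multiplying means the old a * b
intermediate can no longer overrun unsigned int.

        #include <stdio.h>

        /* Local stand-in for the kernel's gcd() helper. */
        static unsigned int gcd(unsigned int a, unsigned int b)
        {
                while (b) {
                        unsigned int t = a % b;
                        a = b;
                        b = t;
                }
                return a;
        }

        static unsigned int lcm(unsigned int a, unsigned int b)
        {
                if (a && b)
                        return (a / gcd(a, b)) * b;  /* divide first: no overrun */
                else if (b)
                        return b;
                return a;
        }

        int main(void)
        {
                /* 512-byte and 4096-byte logical blocks stack to a 4096 lcm. */
                printf("%u\n", lcm(512, 4096));
                return 0;
        }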
diff --combined block/blk-sysfs.c
index c2b821fa324a115b95c5dd93920b34dd189f9dc7,4426739fb757b714b22876e416469ec595f5ab0b..306759bbdf1be719ef1f8e0a1f1912475c521a09
@@@ -2,7 -2,6 +2,7 @@@
   * Functions related to sysfs handling
   */
  #include <linux/kernel.h>
 +#include <linux/slab.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
@@@ -107,6 -106,19 +107,19 @@@ static ssize_t queue_max_sectors_show(s
        return queue_var_show(max_sectors_kb, (page));
  }
  
+ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+ {
+       return queue_var_show(queue_max_segments(q), (page));
+ }
+ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+ {
+       if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+               return queue_var_show(queue_max_segment_size(q), (page));
+       return queue_var_show(PAGE_CACHE_SIZE, (page));
+ }
  static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
  {
        return queue_var_show(queue_logical_block_size(q), page);
@@@ -281,6 -293,16 +294,16 @@@ static struct queue_sysfs_entry queue_m
        .show = queue_max_hw_sectors_show,
  };
  
+ static struct queue_sysfs_entry queue_max_segments_entry = {
+       .attr = {.name = "max_segments", .mode = S_IRUGO },
+       .show = queue_max_segments_show,
+ };
+ static struct queue_sysfs_entry queue_max_segment_size_entry = {
+       .attr = {.name = "max_segment_size", .mode = S_IRUGO },
+       .show = queue_max_segment_size_show,
+ };
  static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@@ -356,6 -378,8 +379,8 @@@ static struct attribute *default_attrs[
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
+       &queue_max_segments_entry.attr,
+       &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
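With the two queue_sysfs_entry additions above, the segment limits become
readable from userspace. A minimal reader, assuming a disk named sda
(adjust the paths for your device):

        #include <stdio.h>

        int main(void)
        {
                const char *attrs[] = {
                        "/sys/block/sda/queue/max_segments",
                        "/sys/block/sda/queue/max_segment_size",
                };
                char buf[64];

                for (int i = 0; i < 2; i++) {
                        FILE *f = fopen(attrs[i], "r");
                        if (!f)
                                continue;
                        if (fgets(buf, sizeof(buf), f))
                                printf("%s: %s", attrs[i], buf);
                        fclose(f);
                }
                return 0;
        }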
diff --combined block/cfq-iosched.c
index fc98a48554fd46025c4dcc9a12f75dc4a98e609b,b773000f8a0634b055e388a0cd4cef512099d356..838834be115b3f341f3bc6e0eba54a5c3fcc424c
@@@ -7,7 -7,6 +7,7 @@@
   *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   */
  #include <linux/module.h>
 +#include <linux/slab.h>
  #include <linux/blkdev.h>
  #include <linux/elevator.h>
  #include <linux/jiffies.h>
@@@ -48,6 -47,7 +48,7 @@@ static const int cfq_hist_divisor = 4
  #define CFQ_SERVICE_SHIFT       12
  
  #define CFQQ_SEEK_THR         (sector_t)(8 * 100)
+ #define CFQQ_CLOSE_THR                (sector_t)(8 * 1024)
  #define CFQQ_SECT_THR_NONROT  (sector_t)(2 * 32)
  #define CFQQ_SEEKY(cfqq)      (hweight32(cfqq->seek_history) > 32/8)
  
@@@ -948,6 -948,11 +949,11 @@@ cfq_find_alloc_cfqg(struct cfq_data *cf
        unsigned int major, minor;
  
        cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+       if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
+               sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+               cfqg->blkg.dev = MKDEV(major, minor);
+               goto done;
+       }
        if (cfqg || !create)
                goto done;
  
@@@ -1518,7 -1523,8 +1524,8 @@@ static void __cfq_set_active_queue(stru
                                   struct cfq_queue *cfqq)
  {
        if (cfqq) {
-               cfq_log_cfqq(cfqd, cfqq, "set_active");
+               cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
+                               cfqd->serving_prio, cfqd->serving_type);
                cfqq->slice_start = 0;
                cfqq->dispatch_start = jiffies;
                cfqq->allocated_slice = 0;
@@@ -1661,9 -1667,9 +1668,9 @@@ static inline sector_t cfq_dist_from_la
  }
  
  static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                              struct request *rq, bool for_preempt)
+                              struct request *rq)
  {
-       return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
+       return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
  }
  
  static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
         * will contain the closest sector.
         */
        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
                return __cfqq;
  
        if (blk_rq_pos(__cfqq->next_rq) < sector)
                return NULL;
  
        __cfqq = rb_entry(node, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
                return __cfqq;
  
        return NULL;
@@@ -1722,6 -1728,8 +1729,8 @@@ static struct cfq_queue *cfq_close_coop
  {
        struct cfq_queue *cfqq;
  
+       if (cfq_class_idle(cur_cfqq))
+               return NULL;
        if (!cfq_cfqq_sync(cur_cfqq))
                return NULL;
        if (CFQQ_SEEKY(cur_cfqq))
@@@ -1788,7 -1796,11 +1797,11 @@@ static bool cfq_should_idle(struct cfq_
         * Otherwise, we do only if they are the last ones
         * in their service tree.
         */
-       return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
+       if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
+               return 1;
+       cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+                       service_tree->count);
+       return 0;
  }
  
  static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         * time slice.
         */
        if (sample_valid(cic->ttime_samples) &&
-           (cfqq->slice_end - jiffies < cic->ttime_mean))
+           (cfqq->slice_end - jiffies < cic->ttime_mean)) {
+               cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
+                               cic->ttime_mean);
                return;
+       }
  
        cfq_mark_cfqq_wait_request(cfqq);
  
@@@ -2042,6 -2057,7 +2058,7 @@@ static void choose_service_tree(struct 
                slice = max(slice, 2 * cfqd->cfq_slice_idle);
  
        slice = max_t(unsigned, slice, CFQ_MIN_TT);
+       cfq_log(cfqd, "workload slice:%d", slice);
        cfqd->workload_expires = jiffies + slice;
        cfqd->noidle_tree_requires_idle = false;
  }
@@@ -2189,10 -2205,13 +2206,13 @@@ static int cfq_forced_dispatch(struct c
        struct cfq_queue *cfqq;
        int dispatched = 0;
  
-       while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL)
+       /* Expire the timeslice of the current active queue first */
+       cfq_slice_expired(cfqd, 0);
+       while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
+               __cfq_set_active_queue(cfqd, cfqq);
                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+       }
  
-       cfq_slice_expired(cfqd, 0);
        BUG_ON(cfqd->busy_queues);
  
        cfq_log(cfqd, "forced_dispatch=%d", dispatched);
@@@ -3104,7 -3123,7 +3124,7 @@@ cfq_should_preempt(struct cfq_data *cfq
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
-       if (cfq_rq_close(cfqd, cfqq, rq, true))
+       if (cfq_rq_close(cfqd, cfqq, rq))
                return true;
  
        return false;
@@@ -3308,6 -3327,7 +3328,7 @@@ static void cfq_completed_request(struc
                if (cfq_should_wait_busy(cfqd, cfqq)) {
                        cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
                        cfq_mark_cfqq_wait_busy(cfqq);
+                       cfq_log_cfqq(cfqd, cfqq, "will busy wait");
                }
  
                /*
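One change above that is easy to miss in the noise: cfq_rq_close() loses its
for_preempt flag and switches from CFQQ_SEEK_THR (8 * 100 sectors) to the new,
wider CFQQ_CLOSE_THR (8 * 1024 sectors), so queue merging and preemption now
share one larger "close" radius than the per-request seekiness test. A
standalone sketch of the distance check (constants copied from the diff;
illustrative only):

        #include <stdio.h>

        typedef unsigned long long sector_t;

        #define CFQQ_SEEK_THR  ((sector_t)(8 * 100))
        #define CFQQ_CLOSE_THR ((sector_t)(8 * 1024))

        /* Is the request within the merge/preempt radius of the last I/O? */
        static int rq_close(sector_t last_pos, sector_t rq_pos)
        {
                sector_t dist = rq_pos > last_pos ? rq_pos - last_pos
                                                  : last_pos - rq_pos;
                return dist <= CFQQ_CLOSE_THR;
        }

        int main(void)
        {
                /* 4000 sectors apart: seeky under the old 800-sector test,
                 * but still "close" for merging under the new threshold. */
                printf("close=%d\n", rq_close(100000, 104000));
                return 0;
        }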
diff --combined drivers/block/drbd/drbd_bitmap.c
index 3d6f3d988949f913269d2cd7c5db642319bab6f0,f58e76581c4b4c99e169310746c14465e7bedf82..3390716898d5aaf429b8c08db34cd28ff68ed569

@@@ -26,7 -26,6 +26,7 @@@
  #include <linux/vmalloc.h>
  #include <linux/string.h>
  #include <linux/drbd.h>
 +#include <linux/slab.h>
  #include <asm/kmap_types.h>
  #include "drbd_int.h"
  
@@@ -67,7 -66,7 +67,7 @@@ struct drbd_bitmap 
        size_t   bm_words;
        size_t   bm_number_of_pages;
        sector_t bm_dev_capacity;
-       struct semaphore bm_change; /* serializes resize operations */
+       struct mutex bm_change; /* serializes resize operations */
  
        atomic_t bm_async_io;
        wait_queue_head_t bm_io_wait;
@@@ -115,7 -114,7 +115,7 @@@ void drbd_bm_lock(struct drbd_conf *mde
                return;
        }
  
-       trylock_failed = down_trylock(&b->bm_change);
+       trylock_failed = !mutex_trylock(&b->bm_change);
  
        if (trylock_failed) {
                dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
                    b->bm_task == mdev->receiver.task ? "receiver" :
                    b->bm_task == mdev->asender.task  ? "asender"  :
                    b->bm_task == mdev->worker.task   ? "worker"   : "?");
-               down(&b->bm_change);
+               mutex_lock(&b->bm_change);
        }
        if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
                dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
@@@ -148,7 -147,7 +148,7 @@@ void drbd_bm_unlock(struct drbd_conf *m
  
        b->bm_why  = NULL;
        b->bm_task = NULL;
-       up(&b->bm_change);
+       mutex_unlock(&b->bm_change);
  }
  
  /* word offset to long pointer */
@@@ -296,7 -295,7 +296,7 @@@ int drbd_bm_init(struct drbd_conf *mdev
        if (!b)
                return -ENOMEM;
        spin_lock_init(&b->bm_lock);
-       init_MUTEX(&b->bm_change);
+       mutex_init(&b->bm_change);
        init_waitqueue_head(&b->bm_io_wait);
  
        mdev->bitmap = b;
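One subtlety in the semaphore-to-mutex conversion above: down_trylock()
returns 0 on success, while mutex_trylock() returns 1 on success, hence the
added negation when computing trylock_failed. A userspace model of the
polarity change (plain ints standing in for the real kernel primitives):

        #include <assert.h>

        /* Semaphore model: 0 means acquired, like down_trylock(). */
        static int down_trylock_model(int *sem)
        {
                if (*sem > 0) {
                        (*sem)--;
                        return 0;
                }
                return 1;
        }

        /* Mutex model: 1 means acquired, like mutex_trylock(). */
        static int mutex_trylock_model(int *lock)
        {
                if (*lock == 0) {
                        *lock = 1;
                        return 1;
                }
                return 0;
        }

        int main(void)
        {
                int sem = 1, lock = 0;
                int trylock_failed;

                trylock_failed = down_trylock_model(&sem);    /* old code */
                assert(!trylock_failed);
                trylock_failed = !mutex_trylock_model(&lock); /* new code */
                assert(!trylock_failed);
                return 0;
        }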
diff --combined drivers/block/loop.c
index cb69929d917a5a8221a5045eba71f0ebd480ffc2,1c21a3f238689012e8cbcaca2d8a537472b2aef3..8546d123b9a745492cbbe0aace5b5ce6a2324298
@@@ -71,6 -71,7 +71,6 @@@
  #include <linux/buffer_head.h>                /* for invalidate_bdev() */
  #include <linux/completion.h>
  #include <linux/highmem.h>
 -#include <linux/gfp.h>
  #include <linux/kthread.h>
  #include <linux/splice.h>
  
@@@ -237,6 -238,8 +237,8 @@@ static int do_lo_send_aops(struct loop_
                if (ret)
                        goto fail;
  
+               file_update_time(file);
                transfer_result = lo_do_transfer(lo, WRITE, page, offset,
                                bvec->bv_page, bv_offs, size, IV);
                copied = size;
diff --combined drivers/block/virtio_blk.c
index 4b12b820c9a62f655e0745b8d2eae87c16d55437,653817ceeeddb27db4b2f12fe592f1b33ee6adbc..2138a7ae050c10c44bdba1608f441a7811ca1a22
@@@ -1,6 -1,5 +1,6 @@@
  //#define DEBUG
  #include <linux/spinlock.h>
 +#include <linux/slab.h>
  #include <linux/blkdev.h>
  #include <linux/hdreg.h>
  #include <linux/virtio.h>
@@@ -348,14 -347,13 +348,13 @@@ static int __devinit virtblk_probe(stru
        set_capacity(vblk->disk, cap);
  
        /* We can handle whatever the host told us to handle. */
-       blk_queue_max_phys_segments(q, vblk->sg_elems-2);
-       blk_queue_max_hw_segments(q, vblk->sg_elems-2);
+       blk_queue_max_segments(q, vblk->sg_elems-2);
  
        /* No need to bounce any requests */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
  
        /* No real sector limit. */
-       blk_queue_max_sectors(q, -1U);
+       blk_queue_max_hw_sectors(q, -1U);
  
        /* Host can optionally specify maximum segment size and number of
         * segments. */
diff --combined drivers/scsi/sd.c
index 58c62ff42ab3953f98e27e230f236bb4cb98d99f,a82ab3e2b4f7af5d05019e57874ff7a33e46bd57..8b827f37b03ef95195be9736b9495cec6d390c5c
@@@ -49,7 -49,6 +49,7 @@@
  #include <linux/mutex.h>
  #include <linux/string_helpers.h>
  #include <linux/async.h>
 +#include <linux/slab.h>
  #include <asm/uaccess.h>
  #include <asm/unaligned.h>
  
@@@ -1949,7 -1948,7 +1949,7 @@@ static void sd_read_block_limits(struc
  {
        struct request_queue *q = sdkp->disk->queue;
        unsigned int sector_sz = sdkp->device->sector_size;
 -      const int vpd_len = 32;
 +      const int vpd_len = 64;
        unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
  
        if (!buffer ||
@@@ -1999,7 -1998,7 +1999,7 @@@ static void sd_read_block_characteristi
  {
        unsigned char *buffer;
        u16 rot;
 -      const int vpd_len = 32;
 +      const int vpd_len = 64;
  
        buffer = kmalloc(vpd_len, GFP_KERNEL);
  
@@@ -2186,7 -2185,7 +2186,7 @@@ static void sd_probe_async(void *data, 
        blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
  
        gd->driverfs_dev = &sdp->sdev_gendev;
-       gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
+       gd->flags = GENHD_FL_EXT_DEVT;
        if (sdp->removable)
                gd->flags |= GENHD_FL_REMOVABLE;
  
diff --combined fs/fs-writeback.c
index 781a322ccb456b926ff2906526645fd8dcd7cfec,6841effa47ca8280c736da681a378caba88cc33b..4b37f7cea4dd28edac895ebbe9017d80e7031c16
@@@ -16,7 -16,6 +16,7 @@@
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/spinlock.h>
 +#include <linux/slab.h>
  #include <linux/sched.h>
  #include <linux/fs.h>
  #include <linux/mm.h>
@@@ -554,108 -553,85 +554,85 @@@ select_queue
        return ret;
  }
  
- static void unpin_sb_for_writeback(struct super_block **psb)
+ static void unpin_sb_for_writeback(struct super_block *sb)
  {
-       struct super_block *sb = *psb;
-       if (sb) {
-               up_read(&sb->s_umount);
-               put_super(sb);
-               *psb = NULL;
-       }
+       up_read(&sb->s_umount);
+       put_super(sb);
  }
  
+ enum sb_pin_state {
+       SB_PINNED,
+       SB_NOT_PINNED,
+       SB_PIN_FAILED
+ };
  /*
   * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
   * before calling writeback. So make sure that we do pin it, so it doesn't
   * go away while we are writing inodes from it.
-  *
-  * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
-  * 1 if we failed.
   */
- static int pin_sb_for_writeback(struct writeback_control *wbc,
-                               struct inode *inode, struct super_block **psb)
+ static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
+                                             struct super_block *sb)
  {
-       struct super_block *sb = inode->i_sb;
-       /*
-        * If this sb is already pinned, nothing more to do. If not and
-        * *psb is non-NULL, unpin the old one first
-        */
-       if (sb == *psb)
-               return 0;
-       else if (*psb)
-               unpin_sb_for_writeback(psb);
        /*
         * Caller must already hold the ref for this
         */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                WARN_ON(!rwsem_is_locked(&sb->s_umount));
-               return 0;
+               return SB_NOT_PINNED;
        }
        spin_lock(&sb_lock);
        sb->s_count++;
        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root) {
                        spin_unlock(&sb_lock);
-                       goto pinned;
+                       return SB_PINNED;
                }
                /*
                 * umounted, drop rwsem again and fall through to failure
                 */
                up_read(&sb->s_umount);
        }
        sb->s_count--;
        spin_unlock(&sb_lock);
-       return 1;
- pinned:
-       *psb = sb;
-       return 0;
+       return SB_PIN_FAILED;
  }
  
- static void writeback_inodes_wb(struct bdi_writeback *wb,
-                               struct writeback_control *wbc)
+ /*
+  * Write a portion of b_io inodes which belong to @sb.
+  * If @wbc->sb != NULL, then find and write all such
+  * inodes. Otherwise write only ones which go sequentially
+  * in reverse order.
+  * Return 1, if the caller writeback routine should be
+  * interrupted. Otherwise return 0.
+  */
+ static int writeback_sb_inodes(struct super_block *sb,
+                              struct bdi_writeback *wb,
+                              struct writeback_control *wbc)
  {
-       struct super_block *sb = wbc->sb, *pin_sb = NULL;
-       const unsigned long start = jiffies;    /* livelock avoidance */
-       spin_lock(&inode_lock);
-       if (!wbc->for_kupdate || list_empty(&wb->b_io))
-               queue_io(wb, wbc->older_than_this);
        while (!list_empty(&wb->b_io)) {
-               struct inode *inode = list_entry(wb->b_io.prev,
-                                               struct inode, i_list);
                long pages_skipped;
-               /*
-                * super block given and doesn't match, skip this inode
-                */
-               if (sb && sb != inode->i_sb) {
+               struct inode *inode = list_entry(wb->b_io.prev,
+                                                struct inode, i_list);
+               if (wbc->sb && sb != inode->i_sb) {
+                       /* super block given and doesn't
+                          match, skip this inode */
                        redirty_tail(inode);
                        continue;
                }
+               if (sb != inode->i_sb)
+                       /* finish with this superblock */
+                       return 0;
                if (inode->i_state & (I_NEW | I_WILL_FREE)) {
                        requeue_io(inode);
                        continue;
                }
                /*
                 * Was this inode dirtied after sync_sb_inodes was called?
                 * This keeps sync from extra jobs and livelock.
                 */
-               if (inode_dirtied_after(inode, start))
-                       break;
-               if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
-                       requeue_io(inode);
-                       continue;
-               }
+               if (inode_dirtied_after(inode, wbc->wb_start))
+                       return 1;
  
                BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
                __iget(inode);
                spin_lock(&inode_lock);
                if (wbc->nr_to_write <= 0) {
                        wbc->more_io = 1;
-                       break;
+                       return 1;
                }
                if (!list_empty(&wb->b_more_io))
                        wbc->more_io = 1;
        }
+       /* b_io is empty */
+       return 1;
+ }
+ static void writeback_inodes_wb(struct bdi_writeback *wb,
+                               struct writeback_control *wbc)
+ {
+       int ret = 0;
+       wbc->wb_start = jiffies; /* livelock avoidance */
+       spin_lock(&inode_lock);
+       if (!wbc->for_kupdate || list_empty(&wb->b_io))
+               queue_io(wb, wbc->older_than_this);
  
-       unpin_sb_for_writeback(&pin_sb);
+       while (!list_empty(&wb->b_io)) {
+               struct inode *inode = list_entry(wb->b_io.prev,
+                                                struct inode, i_list);
+               struct super_block *sb = inode->i_sb;
+               enum sb_pin_state state;
  
+               if (wbc->sb && sb != wbc->sb) {
+                       /* super block given and doesn't
+                          match, skip this inode */
+                       redirty_tail(inode);
+                       continue;
+               }
+               state = pin_sb_for_writeback(wbc, sb);
+               if (state == SB_PIN_FAILED) {
+                       requeue_io(inode);
+                       continue;
+               }
+               ret = writeback_sb_inodes(sb, wb, wbc);
+               if (state == SB_PINNED)
+                       unpin_sb_for_writeback(sb);
+               if (ret)
+                       break;
+       }
        spin_unlock(&inode_lock);
        /* Leave any unwritten inodes on b_io */
  }
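
The net effect of the fs-writeback refactor above: writeback_inodes_wb() now
walks b_io grouping consecutive inodes by superblock, pins each sb once for
WB_SYNC_NONE, and delegates to writeback_sb_inodes(), which returns 1 when
the whole scan should stop and 0 when the caller should simply move on to
the next superblock. A reduced userspace model of that contract (hedged:
lists become arrays, pinning and locking elided):

        #include <stdio.h>

        struct inode { int sb; int dirty; };

        /* Returns 1 to interrupt the caller, 0 to continue with next sb. */
        static int writeback_sb_inodes(int sb, struct inode *io, int n,
                                       int *budget)
        {
                for (int i = 0; i < n; i++) {
                        if (!io[i].dirty)
                                continue;
                        if (io[i].sb != sb)
                                return 0;  /* finished with this superblock */
                        if (*budget <= 0)
                                return 1;  /* nr_to_write exhausted */
                        io[i].dirty = 0;
                        (*budget)--;
                        printf("wrote inode %d (sb %d)\n", i, sb);
                }
                return 1;                  /* b_io is empty */
        }

        static void writeback_inodes_wb(struct inode *io, int n, int budget)
        {
                for (int i = 0; i < n; i++) {
                        if (!io[i].dirty)
                                continue;
                        if (writeback_sb_inodes(io[i].sb, io, n, &budget))
                                break;
                }
        }

        int main(void)
        {
                struct inode io[] = { {1, 1}, {1, 1}, {2, 1}, {1, 1} };

                writeback_inodes_wb(io, 4, 10);
                return 0;
        }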