dm: use common __issue_target_request for flush and discard support
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a503b95ecbfb515e863e7539a5f516bbef3e9fe6..3dd846e801fb7d7710f76aa98b31fb396ea215d3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -125,6 +125,10 @@ struct mapped_device {
        unsigned long flags;
 
        struct request_queue *queue;
+       unsigned type;
+       /* Protect queue and type against concurrent access. */
+       struct mutex type_lock;
+
        struct gendisk *disk;
        char name[16];
 
@@ -639,8 +643,14 @@ static void dec_pending(struct dm_io *io, int error)
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
                         * Note that you can't touch the bio after end_io_acct
+                        *
+                        * We ignore -EOPNOTSUPP for empty flush reported by
+                        * underlying devices. We assume that if the device
+                        * doesn't support empty barriers, it doesn't need
+                        * cache flushing commands.
                         */
-                       if (!md->barrier_error && io_error != -EOPNOTSUPP)
+                       if (!md->barrier_error &&
+                           !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
                                md->barrier_error = io_error;
                        end_io_acct(io);
                        free_io(md, io);
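The net effect of the dec_pending() change, restated as an illustrative helper (not added by this patch): the only -EOPNOTSUPP that is now swallowed is one returned for an empty barrier, i.e. a pure cache flush; a data-carrying barrier that fails with -EOPNOTSUPP is still recorded as a real error.

	/*
	 * Illustrative sketch only.  Mirrors the new condition in
	 * dec_pending(): record the error unless it is -EOPNOTSUPP
	 * reported for a zero-length (empty) barrier.
	 */
	static inline bool example_record_barrier_error(struct bio *bio,
							int io_error)
	{
		return io_error &&
		       !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP);
	}
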
@@ -1172,30 +1182,91 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
        return tio;
 }
 
-static void __flush_target(struct clone_info *ci, struct dm_target *ti,
-                         unsigned flush_nr)
+static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
+                                  unsigned request_nr)
 {
        struct dm_target_io *tio = alloc_tio(ci, ti);
        struct bio *clone;
 
-       tio->info.flush_request = flush_nr;
+       tio->info.target_request_nr = request_nr;
 
-       clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+       /*
+        * Discard requests require the bio's inline iovecs be initialized.
+        * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
+        * and discard, so no need for concern about wasted bvec allocations.
+        */
+       clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
        __bio_clone(clone, ci->bio);
        clone->bi_destructor = dm_bio_destructor;
 
        __map_bio(ti, clone, tio);
 }
 
+static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
+                                   unsigned num_requests)
+{
+       unsigned request_nr;
+
+       for (request_nr = 0; request_nr < num_requests; request_nr++)
+               __issue_target_request(ci, ti, request_nr);
+}
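
For reference, the request index threaded through tio->info is what lets a target fan a single flush out to each of its underlying devices. A hedged sketch of how a stripe-like target might consume it (the example_stripe_* names and the fixed-size stripe array are hypothetical, not part of this patch or of dm-stripe):

	#include <linux/device-mapper.h>
	#include <linux/bio.h>

	struct example_stripe_c {
		unsigned stripes;		/* also used as num_flush_requests */
		struct {
			struct dm_dev *dev;
		} stripe[8];
	};

	/*
	 * Sketch only: each empty-barrier clone arrives with a distinct
	 * target_request_nr in [0, num_flush_requests), so it can be
	 * steered to the corresponding underlying device.
	 */
	static int example_stripe_map(struct dm_target *ti, struct bio *bio,
				      union map_info *map_context)
	{
		struct example_stripe_c *sc = ti->private;

		if (unlikely(bio_empty_barrier(bio))) {
			BUG_ON(map_context->target_request_nr >= sc->stripes);
			bio->bi_bdev = sc->stripe[map_context->target_request_nr].dev->bdev;
			return DM_MAPIO_REMAPPED;
		}

		/* Regular I/O would be remapped by the usual striping math. */
		return DM_MAPIO_REMAPPED;
	}
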
+
 static int __clone_and_map_empty_barrier(struct clone_info *ci)
 {
-       unsigned target_nr = 0, flush_nr;
+       unsigned target_nr = 0;
        struct dm_target *ti;
 
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
-               for (flush_nr = 0; flush_nr < ti->num_flush_requests;
-                    flush_nr++)
-                       __flush_target(ci, ti, flush_nr);
+               __issue_target_requests(ci, ti, ti->num_flush_requests);
+
+       ci->sector_count = 0;
+
+       return 0;
+}
+
+/*
+ * Perform all io with a single clone.
+ */
+static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+{
+       struct bio *clone, *bio = ci->bio;
+       struct dm_target_io *tio;
+
+       tio = alloc_tio(ci, ti);
+       clone = clone_bio(bio, ci->sector, ci->idx,
+                         bio->bi_vcnt - ci->idx, ci->sector_count,
+                         ci->md->bs);
+       __map_bio(ti, clone, tio);
+       ci->sector_count = 0;
+}
+
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+       struct dm_target *ti;
+       sector_t max;
+
+       ti = dm_table_find_target(ci->map, ci->sector);
+       if (!dm_target_is_valid(ti))
+               return -EIO;
+
+       /*
+        * Even though the device advertised discard support,
+        * reconfiguration might have changed that since the
+        * check was performed.
+        */
+
+       if (!ti->num_discard_requests)
+               return -EOPNOTSUPP;
+
+       max = max_io_len(ci->md, ci->sector, ti);
+
+       if (ci->sector_count > max)
+               /*
+                * FIXME: Handle a discard that spans two or more targets.
+                */
+               return -EOPNOTSUPP;
+
+       __issue_target_requests(ci, ti, ti->num_discard_requests);
 
        ci->sector_count = 0;
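
Whether __clone_and_map_discard() issues anything at all depends on ti->num_discard_requests, which each target declares in its constructor; targets that leave it at zero get -EOPNOTSUPP, matching the comment about reconfiguration above. A hedged sketch of a linear-style target opting in (example_linear_ctr and its argument handling are hypothetical):

	/*
	 * Sketch only: advertising flush and discard support from a target
	 * constructor.  With num_discard_requests left at zero, discard
	 * bios reaching this target are failed with -EOPNOTSUPP by dm.c.
	 */
	static int example_linear_ctr(struct dm_target *ti,
				      unsigned argc, char **argv)
	{
		ti->num_flush_requests = 1;	/* forward empty barriers */
		ti->num_discard_requests = 1;	/* forward REQ_DISCARD bios */

		/* Device lookup and start-offset parsing would follow here. */
		return 0;
	}

Note that, per the FIXME above, a discard whose range crosses a target boundary is still refused rather than split.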
 
@@ -1212,27 +1283,21 @@ static int __clone_and_map(struct clone_info *ci)
        if (unlikely(bio_empty_barrier(bio)))
                return __clone_and_map_empty_barrier(ci);
 
+       if (unlikely(bio->bi_rw & REQ_DISCARD))
+               return __clone_and_map_discard(ci);
+
        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;
 
        max = max_io_len(ci->md, ci->sector, ti);
 
-       /*
-        * Allocate a target io object.
-        */
-       tio = alloc_tio(ci, ti);
-
        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
-               clone = clone_bio(bio, ci->sector, ci->idx,
-                                 bio->bi_vcnt - ci->idx, ci->sector_count,
-                                 ci->md->bs);
-               __map_bio(ti, clone, tio);
-               ci->sector_count = 0;
+               __clone_and_map_simple(ci, ti);
 
        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
@@ -1253,6 +1318,7 @@ static int __clone_and_map(struct clone_info *ci)
                        len += bv_len;
                }
 
+               tio = alloc_tio(ci, ti);
                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);
@@ -1276,12 +1342,11 @@ static int __clone_and_map(struct clone_info *ci)
                                        return -EIO;
 
                                max = max_io_len(ci->md, ci->sector, ti);
-
-                               tio = alloc_tio(ci, ti);
                        }
 
                        len = min(remaining, max);
 
+                       tio = alloc_tio(ci, ti);
                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                                           ci->md->bs);
@@ -1846,6 +1911,28 @@ static const struct block_device_operations dm_blk_dops;
 static void dm_wq_work(struct work_struct *work);
 static void dm_rq_barrier_work(struct work_struct *work);
 
+static void dm_init_md_queue(struct mapped_device *md)
+{
+       /*
+        * Request-based dm devices cannot be stacked on top of bio-based dm
+        * devices.  The type of this dm device has not been decided yet.
+        * The type is decided at the first table loading time.
+        * To prevent problematic device stacking, clear the queue flag
+        * for request stacking support until then.
+        *
+        * This queue is new, so no concurrency on the queue_flags.
+        */
+       queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+       md->queue->queuedata = md;
+       md->queue->backing_dev_info.congested_fn = dm_any_congested;
+       md->queue->backing_dev_info.congested_data = md;
+       blk_queue_make_request(md->queue, dm_request);
+       blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
+       md->queue->unplug_fn = dm_unplug_all;
+       blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+}
+
 /*
  * Allocate and initialise a blank device with a given minor.
  */
@@ -1871,8 +1958,10 @@ static struct mapped_device *alloc_dev(int minor)
        if (r < 0)
                goto bad_minor;
 
+       md->type = DM_TYPE_NONE;
        init_rwsem(&md->io_lock);
        mutex_init(&md->suspend_lock);
+       mutex_init(&md->type_lock);
        spin_lock_init(&md->deferred_lock);
        spin_lock_init(&md->barrier_error_lock);
        rwlock_init(&md->map_lock);
@@ -1883,33 +1972,11 @@ static struct mapped_device *alloc_dev(int minor)
        INIT_LIST_HEAD(&md->uevent_list);
        spin_lock_init(&md->uevent_lock);
 
-       md->queue = blk_init_queue(dm_request_fn, NULL);
+       md->queue = blk_alloc_queue(GFP_KERNEL);
        if (!md->queue)
                goto bad_queue;
 
-       /*
-        * Request-based dm devices cannot be stacked on top of bio-based dm
-        * devices.  The type of this dm device has not been decided yet,
-        * although we initialized the queue using blk_init_queue().
-        * The type is decided at the first table loading time.
-        * To prevent problematic device stacking, clear the queue flag
-        * for request stacking support until then.
-        *
-        * This queue is new, so no concurrency on the queue_flags.
-        */
-       queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-       md->saved_make_request_fn = md->queue->make_request_fn;
-       md->queue->queuedata = md;
-       md->queue->backing_dev_info.congested_fn = dm_any_congested;
-       md->queue->backing_dev_info.congested_data = md;
-       blk_queue_make_request(md->queue, dm_request);
-       blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-       md->queue->unplug_fn = dm_unplug_all;
-       blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-       blk_queue_softirq_done(md->queue, dm_softirq_done);
-       blk_queue_prep_rq(md->queue, dm_prep_fn);
-       blk_queue_lld_busy(md->queue, dm_lld_busy);
-       blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+       dm_init_md_queue(md);
 
        md->disk = alloc_disk(1);
        if (!md->disk)
@@ -2124,6 +2191,72 @@ int dm_create(int minor, struct mapped_device **result)
        return 0;
 }
 
+/*
+ * Functions to manage md->type.
+ * All are required to hold md->type_lock.
+ */
+void dm_lock_md_type(struct mapped_device *md)
+{
+       mutex_lock(&md->type_lock);
+}
+
+void dm_unlock_md_type(struct mapped_device *md)
+{
+       mutex_unlock(&md->type_lock);
+}
+
+void dm_set_md_type(struct mapped_device *md, unsigned type)
+{
+       md->type = type;
+}
+
+unsigned dm_get_md_type(struct mapped_device *md)
+{
+       return md->type;
+}
+
+/*
+ * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ */
+static int dm_init_request_based_queue(struct mapped_device *md)
+{
+       struct request_queue *q = NULL;
+
+       if (md->queue->elevator)
+               return 1;
+
+       /* Fully initialize the queue */
+       q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
+       if (!q)
+               return 0;
+
+       md->queue = q;
+       md->saved_make_request_fn = md->queue->make_request_fn;
+       dm_init_md_queue(md);
+       blk_queue_softirq_done(md->queue, dm_softirq_done);
+       blk_queue_prep_rq(md->queue, dm_prep_fn);
+       blk_queue_lld_busy(md->queue, dm_lld_busy);
+       blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+       elv_register_queue(md->queue);
+
+       return 1;
+}
+
+/*
+ * Setup the DM device's queue based on md's type
+ */
+int dm_setup_md_queue(struct mapped_device *md)
+{
+       if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
+           !dm_init_request_based_queue(md)) {
+               DMWARN("Cannot initialize queue for request-based mapped device");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
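The new md->type plumbing is consumed from the ioctl table-load path rather than from within dm.c. A hedged sketch of the intended calling sequence (the function name and error handling here are illustrative, not the actual dm-ioctl code):

	/*
	 * Sketch only: the first table bound to a device fixes its type;
	 * later tables must match, and the queue is only completed to
	 * request-based form (elevator, ->request_fn) when needed.
	 */
	static int example_bind_table_type(struct mapped_device *md,
					   struct dm_table *t)
	{
		int r = 0;

		dm_lock_md_type(md);

		if (dm_get_md_type(md) == DM_TYPE_NONE) {
			/* Initial table load: decide the device type. */
			dm_set_md_type(md, dm_table_get_type(t));
		} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
			/* Stands in for the check removed from dm_swap_table(). */
			r = -EINVAL;
			goto out;
		}

		/* No-op for bio-based devices, per dm_setup_md_queue(). */
		r = dm_setup_md_queue(md);
	out:
		dm_unlock_md_type(md);
		return r;
	}
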
 static struct mapped_device *dm_find_md(dev_t dev)
 {
        struct mapped_device *md;
@@ -2284,7 +2417,12 @@ static void process_barrier(struct mapped_device *md, struct bio *bio)
 
        if (!bio_empty_barrier(bio)) {
                __split_and_process_bio(md, bio);
-               dm_flush(md);
+               /*
+                * If the request isn't supported, don't waste time with
+                * the second flush.
+                */
+               if (md->barrier_error != -EOPNOTSUPP)
+                       dm_flush(md);
        }
 
        if (md->barrier_error != DM_ENDIO_REQUEUE)
@@ -2341,11 +2479,11 @@ static void dm_queue_flush(struct mapped_device *md)
        queue_work(md->wq, &md->work);
 }
 
-static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
+static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
 
-       tio->info.flush_request = flush_nr;
+       tio->info.target_request_nr = request_nr;
 }
 
 /* Issue barrier requests to targets and wait for their completion. */
@@ -2363,7 +2501,7 @@ static int dm_rq_barrier(struct mapped_device *md)
                ti = dm_table_get_target(map, i);
                for (j = 0; j < ti->num_flush_requests; j++) {
                        clone = clone_rq(md->flush_request, md, GFP_NOIO);
-                       dm_rq_set_flush_nr(clone, j);
+                       dm_rq_set_target_request_nr(clone, j);
                        atomic_inc(&md->pending[rq_data_dir(clone)]);
                        map_request(ti, clone, md);
                }
@@ -2429,13 +2567,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
                goto out;
        }
 
-       /* cannot change the device type, once a table is bound */
-       if (md->map &&
-           (dm_table_get_type(md->map) != dm_table_get_type(table))) {
-               DMWARN("can't change the device type after a table is bound");
-               goto out;
-       }
-
        map = __bind(md, table, &limits);
 
 out: