 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 * This file is released under the GPL.
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"
static const char *_name = DM_NAME;
static unsigned int major = 0;
static unsigned int _major = 0;
static DEFINE_SPINLOCK(_minor_lock);
 * One of these is allocated per bio.
        struct mapped_device *md;
        unsigned long start_time;
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 * For request-based dm.
 * One of these is allocated per request.
struct dm_rq_target_io {
        struct mapped_device *md;
        struct request *orig, clone;
 * For request-based dm.
 * One of these is allocated per bio.
struct dm_rq_clone_bio_info {
union map_info *dm_get_mapinfo(struct bio *bio)
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
#define MINOR_ALLOCED ((void *)-1)
 * Bits for the md->flags field.
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
 * Work processed by per-device workqueue.
struct mapped_device {
        struct rw_semaphore io_lock;
        struct mutex suspend_lock;
        struct request_queue *queue;
        struct gendisk *disk;
         * A list of ios that arrived while we were suspended.
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
        spinlock_t deferred_lock;
         * An error from the barrier request currently being processed.
         * Processing queue (flush/barriers)
        struct workqueue_struct *wq;
         * The current mapping.
        struct dm_table *map;
         * io objects are allocated from here.
        wait_queue_head_t eventq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */
         * freeze/thaw support requires holding onto a super block
        struct super_block *frozen_sb;
        struct block_device *bdev;
        /* forced geometry settings */
        struct hd_geometry geometry;
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);
        /* allocate a slab for the target ios */
        _tio_cache = KMEM_CACHE(dm_target_io, 0);
                goto out_free_io_cache;
        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
                goto out_free_tio_cache;
        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
        if (!_rq_bio_info_cache)
                goto out_free_rq_tio_cache;
        r = dm_uevent_init();
                goto out_free_rq_bio_info_cache;
        r = register_blkdev(_major, _name);
                goto out_uevent_exit;
out_free_rq_bio_info_cache:
        kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
static void local_exit(void)
        kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
        DMINFO("cleaned up");
static int (*_inits[])(void) __initdata = {
static void (*_exits[])(void) = {
static int __init dm_init(void)
        const int count = ARRAY_SIZE(_inits);
        for (i = 0; i < count; i++) {
static void __exit dm_exit(void)
        int i = ARRAY_SIZE(_exits);
 * Block device functions
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
        struct mapped_device *md;
        spin_lock(&_minor_lock);
        md = bdev->bd_disk->private_data;
        if (test_bit(DMF_FREEING, &md->flags) ||
            test_bit(DMF_DELETING, &md->flags)) {
        atomic_inc(&md->open_count);
        spin_unlock(&_minor_lock);
        return md ? 0 : -ENXIO;
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
        struct mapped_device *md = disk->private_data;
        atomic_dec(&md->open_count);
int dm_open_count(struct mapped_device *md)
        return atomic_read(&md->open_count);
 * Guarantees nothing is using the device before it's deleted.
int dm_lock_for_deletion(struct mapped_device *md)
        spin_lock(&_minor_lock);
        if (dm_open_count(md))
        set_bit(DMF_DELETING, &md->flags);
        spin_unlock(&_minor_lock);
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
        struct mapped_device *md = bdev->bd_disk->private_data;
        return dm_get_geometry(md, geo);
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *tgt;
        if (!map || !dm_table_get_size(map))
        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
        tgt = dm_table_get_target(map, 0);
        if (dm_suspended(md)) {
        if (tgt->type->ioctl)
                r = tgt->type->ioctl(tgt, cmd, arg);
static struct dm_io *alloc_io(struct mapped_device *md)
        return mempool_alloc(md->io_pool, GFP_NOIO);
static void free_io(struct mapped_device *md, struct dm_io *io)
        mempool_free(io, md->io_pool);
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
        mempool_free(tio, md->tio_pool);
static void start_io_acct(struct dm_io *io)
        struct mapped_device *md = io->md;
        io->start_time = jiffies;
        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
static void end_io_acct(struct dm_io *io)
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int rw = bio_data_dir(bio);
        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
         * After this is decremented the bio must not be touched if it is
         * a barrier.
        dm_disk(md)->part0.in_flight = pending =
                atomic_dec_return(&md->pending);
        /* nudge anyone waiting on suspend queue */
 * Add the bio to the list of deferred io.
static void queue_io(struct mapped_device *md, struct bio *bio)
        down_write(&md->io_lock);
        spin_lock_irq(&md->deferred_lock);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irq(&md->deferred_lock);
        if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
                queue_work(md->wq, &md->work);
        up_write(&md->io_lock);
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
struct dm_table *dm_get_table(struct mapped_device *md)
        read_lock(&md->map_lock);
        read_unlock(&md->map_lock);
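/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * of dm_get_table() own a reference on the returned table and must drop it
 * with dm_table_put() when they are done.  "example_table_size" below is a
 * hypothetical helper, not a function in this file.
 *
 *      static sector_t example_table_size(struct mapped_device *md)
 *      {
 *              sector_t size = 0;
 *              struct dm_table *map = dm_get_table(md);
 *
 *              if (map) {
 *                      size = dm_table_get_size(map);
 *                      dm_table_put(map);
 *              }
 *              return size;
 *      }
 */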
 * Get the geometry associated with a dm device
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
 * Set the geometry of a device.
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
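        /*
         * Worked example (illustrative): with geo->cylinders = 1000,
         * geo->heads = 255 and geo->sectors = 63, sz = 1000 * 255 * 63 =
         * 16,065,000 sectors, so any requested start sector above
         * 16,065,000 is warned about and the geometry is refused.
         */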
/*-----------------------------------------------------------------
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
static int __noflush_suspending(struct mapped_device *md)
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
static void dec_pending(struct dm_io *io, int error)
        struct mapped_device *md = io->md;
        /* Push-back supersedes any I/O errors */
        if (error && !(io->error > 0 && __noflush_suspending(md)))
        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                         * Target requested pushing back the I/O.
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md)) {
                                if (!bio_barrier(io->bio))
                                        bio_list_add_head(&md->deferred,
                        /* noflush suspend was interrupted. */
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                io_error = io->error;
                if (bio_barrier(bio)) {
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
                         * Note that you can't touch the bio after end_io_acct
                        if (!md->barrier_error && io_error != -EOPNOTSUPP)
                                md->barrier_error = io_error;
                        if (io_error != DM_ENDIO_REQUEUE) {
                                trace_block_bio_complete(md->queue, bio);
                                bio_endio(bio, io_error);
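/*
 * Reference-counting note (informational): __split_and_process_bio() starts
 * each dm_io with io_count = 1, __map_bio() takes one further reference per
 * clone it dispatches, and every completion path ends in dec_pending().  The
 * extra initial reference is dropped at the end of __split_and_process_bio(),
 * so only the final dec_pending() call completes the original bio.
 */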
static void clone_endio(struct bio *bio, int error)
        struct dm_target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;
        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0 || r == DM_ENDIO_REQUEUE)
                         * error and requeue request are handled
                else if (r == DM_ENDIO_INCOMPLETE)
                        /* The target will handle the io */
                        DMWARN("unimplemented target endio return value: %d", r);
         * Store md for cleanup instead of tio which is about to get freed.
        bio->bi_private = md->bs;
        dec_pending(io, error);
static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;
         * Does the target need to split even further?
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
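                /*
                 * Worked example (illustrative), assuming split_io is a power
                 * of two as the mask arithmetic requires: the expression
                 * rounds offset + split_io down to the previous multiple of
                 * split_io.  With split_io = 8 and offset = 5,
                 * (5 + 8) & ~7 = 8; subtracting the offset leaves 3 sectors
                 * before the next split_io boundary, which caps len.
                 */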
static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct dm_target_io *tio)
        struct mapped_device *md;
        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;
         * Map the clone.  If r == 0 we don't need to do
         * anything, the target has assumed ownership of this io.
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_sector;
        r = ti->type->map(ti, clone, &tio->info);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */
                trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
                                  tio->io->bio->bi_bdev->bd_dev, sector);
                generic_make_request(clone);
        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
                /* error the io and bail out, or requeue it if needed */
                dec_pending(tio->io, r);
                 * Store bio_set for cleanup.
                clone->bi_private = md->bs;
                DMWARN("unimplemented target map return value: %d", r);
        struct mapped_device *md;
        struct dm_table *map;
        sector_t sector_count;
static void dm_bio_destructor(struct bio *bio)
        struct bio_set *bs = bio->bi_private;
 * Creates a little bio that just does part of a bvec.
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len, struct bio_set *bs)
        struct bio_vec *bv = bio->bi_io_vec + idx;
        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
        clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;
        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;
        clone->bi_flags |= 1 << BIO_CLONED;
        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO);
                bio_integrity_trim(clone,
                                   bio_sector_offset(bio, idx, offset), len);
 * Creates a bio that consists of a range of complete bvecs.
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len, struct bio_set *bs)
        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
        clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);
        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO);
                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
                        bio_integrity_trim(clone,
                                           bio_sector_offset(bio, idx, 0), len);
static struct dm_target_io *alloc_tio(struct clone_info *ci,
                                      struct dm_target *ti)
        struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
        memset(&tio->info, 0, sizeof(tio->info));
static void __flush_target(struct clone_info *ci, struct dm_target *ti,
        struct dm_target_io *tio = alloc_tio(ci, ti);
        tio->info.flush_request = flush_nr;
        clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
        __bio_clone(clone, ci->bio);
        clone->bi_destructor = dm_bio_destructor;
        __map_bio(ti, clone, tio);
static int __clone_and_map_empty_barrier(struct clone_info *ci)
        unsigned target_nr = 0, flush_nr;
        struct dm_target *ti;
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                for (flush_nr = 0; flush_nr < ti->num_flush_requests;
                        __flush_target(ci, ti, flush_nr);
        ci->sector_count = 0;
static int __clone_and_map(struct clone_info *ci)
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti;
        sector_t len = 0, max;
        struct dm_target_io *tio;
        if (unlikely(bio_empty_barrier(bio)))
                return __clone_and_map_empty_barrier(ci);
        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
        max = max_io_len(ci->md, ci->sector, ti);
         * Allocate a target io object.
        tio = alloc_tio(ci, ti);
        if (ci->sector_count <= max) {
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                clone = clone_bio(bio, ci->sector, ci->idx,
                                  bio->bi_vcnt - ci->idx, ci->sector_count,
                __map_bio(ti, clone, tio);
                ci->sector_count = 0;
        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                sector_t remaining = max;
                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);
                        if (bv_len > remaining)
                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                __map_bio(ti, clone, tio);
                ci->sector_count -= len;
                 * Handle a bvec that must be split between two or more targets.
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
                sector_t remaining = to_sector(bv->bv_len);
                unsigned int offset = 0;
                        ti = dm_table_find_target(ci->map, ci->sector);
                        if (!dm_target_is_valid(ti))
                        max = max_io_len(ci->md, ci->sector, ti);
                        tio = alloc_tio(ci, ti);
                        len = min(remaining, max);
                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                        __map_bio(ti, clone, tio);
                        ci->sector_count -= len;
                        offset += to_bytes(len);
                } while (remaining -= len);
 * Split the bio into several clones and submit it to targets.
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
        struct clone_info ci;
        ci.map = dm_get_table(md);
        if (unlikely(!ci.map)) {
                if (!bio_barrier(bio))
                        if (!md->barrier_error)
                                md->barrier_error = -EIO;
        ci.io = alloc_io(md);
        atomic_set(&ci.io->io_count, 1);
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        if (unlikely(bio_empty_barrier(bio)))
        ci.idx = bio->bi_idx;
        start_io_acct(ci.io);
        while (ci.sector_count && !error)
                error = __clone_and_map(&ci);
        /* drop the extra reference count */
        dec_pending(ci.io, error);
        dm_table_put(ci.map);
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/
static int dm_merge_bvec(struct request_queue *q,
                         struct bvec_merge_data *bvm,
                         struct bio_vec *biovec)
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *ti;
        sector_t max_sectors;
        ti = dm_table_find_target(map, bvm->bi_sector);
        if (!dm_target_is_valid(ti))
         * Find maximum amount of I/O that won't need splitting
        max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
                          (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
         * merge_bvec_fn() returns the number of bytes
         * it can accept at this offset.
         * max is the precomputed maximal io size.
        if (max_size && ti->type->merge)
                max_size = ti->type->merge(ti, bvm, biovec, max_size);
         * If the target doesn't support the merge method and some of the
         * devices provided their merge_bvec method (we know this by looking
         * at queue_max_hw_sectors), then we can't allow bios with multiple
         * vector entries.  So always set max_size to 0, and the code below
         * allows just one page.
        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
         * Always allow an entire first page
        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
                max_size = biovec->bv_len;
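        /*
         * Worked example (illustrative): if max_io_len() allows 128 sectors
         * and nothing has been merged into the bio yet (bvm->bi_size == 0),
         * max_size = (128 << SECTOR_SHIFT) - 0 = 65536 bytes.  A target
         * merge method may shrink that further, but the first page of a new
         * bio is always accepted by the check above.
         */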
 * The request function that just remaps the bio built up by
static int dm_request(struct request_queue *q, struct bio *bio)
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
        down_read(&md->io_lock);
        cpu = part_stat_lock();
        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
         * If we're suspended or the thread is processing barriers
         * we have to queue this io for later.
        if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
            unlikely(bio_barrier(bio))) {
                up_read(&md->io_lock);
                if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
                    bio_rw(bio) == READA) {
        __split_and_process_bio(md, bio);
        up_read(&md->io_lock);
static void dm_unplug_all(struct request_queue *q)
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
                dm_table_unplug_all(map);
static int dm_any_congested(void *congested_data, int bdi_bits)
        struct mapped_device *md = congested_data;
        struct dm_table *map;
        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                map = dm_get_table(md);
                        r = dm_table_any_congested(map, bdi_bits);
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);
static void free_minor(int minor)
        spin_lock(&_minor_lock);
        idr_remove(&_minor_idr, minor);
        spin_unlock(&_minor_lock);
 * See if the device with a specific minor # is free.
static int specific_minor(int minor)
        if (minor >= (1 << MINORBITS))
        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        spin_lock(&_minor_lock);
        if (idr_find(&_minor_idr, minor)) {
        r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
                idr_remove(&_minor_idr, m);
        spin_unlock(&_minor_lock);
static int next_free_minor(int *minor)
        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        spin_lock(&_minor_lock);
        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
        spin_unlock(&_minor_lock);
static struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
 * Allocate and initialise a blank device with a given minor.
static struct mapped_device *alloc_dev(int minor)
        struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
                DMWARN("unable to allocate device, out of memory.");
        if (!try_module_get(THIS_MODULE))
                goto bad_module_get;
        /* get a minor number for the dev */
        if (minor == DM_ANY_MINOR)
                r = next_free_minor(&minor);
                r = specific_minor(minor);
        init_rwsem(&md->io_lock);
        mutex_init(&md->suspend_lock);
        spin_lock_init(&md->deferred_lock);
        rwlock_init(&md->map_lock);
        atomic_set(&md->holders, 1);
        atomic_set(&md->open_count, 0);
        atomic_set(&md->event_nr, 0);
        atomic_set(&md->uevent_seq, 0);
        INIT_LIST_HEAD(&md->uevent_list);
        spin_lock_init(&md->uevent_lock);
        md->queue = blk_alloc_queue(GFP_KERNEL);
        md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
        blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
        md->queue->unplug_fn = dm_unplug_all;
        blk_queue_merge_bvec(md->queue, dm_merge_bvec);
        md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
        md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
        md->bs = bioset_create(16, 0);
        md->disk = alloc_disk(1);
        atomic_set(&md->pending, 0);
        init_waitqueue_head(&md->wait);
        INIT_WORK(&md->work, dm_wq_work);
        init_waitqueue_head(&md->eventq);
        md->disk->major = _major;
        md->disk->first_minor = minor;
        md->disk->fops = &dm_blk_dops;
        md->disk->queue = md->queue;
        md->disk->private_data = md;
        sprintf(md->disk->disk_name, "dm-%d", minor);
        format_dev_t(md->name, MKDEV(_major, minor));
        md->wq = create_singlethread_workqueue("kdmflush");
        md->bdev = bdget_disk(md->disk, 0);
        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
        old_md = idr_replace(&_minor_idr, md, minor);
        spin_unlock(&_minor_lock);
        BUG_ON(old_md != MINOR_ALLOCED);
        destroy_workqueue(md->wq);
        bioset_free(md->bs);
        mempool_destroy(md->tio_pool);
        mempool_destroy(md->io_pool);
        blk_cleanup_queue(md->queue);
        module_put(THIS_MODULE);
static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
        int minor = MINOR(disk_devt(md->disk));
        destroy_workqueue(md->wq);
        mempool_destroy(md->tio_pool);
        mempool_destroy(md->io_pool);
        bioset_free(md->bs);
        blk_integrity_unregister(md->disk);
        del_gendisk(md->disk);
        spin_lock(&_minor_lock);
        md->disk->private_data = NULL;
        spin_unlock(&_minor_lock);
        blk_cleanup_queue(md->queue);
        module_put(THIS_MODULE);
 * Bind a table to the device.
static void event_callback(void *context)
        unsigned long flags;
        struct mapped_device *md = (struct mapped_device *) context;
        spin_lock_irqsave(&md->uevent_lock, flags);
        list_splice_init(&md->uevent_list, &uevents);
        spin_unlock_irqrestore(&md->uevent_lock, flags);
        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
        atomic_inc(&md->event_nr);
        wake_up(&md->eventq);
static void __set_size(struct mapped_device *md, sector_t size)
        set_capacity(md->disk, size);
        mutex_lock(&md->bdev->bd_inode->i_mutex);
        i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
        mutex_unlock(&md->bdev->bd_inode->i_mutex);
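        /*
         * Note (illustrative): size is in 512-byte sectors, so the inode
         * size written above is size << SECTOR_SHIFT bytes, e.g. a table of
         * 2097152 sectors yields 2097152 * 512 = 1073741824 bytes (1 GiB).
         */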
static int __bind(struct mapped_device *md, struct dm_table *t)
        struct request_queue *q = md->queue;
        size = dm_table_get_size(t);
         * Wipe any geometry if the size of the table changed.
        if (size != get_capacity(md->disk))
                memset(&md->geometry, 0, sizeof(md->geometry));
        __set_size(md, size);
                dm_table_destroy(t);
        dm_table_event_callback(t, event_callback, md);
        write_lock(&md->map_lock);
        dm_table_set_restrictions(t, q);
        write_unlock(&md->map_lock);
static void __unbind(struct mapped_device *md)
        struct dm_table *map = md->map;
        dm_table_event_callback(map, NULL, NULL);
        write_lock(&md->map_lock);
        write_unlock(&md->map_lock);
        dm_table_destroy(map);
 * Constructor for a new device.
int dm_create(int minor, struct mapped_device **result)
        struct mapped_device *md;
        md = alloc_dev(minor);
static struct mapped_device *dm_find_md(dev_t dev)
        struct mapped_device *md;
        unsigned minor = MINOR(dev);
        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
        spin_lock(&_minor_lock);
        md = idr_find(&_minor_idr, minor);
        if (md && (md == MINOR_ALLOCED ||
                   (MINOR(disk_devt(dm_disk(md))) != minor) ||
                   test_bit(DMF_FREEING, &md->flags))) {
        spin_unlock(&_minor_lock);
struct mapped_device *dm_get_md(dev_t dev)
        struct mapped_device *md = dm_find_md(dev);
void *dm_get_mdptr(struct mapped_device *md)
        return md->interface_ptr;
void dm_set_mdptr(struct mapped_device *md, void *ptr)
        md->interface_ptr = ptr;
void dm_get(struct mapped_device *md)
        atomic_inc(&md->holders);
const char *dm_device_name(struct mapped_device *md)
EXPORT_SYMBOL_GPL(dm_device_name);
void dm_put(struct mapped_device *md)
        struct dm_table *map;
        BUG_ON(test_bit(DMF_FREEING, &md->flags));
        if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
                map = dm_get_table(md);
                idr_replace(&_minor_idr, MINOR_ALLOCED,
                            MINOR(disk_devt(dm_disk(md))));
                set_bit(DMF_FREEING, &md->flags);
                spin_unlock(&_minor_lock);
                if (!dm_suspended(md)) {
                        dm_table_presuspend_targets(map);
                        dm_table_postsuspend_targets(map);
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
        DECLARE_WAITQUEUE(wait, current);
        dm_unplug_all(md->queue);
        add_wait_queue(&md->wait, &wait);
                set_current_state(interruptible);
                if (!atomic_read(&md->pending))
                if (interruptible == TASK_INTERRUPTIBLE &&
                    signal_pending(current)) {
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&md->wait, &wait);
static void dm_flush(struct mapped_device *md)
        dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
static void process_barrier(struct mapped_device *md, struct bio *bio)
        md->barrier_error = 0;
        if (!bio_empty_barrier(bio)) {
                __split_and_process_bio(md, bio);
        if (md->barrier_error != DM_ENDIO_REQUEUE)
                bio_endio(bio, md->barrier_error);
                spin_lock_irq(&md->deferred_lock);
                bio_list_add_head(&md->deferred, bio);
                spin_unlock_irq(&md->deferred_lock);
 * Process the deferred bios
static void dm_wq_work(struct work_struct *work)
        struct mapped_device *md = container_of(work, struct mapped_device,
        down_write(&md->io_lock);
        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                spin_lock_irq(&md->deferred_lock);
                c = bio_list_pop(&md->deferred);
                spin_unlock_irq(&md->deferred_lock);
                        clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
                up_write(&md->io_lock);
                        process_barrier(md, c);
                        __split_and_process_bio(md, c);
                down_write(&md->io_lock);
        up_write(&md->io_lock);
static void dm_queue_flush(struct mapped_device *md)
        clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
        smp_mb__after_clear_bit();
        queue_work(md->wq, &md->work);
 * Swap in a new table (destroying the old one).
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
        mutex_lock(&md->suspend_lock);
        /* device must be suspended */
        if (!dm_suspended(md))
        r = __bind(md, table);
        mutex_unlock(&md->suspend_lock);
 * Functions to lock and unlock any filesystem running on the device.
static int lock_fs(struct mapped_device *md)
        WARN_ON(md->frozen_sb);
        md->frozen_sb = freeze_bdev(md->bdev);
        if (IS_ERR(md->frozen_sb)) {
                r = PTR_ERR(md->frozen_sb);
                md->frozen_sb = NULL;
        set_bit(DMF_FROZEN, &md->flags);
static void unlock_fs(struct mapped_device *md)
        if (!test_bit(DMF_FROZEN, &md->flags))
        thaw_bdev(md->bdev, md->frozen_sb);
        md->frozen_sb = NULL;
        clear_bit(DMF_FROZEN, &md->flags);
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
        struct dm_table *map = NULL;
        int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
        int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
        mutex_lock(&md->suspend_lock);
        if (dm_suspended(md)) {
        map = dm_get_table(md);
         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
         * This flag is cleared before dm_suspend returns.
                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);
         * Flush I/O to the device. noflush supersedes do_lockfs,
         * because lock_fs() needs to flush I/Os.
        if (!noflush && do_lockfs) {
         * Here we must make sure that no processes are submitting requests
         * to target drivers i.e. no one may be executing
         * __split_and_process_bio. This is called from dm_request and
         * dm_wq_work.
         * To get all processes out of __split_and_process_bio in dm_request,
         * we take the write lock. To prevent any process from reentering
         * __split_and_process_bio from dm_request, we set
         * DMF_QUEUE_IO_TO_THREAD.
         * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
         * and call flush_workqueue(md->wq). flush_workqueue will wait until
         * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
         * further calls to __split_and_process_bio from dm_wq_work.
        down_write(&md->io_lock);
        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
        set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
        up_write(&md->io_lock);
        flush_workqueue(md->wq);
         * At this point no more requests are entering target request routines.
         * We call dm_wait_for_completion to wait for all existing requests
        r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
        down_write(&md->io_lock);
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        up_write(&md->io_lock);
        /* were we interrupted? */
                goto out; /* pushback list is already flushed, so skip flush */
         * If dm_wait_for_completion returned 0, the device is completely
         * quiescent now. There is no request-processing activity. All new
         * requests are being added to md->deferred list.
        dm_table_postsuspend_targets(map);
        set_bit(DMF_SUSPENDED, &md->flags);
        mutex_unlock(&md->suspend_lock);
int dm_resume(struct mapped_device *md)
        struct dm_table *map = NULL;
        mutex_lock(&md->suspend_lock);
        if (!dm_suspended(md))
        map = dm_get_table(md);
        if (!map || !dm_table_get_size(map))
        r = dm_table_resume_targets(map);
        clear_bit(DMF_SUSPENDED, &md->flags);
        dm_table_unplug_all(map);
        dm_kobject_uevent(md);
        mutex_unlock(&md->suspend_lock);
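/*
 * Usage sketch (illustrative only): the table-swap sequence a caller such as
 * the dm ioctl code would drive, assuming "new_table" has already been built
 * and populated.  dm_swap_table() insists that the device is suspended, so
 * the calls are ordered as the comment above dm_suspend() describes.
 *
 *      r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *      if (!r) {
 *              r = dm_swap_table(md, new_table);
 *              if (!r)
 *                      r = dm_resume(md);
 *      }
 */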
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
        kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
uint32_t dm_next_uevent_seq(struct mapped_device *md)
        return atomic_add_return(1, &md->uevent_seq);
uint32_t dm_get_event_nr(struct mapped_device *md)
        return atomic_read(&md->event_nr);
int dm_wait_event(struct mapped_device *md, int event_nr)
        return wait_event_interruptible(md->eventq,
                (event_nr != atomic_read(&md->event_nr)));
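/*
 * Usage sketch (illustrative only): a caller snapshots the event counter,
 * kicks off whatever it is waiting on, and then sleeps until the counter
 * moves.  Any event_callback() run bumps md->event_nr and wakes md->eventq,
 * which satisfies the wait.
 *
 *      uint32_t nr = dm_get_event_nr(md);
 *      ...
 *      if (dm_wait_event(md, nr))
 *              return -ERESTARTSYS;    (interrupted by a signal)
 */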
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
        unsigned long flags;
        spin_lock_irqsave(&md->uevent_lock, flags);
        list_add(elist, &md->uevent_list);
        spin_unlock_irqrestore(&md->uevent_lock, flags);
 * The gendisk is only valid as long as you have a reference
 * count on the md.
struct gendisk *dm_disk(struct mapped_device *md)
struct kobject *dm_kobject(struct mapped_device *md)
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of the md structure
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
        struct mapped_device *md;
        md = container_of(kobj, struct mapped_device, kobj);
        if (&md->kobj != kobj)
        if (test_bit(DMF_FREEING, &md->flags) ||
            test_bit(DMF_DELETING, &md->flags))
int dm_suspended(struct mapped_device *md)
        return test_bit(DMF_SUSPENDED, &md->flags);
int dm_noflush_suspending(struct dm_target *ti)
        struct mapped_device *md = dm_table_get_md(ti->table);
        int r = __noflush_suspending(md);
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
static struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
        .getgeo = dm_blk_getgeo,
        .owner = THIS_MODULE
EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");