/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

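/*
 * Disk statistics accounting for bio-based dm: charge each dm_io to the
 * gendisk counters and track the number of in-flight ios in md->pending,
 * which suspend uses to wait for outstanding io to drain.
 */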
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}

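/*
 * Completion handler for bios cloned in __map_bio. The target's end_io
 * hook, if any, may complete, requeue or take over the io; otherwise the
 * clone is freed and the original dm_io reference is dropped.
 */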
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

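/*
 * Pass one clone to the target's map function: dispatch it if it was
 * remapped, or undo the clone and record the error/requeue result.
 */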
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

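/*
 * Issue clones for as much of the remaining bio as the target at
 * ci->sector can take: the whole rest of the bio, a run of complete
 * bvecs, or successive fragments of a single bvec that spans targets.
 */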
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

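/*
 * merge_bvec_fn for the dm queue: report how many bytes can be added at
 * this offset without forcing a split, consulting the single underlying
 * target's merge method when it provides one.
 */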
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries. So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio_barrier(bio))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

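/*
 * Congestion callback for the backing_dev_info: pass the bdi bits down
 * to the live table unless io is currently blocked for suspend.
 */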
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

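/*
 * Allocate the next unused minor number.
 */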
static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

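/*
 * Release everything alloc_dev() set up: workqueue, mempools, bioset,
 * gendisk, request queue, minor number and the module reference.
 */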
static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->bdev) {
		unlock_fs(md);
		bdput(md->bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->bdev)
		__set_size(md, size);

	if (!size) {
		dm_table_destroy(t);
		return 0;
	}

	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_destroy(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

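/*
 * Drop a reference on the device. The last holder unbinds the table and
 * frees the device, suspending the targets first if the device is still
 * live.
 */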
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

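/*
 * Wait until no ios are in flight (md->pending reaches zero), optionally
 * returning -EINTR on a signal when called with TASK_INTERRUPTIBLE.
 */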
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

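/*
 * Barrier support: drain all in-flight io, issue the barrier bio itself
 * unless it is empty, then drain again and complete it with any error
 * recorded in md->barrier_error.
 */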
static int dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	return 0;
}

static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	int error = dm_flush(md);

	if (unlikely(error)) {
		bio_endio(bio, error);
		return;
	}
	if (bio_empty_barrier(bio)) {
		bio_endio(bio, 0);
		return;
	}

	__split_and_process_bio(md, bio);

	error = dm_flush(md);

	if (!error && md->barrier_error)
		error = md->barrier_error;

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, error);
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (bio_barrier(c))
			process_barrier(md, c);
		else
			__split_and_process_bio(md, c);

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

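/*
 * Allow io processing again and kick the workqueue so that deferred
 * bios are resubmitted.
 */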
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->bdev = bdget_disk(md->disk, 0);
		if (!md->bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

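/*
 * Resume a suspended device: let the targets resume, restart deferred
 * io and release the block device reference taken by dm_suspend.
 */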
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");