/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        struct bio *bio;
        atomic_t io_count;
        unsigned long start_time;
};

/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
};

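/*
 * Return the per-target map_info for a bio that is currently owned by
 * device-mapper (its bi_private points at a struct target_io), or NULL
 * otherwise.
 */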
union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct target_io *)bio->bi_private)->info;
        return NULL;
}

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2

struct mapped_device {
        struct rw_semaphore io_lock;
        struct semaphore suspend_lock;
        rwlock_t map_lock;
        atomic_t holders;

        unsigned long flags;

        request_queue_t *queue;
        struct gendisk *disk;
        char name[16];

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending;
        wait_queue_head_t wait;
        struct bio_list deferred;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;
        mempool_t *tio_pool;

        /*
         * Event handling.
         */
        atomic_t event_nr;
        wait_queue_head_t eventq;

        /*
         * freeze/thaw support requires holding onto a super block.
         */
        struct super_block *frozen_sb;
        struct block_device *suspended_bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;
};

#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;

static int __init local_init(void)
{
        int r;

        dm_set = bioset_create(16, 16, 4);
        if (!dm_set)
                return -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = kmem_cache_create("dm_io",
                                      sizeof(struct dm_io), 0, 0, NULL, NULL);
        if (!_io_cache)
                return -ENOMEM;

        /* allocate a slab for the target ios */
        _tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
                                       0, 0, NULL, NULL);
        if (!_tio_cache) {
                kmem_cache_destroy(_io_cache);
                return -ENOMEM;
        }

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0) {
                kmem_cache_destroy(_tio_cache);
                kmem_cache_destroy(_io_cache);
                return r;
        }

        if (!_major)
                _major = r;

        return 0;
}

static void local_exit(void)
{
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);

        bioset_free(dm_set);

        if (unregister_blkdev(_major, _name) < 0)
                DMERR("unregister_blkdev failed");

        _major = 0;

        DMINFO("cleaned up");
}

int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_interface_init,
};

void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_interface_exit,
};

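/*
 * Run the per-subsystem init functions in order; on failure, unwind the
 * ones that already succeeded by calling the matching exit functions.
 */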
static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

 bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
        struct mapped_device *md;

        md = inode->i_bdev->bd_disk->private_data;
        dm_get(md);
        return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
        struct mapped_device *md;

        md = inode->i_bdev->bd_disk->private_data;
        dm_put(md);
        return 0;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}

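/*
 * Allocation helpers for the per-bio and per-target structures; these
 * come from dedicated mempools so forward progress is guaranteed even
 * under memory pressure (GFP_NOIO).
 */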
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
        return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}

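/*
 * Disk statistics accounting.  start_io_acct() charges a new io to the
 * dm disk; end_io_acct() records its duration and returns true when no
 * more ios are pending (used to wake anyone waiting in dm_suspend).
 */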
static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;

        io->start_time = jiffies;

        preempt_disable();
        disk_round_stats(dm_disk(md));
        preempt_enable();
        dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}

static int end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending;
        int rw = bio_data_dir(bio);

        preempt_disable();
        disk_round_stats(dm_disk(md));
        preempt_enable();
        dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

        disk_stat_add(dm_disk(md), ticks[rw], duration);

        return !pending;
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->io_lock);

        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_write(&md->io_lock);
                return 1;
        }

        bio_list_add(&md->deferred, bio);

        up_write(&md->io_lock);
        return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this function
 * to access the md->map field, and make sure they call dm_table_put()
 * when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
        struct dm_table *t;

        read_lock(&md->map_lock);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock(&md->map_lock);

        return t;
}

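/*
 * A typical dm_get_table() caller, for illustration:
 *
 *        struct dm_table *map = dm_get_table(md);
 *        if (map) {
 *                ... use the table ...
 *                dm_table_put(map);
 *        }
 */
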
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use, I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
        if (error)
                io->error = error;

        if (atomic_dec_and_test(&io->io_count)) {
                if (end_io_acct(io))
                        /* nudge anyone waiting on suspend queue */
                        wake_up(&io->md->wait);

                blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);

                bio_endio(io->bio, io->bio->bi_size, io->error);
                free_io(io->md, io);
        }
}

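/*
 * Completion handler for a cloned bio.  Gives the target's optional
 * end_io hook a chance to inspect the result (and possibly request a
 * retry), then drops the clone and the reference it held on the
 * original io.
 */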
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
        int r = 0;
        struct target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (bio->bi_size)
                return 1;

        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                error = -EIO;

        if (endio) {
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0)
                        error = r;

                else if (r > 0)
                        /* the target wants another shot at the io */
                        return 1;
        }

        free_tio(io->md, tio);
        dec_pending(io, error);
        bio_put(bio);
        return r;
}

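/*
 * Return the maximum number of sectors that can be submitted to the
 * target starting at 'sector' without crossing the target boundary or,
 * if the target sets split_io, a split_io-aligned boundary.
 */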
static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
{
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;

        /*
         * Does the target need to split even further?
         */
        if (ti->split_io) {
                sector_t boundary;
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
                        len = boundary;
        }

        return len;
}

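/*
 * Hand a cloned bio to the target's map function.  A positive return
 * means the clone was remapped and must be dispatched here; zero means
 * the target has taken ownership of the io; a negative value errors
 * the io.
 */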
static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct target_io *tio)
{
        int r;
        sector_t sector;

        /*
         * Sanity checks.
         */
        BUG_ON(!clone->bi_size);

        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;

        /*
         * Map the clone.  If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_sector;
        r = ti->type->map(ti, clone, &tio->info);
        if (r > 0) {
                /* the bio has been remapped so dispatch it */

                blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
                                    tio->io->bio->bi_bdev->bd_dev, sector,
                                    clone->bi_sector);

                generic_make_request(clone);
        }

        else if (r < 0) {
                /* error the io and bail out */
                struct dm_io *io = tio->io;
                free_tio(tio->io->md, tio);
                dec_pending(io, r);
                bio_put(clone);
        }
}

struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
        unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
        bio_free(bio, dm_set);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len)
{
        struct bio *clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;

        clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
        clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;

        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;

        return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len)
{
        struct bio *clone;

        clone = bio_clone(bio, GFP_NOIO);
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);

        return clone;
}

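/*
 * Clone and map the next chunk of ci->bio: either the whole remainder
 * for a single target, as many complete bvecs as fit in the current
 * target, or part of a bvec that straddles a target boundary.
 */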
static void __clone_and_map(struct clone_info *ci)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
        sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
        struct target_io *tio;

        /*
         * Allocate a target io object.
         */
        tio = alloc_tio(ci->md);
        tio->io = ci->io;
        tio->ti = ti;
        memset(&tio->info, 0, sizeof(tio->info));

        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
                clone = clone_bio(bio, ci->sector, ci->idx,
                                  bio->bi_vcnt - ci->idx, ci->sector_count);
                __map_bio(ti, clone, tio);
                ci->sector_count = 0;

        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                 */
                int i;
                sector_t remaining = max;
                sector_t bv_len;

                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);

                        if (bv_len > remaining)
                                break;

                        remaining -= bv_len;
                        len += bv_len;
                }

                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx = i;

        } else {
                /*
                 * Handle a bvec that must be split between two or more targets.
                 */
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
                sector_t remaining = to_sector(bv->bv_len);
                unsigned int offset = 0;

                do {
                        if (offset) {
                                ti = dm_table_find_target(ci->map, ci->sector);
                                max = max_io_len(ci->md, ci->sector, ti);

                                tio = alloc_tio(ci->md);
                                tio->io = ci->io;
                                tio->ti = ti;
                                memset(&tio->info, 0, sizeof(tio->info));
                        }

                        len = min(remaining, max);

                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len);

                        __map_bio(ti, clone, tio);

                        ci->sector += len;
                        ci->sector_count -= len;
                        offset += to_bytes(len);
                } while (remaining -= len);

                ci->idx++;
        }
}

/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
        struct clone_info ci;

        ci.map = dm_get_table(md);
        if (!ci.map) {
                bio_io_error(bio, bio->bi_size);
                return;
        }

        ci.md = md;
        ci.bio = bio;
        ci.io = alloc_io(md);
        ci.io->error = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        ci.idx = bio->bi_idx;

        start_io_acct(ci.io);
        while (ci.sector_count)
                __clone_and_map(&ci);

        /* drop the extra reference count */
        dec_pending(ci.io, 0);
        dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
        int r;
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;

        down_read(&md->io_lock);

        disk_stat_inc(dm_disk(md), ios[rw]);
        disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

        /*
         * If we're suspended we have to queue
         * this io for later.
         */
        while (test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_read(&md->io_lock);

                if (bio_rw(bio) == READA) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;
                }

                r = queue_io(md, bio);
                if (r < 0) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;

                } else if (r == 0)
                        return 0; /* deferred successfully */

                /*
                 * We're in a while loop, because someone could suspend
                 * before we get to the following read lock.
                 */
                down_read(&md->io_lock);
        }

        __split_bio(md, bio);
        up_read(&md->io_lock);
        return 0;
}

static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
                        sector_t *error_sector)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        int ret = -ENXIO;

        if (map) {
                ret = dm_table_flush_all(map);
                dm_table_put(map);
        }

        return ret;
}

static void dm_unplug_all(request_queue_t *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);

        if (map) {
                dm_table_unplug_all(map);
                dm_table_put(map);
        }
}

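/*
 * Report backing_dev congestion.  While io is blocked (suspend in
 * progress) or no table is bound, every queried bit is reported as
 * congested; otherwise the table's underlying devices are consulted.
 */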
static int dm_any_congested(void *congested_data, int bdi_bits)
{
        int r;
        struct mapped_device *md = (struct mapped_device *) congested_data;
        struct dm_table *map = dm_get_table(md);

        if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
                r = bdi_bits;
        else
                r = dm_table_any_congested(map, bdi_bits);

        dm_table_put(map);
        return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
        down(&_minor_lock);
        idr_remove(&_minor_idr, minor);
        up(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
        int r, m;

        if (minor >= (1 << MINORBITS))
                return -EINVAL;

        down(&_minor_lock);

        if (idr_find(&_minor_idr, minor)) {
                r = -EBUSY;
                goto out;
        }

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r) {
                r = -ENOMEM;
                goto out;
        }

        r = idr_get_new_above(&_minor_idr, md, minor, &m);
        if (r) {
                goto out;
        }

        if (m != minor) {
                idr_remove(&_minor_idr, m);
                r = -EBUSY;
                goto out;
        }

out:
        up(&_minor_lock);
        return r;
}

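/*
 * Allocate the lowest unused minor number for a new device.
 */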
static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
        int r;
        unsigned int m;

        down(&_minor_lock);

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r) {
                r = -ENOMEM;
                goto out;
        }

        r = idr_get_new(&_minor_idr, md, &m);
        if (r) {
                goto out;
        }

        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
                r = -ENOSPC;
                goto out;
        }

        *minor = m;

out:
        up(&_minor_lock);
        return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
        int r;
        struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

        if (!md) {
                DMWARN("unable to allocate device, out of memory.");
                return NULL;
        }

        /* get a minor number for the dev */
        r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
        if (r < 0)
                goto bad1;

        memset(md, 0, sizeof(*md));
        init_rwsem(&md->io_lock);
        init_MUTEX(&md->suspend_lock);
        rwlock_init(&md->map_lock);
        atomic_set(&md->holders, 1);
        atomic_set(&md->event_nr, 0);

        md->queue = blk_alloc_queue(GFP_KERNEL);
        if (!md->queue)
                goto bad1;

        md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
        md->queue->unplug_fn = dm_unplug_all;
        md->queue->issue_flush_fn = dm_flush_all;

        md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
        if (!md->io_pool)
                goto bad2;

        md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
        if (!md->tio_pool)
                goto bad3;

        md->disk = alloc_disk(1);
        if (!md->disk)
                goto bad4;

        md->disk->major = _major;
        md->disk->first_minor = minor;
        md->disk->fops = &dm_blk_dops;
        md->disk->queue = md->queue;
        md->disk->private_data = md;
        sprintf(md->disk->disk_name, "dm-%d", minor);
        add_disk(md->disk);
        format_dev_t(md->name, MKDEV(_major, minor));

        atomic_set(&md->pending, 0);
        init_waitqueue_head(&md->wait);
        init_waitqueue_head(&md->eventq);

        return md;

 bad4:
        mempool_destroy(md->tio_pool);
 bad3:
        mempool_destroy(md->io_pool);
 bad2:
        blk_cleanup_queue(md->queue);
        free_minor(minor);
 bad1:
        kfree(md);
        return NULL;
}

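/*
 * Release everything alloc_dev() set up, thawing and dropping the
 * suspended bdev first if one is still held.
 */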
static void free_dev(struct mapped_device *md)
{
        unsigned int minor = md->disk->first_minor;

        if (md->suspended_bdev) {
                thaw_bdev(md->suspended_bdev, NULL);
                bdput(md->suspended_bdev);
        }
        mempool_destroy(md->tio_pool);
        mempool_destroy(md->io_pool);
        del_gendisk(md->disk);
        free_minor(minor);
        put_disk(md->disk);
        blk_cleanup_queue(md->queue);
        kfree(md);
}

/*
 * Called by the table when it raises an event; bumps the event counter
 * and wakes anyone sleeping in dm_wait_event().
 */
static void event_callback(void *context)
{
        struct mapped_device *md = (struct mapped_device *) context;

        atomic_inc(&md->event_nr);
        wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
        set_capacity(md->disk, size);

        mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
        i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
        mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

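/*
 * Bind a table to the device: update the capacity, install the table
 * under the map_lock and apply its queue restrictions.
 */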
static int __bind(struct mapped_device *md, struct dm_table *t)
{
        request_queue_t *q = md->queue;
        sector_t size;

        size = dm_table_get_size(t);

        /*
         * Wipe any geometry if the size of the table changed.
         */
        if (size != get_capacity(md->disk))
                memset(&md->geometry, 0, sizeof(md->geometry));

        __set_size(md, size);
        if (size == 0)
                return 0;

        dm_table_get(t);
        dm_table_event_callback(t, event_callback, md);

        write_lock(&md->map_lock);
        md->map = t;
        dm_table_set_restrictions(t, q);
        write_unlock(&md->map_lock);

        return 0;
}

static void __unbind(struct mapped_device *md)
{
        struct dm_table *map = md->map;

        if (!map)
                return;

        dm_table_event_callback(map, NULL, NULL);
        write_lock(&md->map_lock);
        md->map = NULL;
        write_unlock(&md->map_lock);
        dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
                      struct mapped_device **result)
{
        struct mapped_device *md;

        md = alloc_dev(minor, persistent);
        if (!md)
                return -ENXIO;

        *result = md;
        return 0;
}

int dm_create(struct mapped_device **result)
{
        return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
        return create_aux(minor, 1, result);
}

static struct mapped_device *dm_find_md(dev_t dev)
{
        struct mapped_device *md;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        down(&_minor_lock);

        md = idr_find(&_minor_idr, minor);
        if (!md || (dm_disk(md)->first_minor != minor))
                md = NULL;

        up(&_minor_lock);

        return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
        struct mapped_device *md = dm_find_md(dev);

        if (md)
                dm_get(md);

        return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
        return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
        md->interface_ptr = ptr;
}

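/*
 * Reference counting for the mapped device.  When the last holder calls
 * dm_put() the targets are suspended if necessary, the table is unbound
 * and the device is freed.
 */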
void dm_get(struct mapped_device *md)
{
        atomic_inc(&md->holders);
}

void dm_put(struct mapped_device *md)
{
        struct dm_table *map;

        if (atomic_dec_and_test(&md->holders)) {
                map = dm_get_table(md);
                if (!dm_suspended(md)) {
                        dm_table_presuspend_targets(map);
                        dm_table_postsuspend_targets(map);
                }
                __unbind(md);
                dm_table_put(map);
                free_dev(md);
        }
}

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
        struct bio *n;

        while (c) {
                n = c->bi_next;
                c->bi_next = NULL;
                __split_bio(md, c);
                c = n;
        }
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
        int r = -EINVAL;

        down(&md->suspend_lock);

        /* device must be suspended */
        if (!dm_suspended(md))
                goto out;

        __unbind(md);
        r = __bind(md, table);

out:
        up(&md->suspend_lock);
        return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
        int r;

        WARN_ON(md->frozen_sb);

        md->frozen_sb = freeze_bdev(md->suspended_bdev);
        if (IS_ERR(md->frozen_sb)) {
                r = PTR_ERR(md->frozen_sb);
                md->frozen_sb = NULL;
                return r;
        }

        set_bit(DMF_FROZEN, &md->flags);

        /* don't bdput right now, we don't want the bdev
         * to go away while it is locked.
         */
        return 0;
}

static void unlock_fs(struct mapped_device *md)
{
        if (!test_bit(DMF_FROZEN, &md->flags))
                return;

        thaw_bdev(md->suspended_bdev, md->frozen_sb);
        md->frozen_sb = NULL;
        clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_swap_table, dm_suspend must be called to flush any in-flight
 * bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, int do_lockfs)
{
        struct dm_table *map = NULL;
        DECLARE_WAITQUEUE(wait, current);
        struct bio *def;
        int r = -EINVAL;

        down(&md->suspend_lock);

        if (dm_suspended(md))
                goto out;

        map = dm_get_table(md);

        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);

        md->suspended_bdev = bdget_disk(md->disk, 0);
        if (!md->suspended_bdev) {
                DMWARN("bdget failed in dm_suspend");
                r = -ENOMEM;
                goto out;
        }

        /* Flush I/O to the device. */
        if (do_lockfs) {
                r = lock_fs(md);
                if (r)
                        goto out;
        }

        /*
         * First we set the BLOCK_IO flag so no more ios will be mapped.
         */
        down_write(&md->io_lock);
        set_bit(DMF_BLOCK_IO, &md->flags);

        add_wait_queue(&md->wait, &wait);
        up_write(&md->io_lock);

        /* unplug */
        if (map)
                dm_table_unplug_all(map);

        /*
         * Then we wait for the already mapped ios to
         * complete.
         */
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!atomic_read(&md->pending) || signal_pending(current))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        down_write(&md->io_lock);
        remove_wait_queue(&md->wait, &wait);

        /* were we interrupted? */
        r = -EINTR;
        if (atomic_read(&md->pending)) {
                clear_bit(DMF_BLOCK_IO, &md->flags);
                def = bio_list_get(&md->deferred);
                __flush_deferred_io(md, def);
                up_write(&md->io_lock);
                unlock_fs(md);
                goto out;
        }
        up_write(&md->io_lock);

        dm_table_postsuspend_targets(map);

        set_bit(DMF_SUSPENDED, &md->flags);

        r = 0;

out:
        if (r && md->suspended_bdev) {
                bdput(md->suspended_bdev);
                md->suspended_bdev = NULL;
        }

        dm_table_put(map);
        up(&md->suspend_lock);
        return r;
}

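/*
 * Reverse dm_suspend(): resume the targets, replay any deferred bios
 * and thaw the filesystem.
 */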
int dm_resume(struct mapped_device *md)
{
        int r = -EINVAL;
        struct bio *def;
        struct dm_table *map = NULL;

        down(&md->suspend_lock);
        if (!dm_suspended(md))
                goto out;

        map = dm_get_table(md);
        if (!map || !dm_table_get_size(map))
                goto out;

        dm_table_resume_targets(map);

        down_write(&md->io_lock);
        clear_bit(DMF_BLOCK_IO, &md->flags);

        def = bio_list_get(&md->deferred);
        __flush_deferred_io(md, def);
        up_write(&md->io_lock);

        unlock_fs(md);

        bdput(md->suspended_bdev);
        md->suspended_bdev = NULL;

        clear_bit(DMF_SUSPENDED, &md->flags);

        dm_table_unplug_all(map);

        r = 0;

out:
        dm_table_put(map);
        up(&md->suspend_lock);

        return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
        return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
        return wait_event_interruptible(md->eventq,
                        (event_nr != atomic_read(&md->event_nr)));
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
        return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED, &md->flags);
}

static struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .getgeo = dm_blk_getgeo,
        .owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");