/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets, some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceil(log base 'base' of n): the number of times n can be
 * divided by 'base' (rounding up) before reaching 1.
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
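
/*
 * For example (illustrative): min_not_zero(0, 8) is 8, min_not_zero(4, 8)
 * is 4 and min_not_zero(0, 0) is 0.  A zero limit means "no restriction",
 * so it must never win over a real restriction from another device.
 */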

/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->no_cluster |= rhs->no_cluster;
}

/*
 * Calculate the index of the child node for the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
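
/*
 * Illustrative sketch of the index layout (inferred from the helpers
 * above, not an original comment): with CHILDREN_PER_NODE == 9, node n
 * on one level has children get_child(n, 0) .. get_child(n, 8), i.e.
 * 9n .. 9n + 8, on the level below.  high() finds the largest key
 * reachable from node n by repeatedly descending to the last child,
 * get_child(n, 8), until it reaches the leaf level.
 */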

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}
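
/*
 * Illustrative note (not in the original): on a 32-bit machine,
 * dm_vcalloc(0x20000000, 16) would wrap nmemb * elem_size around to a
 * small value; the ULONG_MAX / elem_size guard above rejects such a
 * request before the multiplication can overflow.
 */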

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	if (n) {
		memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
		memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	}

	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}
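
/*
 * Layout sketch (inferred from alloc_targets() above, not an original
 * comment): a single vmalloc'd block holds both arrays back to back,
 *
 *	n_highs[0 .. num-1] | n_targets[0 .. num-1]
 *
 * so one vfree(t->highs) in table_destroy() releases both, and the
 * memset pre-fills the unused highs entries with (sector_t) -1.
 */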

int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
{
	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		t = NULL;
		return -ENOMEM;
	}

	t->mode = mode;
	*result = t;
	return 0;
}
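
/*
 * Typical caller sequence (a sketch of how the dm core drives this API;
 * an assumption based on the table-load path in dm-ioctl.c, not code
 * from this file):
 *
 *	struct dm_table *t;
 *	int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 0);
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, len, params);
 *	if (!r)
 *		r = dm_table_complete(t);    (builds the btree index)
 *	...
 *	dm_table_put(t);                     (drops the initial holder)
 */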

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}
static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	r = bd_claim(bdev, _claim_ptr);
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;
	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d)
{
	if (!d->bdev)
		return;

	bd_release(d->bdev);
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * Checks that the requested area of the destination device is valid,
 * i.e. lies entirely within the device.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size;
	dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev, being careful to
 * leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev);
	if (!r)
		close_dev(&dd_copy);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}


int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);
	if (!r) {
		request_queue_t *q = bdev_get_queue((*result)->bdev);
		struct io_restrictions *rs = &ti->limits;

		/*
		 * Combine the device limits low.
		 *
		 * FIXME: if we move an io_restriction struct
		 * into q this would just be a call to
		 * combine_restrictions_low()
		 */
		rs->max_sectors =
			min_not_zero(rs->max_sectors, q->max_sectors);

		/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
		 * currently doesn't honor MD's merge_bvec_fn routine.
		 * In this case, we'll force DM to use PAGE_SIZE or
		 * smaller I/O, just to be safe.  A better fix is in the
		 * works, but add this for the time being so it will at
		 * least operate correctly.
		 */
		if (q->merge_bvec_fn)
			rs->max_sectors =
				min_not_zero(rs->max_sectors,
					     (unsigned int) (PAGE_SIZE >> 9));

		rs->max_phys_segments =
			min_not_zero(rs->max_phys_segments,
				     q->max_phys_segments);

		rs->max_hw_segments =
			min_not_zero(rs->max_hw_segments, q->max_hw_segments);

		rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

		rs->max_segment_size =
			min_not_zero(rs->max_segment_size, q->max_segment_size);

		rs->seg_boundary_mask =
			min_not_zero(rs->seg_boundary_mask,
				     q->seg_boundary_mask);

		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	}

	return r;
}
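
/*
 * Usage sketch (an assumption modelled on simple targets such as
 * dm-linear; 'lc' is a hypothetical private context, not something
 * defined in this file): a target takes a reference in its ctr and
 * drops it in its dtr:
 *
 *	if (dm_get_device(ti, argv[0], start, ti->len,
 *			  dm_table_get_mode(ti->table), &lc->dev))
 *		goto bad;
 *	...
 *	dm_put_device(ti, lc->dev);    (in the dtr)
 */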

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;
	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any backslash quoting */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}
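
/*
 * Example of the splitting rules (illustrative, not from the original):
 * the params string "0 2048 linear /dev/hda1 0" yields argc == 5, and a
 * backslash quotes the following character, so "a\ b" is parsed as the
 * single token "a b".  The input buffer itself is rewritten in place,
 * which is why the split is described as destructive.
 */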

static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		tgt->error = "zero-length target";
		DMERR("%s", tgt->error);
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		tgt->error = "unknown target type";
		DMERR("%s", tgt->error);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s", tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}
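
/*
 * Worked example (illustrative, assuming KEYS_PER_NODE == 8 and
 * CHILDREN_PER_NODE == 9 as derived above): 100 targets give
 * leaf_nodes = dm_div_up(100, 8) = 13 and depth = 1 + int_log(13, 9) = 3,
 * so setup_indexes() builds two internal levels: dm_div_up(13, 9) = 2
 * nodes above the 13 leaves, and a single root node above those.
 */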

static DECLARE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	down(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	up(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	down(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	up(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}
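
/*
 * Lookup walk-through (illustrative, not an original comment): at each
 * level the inner loop finds the first key >= sector, i.e. the first
 * subtree whose highest sector is not below the one we want.  Descending
 * through that child at every level lands on the leaf slot whose
 * t->highs[] entry covers the sector, and the matching target sits at
 * the same position in t->targets[].
 */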

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub-devices'
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);

}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	return suspend_targets(t, 1);
}

void dm_table_resume_targets(struct dm_table *t)
{
	int i;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
	}
}

int dm_table_flush_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);
	int ret = 0;

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		int err;

		if (!q->issue_flush_fn)
			err = -EOPNOTSUPP;
		else
			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);

		if (!ret)
			ret = err;
	}

	return ret;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
EXPORT_SYMBOL(dm_table_flush_all);