/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

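/*
 * Illustrative sketch (not part of this driver): the typical client
 * lifecycle as a hypothetical dm target might drive it from its ctr/dtr
 * paths.  The target structure and names below are made up.
 *
 *	struct example_target {
 *		struct dm_io_client *io_client;
 *	};
 *
 *	static int example_ctr(struct example_target *t)
 *	{
 *		t->io_client = dm_io_client_create(16);
 *		if (IS_ERR(t->io_client))
 *			return PTR_ERR(t->io_client);
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct example_target *t)
 *	{
 *		dm_io_client_destroy(t->io_client);
 *	}
 */
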
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

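/*
 * Worked example (illustrative): on a 64-bit build DM_IO_MAX_REGIONS is
 * 64, so 'struct io' is aligned to 64 bytes and the low 6 bits of its
 * address are zero.  If io sits at the assumed address 0xffff8800abcd1f40
 * and region is 5, store_io_and_region_in_bio() sets bi_private to
 * 0xffff8800abcd1f45; retrieve_io_and_region_from_bio() recovers the
 * pointer with val & -64 (i.e. val & ~63UL) and the region with val & 63.
 */
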
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

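/*
 * Illustrative sketch of how a consumer walks a dpages iterator; this is
 * essentially what do_region() below does.  'bytes_left' is a hypothetical
 * name for however much data remains to be transferred.
 *
 *	struct page *page;
 *	unsigned long len;
 *	unsigned offset;
 *
 *	while (bytes_left) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, bytes_left);
 *		... consume 'len' bytes of 'page' starting at 'offset' ...
 *		bytes_left -= len;
 *		dp->next_page(dp);
 *	}
 *
 * get_page() only reports the current position; next_page() only advances.
 */
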
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing the
	 * "io_" field from the stack frame (allowed in ANSI C).
	 * PTR_ALIGN() below then picks the first suitably aligned address
	 * within io_[].
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
		rw &= ~(1 << BIO_RW_BARRIER);
		goto retry;
	}

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNCIO bit
 * in io_req->bi_rw. If you fail to do one of these, the IO will be submitted
 * to the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

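/*
 * Illustrative usage sketch (not part of this driver): a synchronous
 * one-region read into kernel memory, then the asynchronous variant.
 * 'client', 'bdev', 'buf', 'sector', 'nr_sectors', 'my_notify_fn' and
 * 'my_context' are hypothetical; see struct dm_io_request in
 * include/linux/dm-io.h for the request fields.
 *
 *	struct dm_io_region where = {
 *		.bdev = bdev,
 *		.sector = sector,
 *		.count = nr_sectors,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn = NULL,	(NULL selects sync_io() above)
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 *
 * For asynchronous io, supply an io_notify_fn callback instead and note
 * the unplug requirements described above:
 *
 *	req.notify.fn = my_notify_fn;
 *	req.notify.context = my_context;
 *	r = dm_io(&req, 1, &where, NULL);
 */
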
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}