/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
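
/*
 * Example client lifecycle (an illustrative sketch only; "num_pages" is
 * however many pages the caller expects to have in flight at once):
 *
 *	struct dm_io_client *client;
 *
 *	client = dm_io_client_create(num_pages);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	... issue io via dm_io() ...
 *	dm_io_client_destroy(client);
 */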

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}

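/*
 * Sketch of how the spare bvec is used; this simply mirrors what
 * do_region() and endio() below do, and is illustrative only:
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, client->bios);
 *	bio->bi_max_vecs--;		(hide the spare bvec from bio_add_page)
 *	bio_set_region(bio, region);	(stash the region number in it)
 *	...
 *	region = bio_get_region(bio);	(endio: read it back)
 *	bio->bi_max_vecs++;		(restore before bio_put)
 */
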
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
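
/*
 * How consumers walk a dpages object (a sketch of the loop that
 * do_region() below performs):
 *
 *	while (remaining) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, to_bytes(remaining));
 *		... add up to "len" bytes of "page" at "offset" to a bio ...
 *		remaining -= to_sector(len);
 *		dp->next_page(dp);
 *	}
 */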

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably-sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
				      num_bvecs);
		if (unlikely(num_bvecs > BIO_MAX_PAGES))
			num_bvecs = BIO_MAX_PAGES;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io.error_bits = 0;
	io.eopnotsupp_bits = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
		rw &= ~(1 << BIO_RW_BARRIER);
		goto retry;
	}

	if (error_bits)
		*error_bits = io.error_bits;

	return io.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNCIO bit
 * in io_req->bi_rw.  If you fail to do one of these, the IO will be submitted
 * to the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
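
/*
 * Example: a synchronous read of one region into a vmalloc'd buffer
 * (an illustrative sketch; "client", "bdev", "buffer" and "size" are
 * assumed to have been set up by the caller):
 *
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_VMA,
 *		.mem.ptr.vma = buffer,
 *		.notify.fn = NULL,
 *		.client = client,
 *	};
 *	struct dm_io_region region = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = to_sector(size),
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 *
 * With notify.fn set, dm_io() instead returns immediately and notify.fn
 * is called with the error bits once all regions have completed.
 */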

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}