/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct bio_set *_bios;

/* FIXME: can we shrink this? */
struct io {
        unsigned long error;            /* bitset: one bit per failed region */
        atomic_t count;                 /* bios still outstanding for this io */
        struct task_struct *sleeper;    /* task to wake; NULL for async io */
        io_notify_fn callback;          /* completion callback for async io */
        void *context;                  /* opaque argument for the callback */
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io, we'll
 * have the same number of io contexts as buffer heads!  (FIXME:
 * must reduce this.)
 */
static unsigned _num_ios;
static mempool_t *_io_pool;

static unsigned int pages_to_ios(unsigned int pages)
{
        return 4 * pages;       /* too many? */
}

static int resize_pool(unsigned int new_ios)
{
        int r = 0;

        if (_io_pool) {
                if (new_ios == 0) {
                        /* free off the pool */
                        mempool_destroy(_io_pool);
                        _io_pool = NULL;
                        bioset_free(_bios);

                } else {
                        /* resize the pool */
                        r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
                }

        } else {
                /* create new pool */
                _io_pool = mempool_create_kmalloc_pool(new_ios,
                                                       sizeof(struct io));
                if (!_io_pool)
                        return -ENOMEM;

                _bios = bioset_create(16, 16);
                if (!_bios) {
                        mempool_destroy(_io_pool);
                        _io_pool = NULL;
                        return -ENOMEM;
                }
        }

        if (!r)
                _num_ios = new_ios;

        return r;
}

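/*
 * Clients reserve pool capacity for the pages they expect to have in
 * flight with dm_io_get() and release it with dm_io_put().  The io
 * pool and bio_set are shared by all clients, so reservations simply
 * accumulate in _num_ios.
 */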
int dm_io_get(unsigned int num_pages)
{
        return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
        resize_pool(_num_ios - pages_to_ios(num_pages));
}

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
        bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
        return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
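/*
 * dec_count() retires one bio's worth of the io: any error is recorded
 * against the region's bit in io->error, and when the last outstanding
 * bio completes we either wake the synchronous sleeper or free the io
 * back to the pool and fire the asynchronous callback.
 */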
static void dec_count(struct io *io, unsigned int region, int error)
{
        if (error)
                set_bit(region, &io->error);

        if (atomic_dec_and_test(&io->count)) {
                if (io->sleeper)
                        wake_up_process(io->sleeper);

                else {
                        int r = io->error;
                        io_notify_fn fn = io->callback;
                        void *context = io->context;

                        mempool_free(io, _io_pool);
                        fn(r, context);
                }
        }
}

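/*
 * bi_end_io for every bio we submit.  We are only interested once the
 * whole bio has completed (bi_size has dropped to zero); failed reads
 * are zero-filled rather than left holding stale data.
 */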
static int endio(struct bio *bio, unsigned int done, int error)
{
        struct io *io;
        unsigned region;

        /* keep going until we've finished */
        if (bio->bi_size)
                return 1;

        if (error && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        /*
         * The bio destructor in bio_put() may use the io object.
         */
        io = bio->bi_private;
        region = bio_get_region(bio);

        /* restore the bvec hidden from bio_add_page() in do_region() */
        bio->bi_max_vecs++;
        bio_put(bio);

        dec_count(io, region, error);

        return 0;
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
        void (*get_page)(struct dpages *dp,
                         struct page **p, unsigned long *len, unsigned *offset);
        void (*next_page)(struct dpages *dp);

        unsigned context_u;
        void *context_ptr;
};

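/*
 * A dpages acts as a cursor over the destination memory: get_page()
 * reports the current page, the usable length within it and the
 * starting offset, while next_page() advances the cursor.  Three
 * flavours follow, for page lists, bio_vec arrays and vmalloc'd
 * memory.
 */
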
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
                  struct page **p, unsigned long *len, unsigned *offset)
{
        unsigned o = dp->context_u;
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;
        *offset = o;
}

static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = (struct page_list *) dp->context_ptr;
        dp->context_ptr = pl->next;
        dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
                  struct page **p, unsigned long *len, unsigned *offset)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        *p = bvec->bv_page;
        *len = bvec->bv_len;
        *offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
        dp->get_page = bvec_get_page;
        dp->next_page = bvec_next_page;
        dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from vmalloc'd memory.
 */
static void vm_get_page(struct dpages *dp,
                 struct page **p, unsigned long *len, unsigned *offset)
{
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

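/*
 * Bios are carved from our private bio_set, so they must be freed back
 * into it; this destructor is attached to every bio we allocate.
 */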
static void dm_bio_destructor(struct bio *bio)
{
        bio_free(bio, _bios);
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
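/*
 * Cover one region with as many bios as it takes: pages are pulled
 * from the dpages cursor until bio_add_page() refuses another, then
 * the bio is submitted and a fresh one is started.
 */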
static void do_region(int rw, unsigned int region, struct io_region *where,
                      struct dpages *dp, struct io *io)
{
        struct bio *bio;
        struct page *page;
        unsigned long len;
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;

        while (remaining) {
                /*
                 * Allocate a suitably-sized bio: we add an extra
                 * bvec for bio_get/set_region() and decrement bi_max_vecs
                 * to hide it from bio_add_page().
                 */
                num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
                bio->bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                bio->bi_private = io;
                bio->bi_destructor = dm_bio_destructor;
                bio->bi_max_vecs--;
                bio_set_region(bio, region);

                /*
                 * Try and add as many pages as possible.
                 */
                while (remaining) {
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
                                break;

                        offset = 0;
                        remaining -= to_sector(len);
                        dp->next_page(dp);
                }

                atomic_inc(&io->count);
                submit_bio(rw, bio);
        }
}

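/*
 * Fan one io out over all the requested regions.  The caller primed
 * io->count to 1; the dec_count() at the bottom drops that priming
 * reference, so the io cannot complete before every region has been
 * dispatched.
 */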
static void dispatch_io(int rw, unsigned int num_regions,
                        struct io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        int i;
        struct dpages old_pages = *dp;

        if (sync)
                rw |= (1 << BIO_RW_SYNC);

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
                if (where[i].count)
                        do_region(rw, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);
}

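/*
 * Dispatch the io and sleep until every bio has completed.  Requests
 * spanning multiple regions are only allowed for writes, presumably
 * because a multi-region read would pull each region into the same
 * destination pages.
 */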
static int sync_io(unsigned int num_regions, struct io_region *where,
                   int rw, struct dpages *dp, unsigned long *error_bits)
{
        struct io io;

        if (num_regions > 1 && rw != WRITE) {
                WARN_ON(1);
                return -EIO;
        }

        io.error = 0;
        atomic_set(&io.count, 1); /* see dispatch_io() */
        io.sleeper = current;

        dispatch_io(rw, num_regions, where, dp, &io, 1);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&io.count) || signal_pending(current))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        if (atomic_read(&io.count))
                return -EINTR;

        *error_bits = io.error;
        return io.error ? -EIO : 0;
}

static int async_io(unsigned int num_regions, struct io_region *where, int rw,
                    struct dpages *dp, io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && rw != WRITE) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }

        io = mempool_alloc(_io_pool, GFP_NOIO);
        io->error = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = NULL;
        io->callback = fn;
        io->context = context;

        dispatch_io(rw, num_regions, where, dp, io, 0);
        return 0;
}

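/*
 * Exported wrappers: each pairs one of the three dpages flavours
 * (page list, bio_vec array, vmalloc'd memory) with synchronous or
 * asynchronous dispatch.
 *
 * A minimal sketch of a synchronous call, assuming hypothetical
 * caller-supplied values 'bdev', 'data' and 'nr_sectors':
 *
 *      struct io_region where = {
 *              .bdev   = bdev,
 *              .sector = 0,
 *              .count  = nr_sectors,
 *      };
 *      unsigned long error_bits;
 *      int r = dm_io_sync_vm(1, &where, WRITE, data, &error_bits);
 *
 * On failure, each set bit in error_bits identifies a region whose io
 * failed.
 */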
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
               struct page_list *pl, unsigned int offset,
               unsigned long *error_bits)
{
        struct dpages dp;
        list_dp_init(&dp, pl, offset);
        return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
                    struct bio_vec *bvec, unsigned long *error_bits)
{
        struct dpages dp;
        bvec_dp_init(&dp, bvec);
        return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
                  void *data, unsigned long *error_bits)
{
        struct dpages dp;
        vm_dp_init(&dp, data);
        return sync_io(num_regions, where, rw, &dp, error_bits);
}

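/*
 * The asynchronous flavours return as soon as the bios are submitted;
 * 'fn' is invoked from the io completion path (typically interrupt
 * context, so it must not sleep) once the whole io has finished.
 * Sketch, with a hypothetical notify function:
 *
 *      static void my_notify(unsigned long error, void *context)
 *      {
 *              ... handle completion; 'error' is the region bitset ...
 *      }
 *
 *      dm_io_async_bvec(1, &where, WRITE, bvec, my_notify, context);
 */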
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
                struct page_list *pl, unsigned int offset,
                io_notify_fn fn, void *context)
{
        struct dpages dp;
        list_dp_init(&dp, pl, offset);
        return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
                     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
        struct dpages dp;
        bvec_dp_init(&dp, bvec);
        return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
                   void *data, io_notify_fn fn, void *context)
{
        struct dpages dp;
        vm_dp_init(&dp, data);
        return async_io(num_regions, where, rw, &dp, fn, context);
}

EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);