net-next-2.6.git: drivers/md/raid0.c
md: prepare for non-power-of-two chunk sizes
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid0.h"

static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev->private;
	mdk_rdev_t **devlist = conf->devlist;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
*/
static void dump_zones(mddev_t *mddev)
{
	int j, k, h;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;
	printk(KERN_INFO "******* %s configuration *********\n",
		mdname(mddev));
	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk("%s/",
			bdevname(conf->devlist[j*mddev->raid_disks
						+ k]->bdev, b));
		printk("]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO " zone offset=%llukb "
			"device offset=%llukb size=%llukb\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "**********************************\n\n");
}

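/*
 * Illustrative sketch of the zone layout that create_strip_zones()
 * below builds; the member sizes here are assumed, not taken from the
 * source.  With two member devices of 100 and 150 sectors:
 *
 *   zone 0: nb_dev = 2, dev_start = 0,   zone_end = 200
 *           (the first 100 sectors of each device, striped)
 *   zone 1: nb_dev = 1, dev_start = 100, zone_end = 250
 *           (the remaining 50 sectors of the larger device only)
 *
 * zone_end is cumulative across the whole array, dev_start is an
 * offset within each member device, and the number of zones equals
 * the number of distinct member-device sizes.
 */
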
static int create_strip_zones(mddev_t *mddev)
{
	int i, c, j, err;
	sector_t curr_zone_end, sectors;
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

	if (!conf)
		return -ENOMEM;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0: comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->sectors);
			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0: END\n");
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0: EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0: NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0: ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sector to one PAGE, as
		 * a one page request is never in violation.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->devlist[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->sectors <= zone->dev_start) {
				printk(KERN_INFO " nope.\n");
				continue;
			}
			printk(KERN_INFO " contained as device %d\n", c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				printk(KERN_INFO " (%llu) is smallest!.\n",
					(unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)smallest->sectors);
	}
	mddev->queue->unplug_fn = raid0_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if (mddev->chunk_size % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "%s chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_size);
		goto abort;
	}
	printk(KERN_INFO "raid0: done.\n");
	mddev->private = conf;
	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return err;
}

/**
 * raid0_mergeable_bvec -- tell the bio layer whether two requests can be merged
 * @q: request queue
 * @bvm: properties of new bio
 * @biovec: the request that could be merged to it.
 *
 * Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
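
/*
 * Worked example for the calculation above (values assumed for
 * illustration): with a 64 KiB chunk, chunk_sectors = 128.  If a bio
 * currently starts 100 sectors into a chunk and already holds 20
 * sectors, then max = (128 - (100 + 20)) << 9 = 4096, i.e. at most
 * 4 KiB more may be merged before the chunk boundary.
 */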

static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	mdk_rdev_t *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	list_for_each_entry(rdev, &mddev->disks, same_set)
		array_sectors += rdev->sectors;

	return array_sectors;
}
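
/*
 * raid0 has no redundancy, so the array size is simply the sum of the
 * member sizes; e.g. (assumed values) three 100 GiB members give a
 * 300 GiB array.
 */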

static int raid0_run(mddev_t *mddev)
{
	int ret;

	if (mddev->chunk_size == 0 ||
	    !is_power_of_2(mddev->chunk_size)) {
		printk(KERN_ERR "md/raid0: chunk size must be a power of 2.\n");
		return -EINVAL;
	}
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	ret = create_strip_zones(mddev);
	if (ret < 0)
		return ret;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants. We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}
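	/*
	 * Worked example (assumed values): with 4 disks, a 64 KiB chunk
	 * and 4 KiB pages, stripe = 4 * 65536 / 4096 = 64 pages, so
	 * ra_pages is raised to at least 128 pages (512 KiB), i.e. two
	 * full stripes of read-ahead.
	 */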

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);
	return 0;
}

static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct raid0_private_data *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
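
/*
 * Example (assumed zone_end values): with zone_end = {1000, 1600}, a
 * request at sector 1200 falls in zone 1 and *sectorp is rewritten to
 * 1200 - 1000 = 200, the offset within that zone.
 */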

static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev->private;
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect, sector_offset;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);
	sector = bio->bi_sector;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}
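	/*
	 * Example of the split above (assumed values): with a 64 KiB
	 * chunk (chunk_sects = 128), a 16-sector bio starting 120
	 * sectors into a chunk would cross the boundary (120 + 16 > 128),
	 * so it is split after 128 - 120 = 8 sectors and each half is
	 * resubmitted separately.
	 */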
	sector_offset = sector;
	zone = find_zone(conf, &sector_offset);
	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
	{
		sector_t x = sector_offset >> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
					+ sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
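	/*
	 * Worked example of the mapping above (assumed values): with
	 * chunk_sects = 128 and a 2-device zone starting at dev_start = 0,
	 * sector 1000 lies in array chunk 1000 >> 7 = 7, which is chunk
	 * 7 / 2 = 3 on device 7 % 2 = 1 of the zone; sect_in_chunk =
	 * 1000 & 127 = 104, so rsect = (3 << 7) + 0 + 104 = 488 on that
	 * device.
	 */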

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev->private;

	sector_t zone_size;
	sector_t zone_start = 0;
	h = 0;

	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->devlist[j*mddev->raid_disks + k]
						->bdev, b));

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");