/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/raid/md_k.h>
#include <linux/seq_file.h>
#include "raid0.h"

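/*
 * Unplugging the array means unplugging every member: raid0 keeps no
 * request queue of its own, so it simply kicks each underlying device's
 * queue.
 */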
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

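/*
 * The array reports itself congested as soon as any one member's backing
 * device is congested for the queried bits.
 */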
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}


static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t current_start, curr_zone_start;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * Count the number of 'same size groups': each distinct member
	 * size starts a new strip zone.
	 */
	conf->nr_strip_zones = 0;

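	/*
	 * Illustrative example (hypothetical sizes): two 80 GiB members
	 * plus one 120 GiB member have two distinct sizes, hence two
	 * zones.  Zone 0 stripes the first 80 GiB of all three devices;
	 * zone 1 covers the remaining 40 GiB of the large device alone.
	 */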
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		printk(KERN_INFO "raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		list_for_each_entry(rdev2, &mddev->disks, same_set) {
			printk(KERN_INFO "raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
			printk(KERN_INFO " with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk(KERN_INFO "raid0:   END\n");
				break;
			}
			if (rdev2->size == rdev1->size)
			{
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk(KERN_INFO "raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk(KERN_INFO "raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk(KERN_INFO "raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk(KERN_INFO "raid0: %d zones\n",
				conf->nr_strip_zones);
		}
	}
	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	list_for_each_entry(rdev1, &mddev->disks, same_set) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "raid0: bad disk number %d - "
				"aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk(KERN_ERR "raid0: multiple devices for %d - "
				"aborting!\n", j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
			"aborting!\n", cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
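	/* rdev->size is in KiB, so doubling converts it to 512-byte sectors */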
	zone->sectors = smallest->size * cnt * 2;
	zone->zone_start = 0;

	current_start = smallest->size * 2;
	curr_zone_start = zone->sectors;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk(KERN_INFO "raid0: zone %d\n", i);
		zone->dev_start = current_start;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk(KERN_INFO "raid0: checking %s ...",
				bdevname(rdev->bdev, b));
			if (rdev->size > current_start / 2) {
				printk(KERN_INFO " contained as device %d\n",
					c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk(KERN_INFO "  (%llu) is smallest!.\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(KERN_INFO " nope.\n");
		}

		zone->nb_dev = c;
		zone->sectors = (smallest->size * 2 - current_start) * c;
		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
			zone->nb_dev, (unsigned long long)zone->sectors);

		zone->zone_start = curr_zone_start;
		curr_zone_start += zone->sectors;

		current_start = smallest->size * 2;
		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)current_start);
	}
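
	/*
	 * Each zone now records zone_start (its first sector in the
	 * array's virtual address space), dev_start (where the zone
	 * begins on each member device) and sectors (its total length).
	 */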

	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->spacing = curr_zone_start;
	min_spacing = curr_zone_start;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t s = 0;
		for (j = i; j < conf->nr_strip_zones - 1 &&
				s < min_spacing; j++)
			s += conf->strip_zone[j].sectors;
		if (s >= min_spacing && s < conf->spacing)
			conf->spacing = s;
	}
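
	/*
	 * Hypothetical numbers: with 4 KiB pages and 8-byte pointers the
	 * table can hold 512 entries, so min_spacing is the array length
	 * divided by 512; conf->spacing becomes the shortest contiguous
	 * run of zones that is at least that long.
	 */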

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk(KERN_INFO "raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the bio vector that could be merged to it.
 *
 *	Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
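	/*
	 * Worked example (hypothetical): with 64 KiB chunks
	 * (chunk_sectors = 128), a bio already holding 4 sectors that
	 * starts 120 sectors into a chunk can grow by
	 * (128 - 124) << 9 = 2048 bytes before crossing a chunk boundary.
	 */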
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static int raid0_run (mddev_t *mddev)
{
	unsigned cur=0, i=0, nb_zone;
	s64 sectors;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;

	/* calculate array device size */
	mddev->array_sectors = 0;
	list_for_each_entry(rdev, &mddev->disks, same_set)
		mddev->array_sectors += rdev->size * 2;

	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
		(unsigned long long)mddev->array_sectors);
	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
		(unsigned long long)conf->spacing);
	{
		sector_t s = mddev->array_sectors;
		sector_t space = conf->spacing;
		int round;
		conf->sector_shift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->sector_shift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
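	/*
	 * nb_zone is the array length divided by the hash spacing, rounded
	 * up; the shifting above only keeps the 64-bit values within
	 * sector_div()'s 32-bit divisor limit.
	 */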
	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);

	printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	sectors = conf->strip_zone[cur].sectors;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i=1; i< nb_zone; i++) {
		while (sectors <= conf->spacing) {
			cur++;
			sectors += conf->strip_zone[cur].sectors;
		}
		sectors -= conf->spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->sector_shift) {
		conf->spacing >>= conf->sector_shift;
		/* round spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->spacing++;
	}

	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * read ahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by the chunk size, times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}
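	/*
	 * E.g. (hypothetical) 4 disks with 64 KiB chunks on 4 KiB pages
	 * give a 64-page stripe, so ra_pages is raised to at least 128.
	 */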

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}

static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);
	sector = bio->bi_sector;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

	{
		sector_t x = sector >> conf->sector_shift;
		sector_div(x, (u32)conf->spacing);
		zone = conf->hash_table[x];
	}

	while (sector >= zone->zone_start + zone->sectors)
		zone++;

	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);

	{
		sector_t x = (sector - zone->zone_start) >> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
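	/*
	 * Hypothetical walk-through: a 3-device zone 0 with 64 KiB chunks
	 * (chunksect_bits = 7) and sector 1000: array chunk = 1000 >> 7 = 7,
	 * chunk-on-device = 7 / 3 = 2, device index = 7 % 3 = 1,
	 * sect_in_chunk = 1000 & 127 = 104, so
	 * rsect = (2 << 7) + 0 + 104 = 360 on the second device.
	 */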

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] zs=%d ds=%d s=%d\n",
				conf->strip_zone[j].zone_start,
				conf->strip_zone[j].dev_start,
				conf->strip_zone[j].sectors);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");