1/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
8 * Based on code in raid1.c. See raid1.c for further copyright information.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
5a0e3ad6 21#include <linux/slab.h>
25570727 22#include <linux/delay.h>
bff61975 23#include <linux/blkdev.h>
bff61975 24#include <linux/seq_file.h>
43b2e5d8 25#include "md.h"
ef740c37
CH
26#include "raid10.h"
27#include "bitmap.h"
1da177e4
LT
28
29/*
30 * RAID10 provides a combination of RAID0 and RAID1 functionality.
31 * The layout of data is defined by
32 * chunk_size
33 * raid_disks
34 * near_copies (stored in low byte of layout)
35 * far_copies (stored in second byte of layout)
c93983bf 36 * far_offset (stored in bit 16 of layout )
1da177e4
LT
37 *
38 * The data to be stored is divided into chunks using chunksize.
39 * Each device is divided into far_copies sections.
40 * In each section, chunks are laid out in a style similar to raid0, but
41 * near_copies copies of each chunk are stored (each on a different drive).
42 * The starting device for each section is offset near_copies from the starting
43 * device of the previous section.
c93983bf 44 * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a different
1da177e4
LT
45 * drive.
46 * near_copies and far_copies must be at least one, and their product is at most
47 * raid_disks.
c93983bf
N
48 *
49 * If far_offset is true, then the far_copies are handled a bit differently.
50 * The copies are still in different stripes, but instead of being very far apart
51 * on disk, they are adjacent stripes.
1da177e4
LT
52 */
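/*
 * Worked example (illustrative addition, not from the original source):
 * with the encoding above, a layout value of 0x102 decodes to
 * near_copies = 2, far_copies = 1, far_offset = 0, i.e. the common 'n2'
 * geometry where each chunk has two copies on adjacent devices.  A layout
 * of 0x10201 decodes to near_copies = 1, far_copies = 2 with far_offset
 * set, so the second copy of each chunk sits in the adjacent stripe
 * rather than in a distant section of the device.
 */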
53
54/*
55 * Number of guaranteed r10bios in case of extreme VM load:
56 */
57#define NR_RAID10_BIOS 256
58
59static void unplug_slaves(mddev_t *mddev);
60
0a27ec96
N
61static void allow_barrier(conf_t *conf);
62static void lower_barrier(conf_t *conf);
63
dd0fc66f 64static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
1da177e4
LT
65{
66 conf_t *conf = data;
67 r10bio_t *r10_bio;
68 int size = offsetof(struct r10bio_s, devs[conf->copies]);
69
70 /* allocate a r10bio with room for raid_disks entries in the bios array */
9ffae0cf 71 r10_bio = kzalloc(size, gfp_flags);
ed9bfdf1 72 if (!r10_bio && conf->mddev)
1da177e4
LT
73 unplug_slaves(conf->mddev);
74
75 return r10_bio;
76}
77
78static void r10bio_pool_free(void *r10_bio, void *data)
79{
80 kfree(r10_bio);
81}
82
0310fa21 83/* Maximum size of each resync request */
1da177e4 84#define RESYNC_BLOCK_SIZE (64*1024)
1da177e4 85#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
0310fa21
N
86/* amount of memory to reserve for resync requests */
87#define RESYNC_WINDOW (1024*1024)
88/* maximum number of concurrent requests, memory permitting */
89#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
1da177e4
LT
90
91/*
92 * When performing a resync, we need to read and compare, so
93 * we need as many pages as there are copies.
94 * When performing a recovery, we need 2 bios, one for read,
95 * one for write (we recover only one drive per r10buf)
96 *
97 */
dd0fc66f 98static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
1da177e4
LT
99{
100 conf_t *conf = data;
101 struct page *page;
102 r10bio_t *r10_bio;
103 struct bio *bio;
104 int i, j;
105 int nalloc;
106
107 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
108 if (!r10_bio) {
109 unplug_slaves(conf->mddev);
110 return NULL;
111 }
112
113 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
114 nalloc = conf->copies; /* resync */
115 else
116 nalloc = 2; /* recovery */
117
118 /*
119 * Allocate bios.
120 */
121 for (j = nalloc ; j-- ; ) {
122 bio = bio_alloc(gfp_flags, RESYNC_PAGES);
123 if (!bio)
124 goto out_free_bio;
125 r10_bio->devs[j].bio = bio;
126 }
127 /*
128 * Allocate RESYNC_PAGES data pages and attach them
129 * where needed.
130 */
131 for (j = 0 ; j < nalloc; j++) {
132 bio = r10_bio->devs[j].bio;
133 for (i = 0; i < RESYNC_PAGES; i++) {
134 page = alloc_page(gfp_flags);
135 if (unlikely(!page))
136 goto out_free_pages;
137
138 bio->bi_io_vec[i].bv_page = page;
139 }
140 }
141
142 return r10_bio;
143
144out_free_pages:
145 for ( ; i > 0 ; i--)
1345b1d8 146 safe_put_page(bio->bi_io_vec[i-1].bv_page);
1da177e4
LT
147 while (j--)
148 for (i = 0; i < RESYNC_PAGES ; i++)
1345b1d8 149 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
1da177e4
LT
150 j = -1;
151out_free_bio:
152 while ( ++j < nalloc )
153 bio_put(r10_bio->devs[j].bio);
154 r10bio_pool_free(r10_bio, conf);
155 return NULL;
156}
157
158static void r10buf_pool_free(void *__r10_bio, void *data)
159{
160 int i;
161 conf_t *conf = data;
162 r10bio_t *r10bio = __r10_bio;
163 int j;
164
165 for (j=0; j < conf->copies; j++) {
166 struct bio *bio = r10bio->devs[j].bio;
167 if (bio) {
168 for (i = 0; i < RESYNC_PAGES; i++) {
1345b1d8 169 safe_put_page(bio->bi_io_vec[i].bv_page);
1da177e4
LT
170 bio->bi_io_vec[i].bv_page = NULL;
171 }
172 bio_put(bio);
173 }
174 }
175 r10bio_pool_free(r10bio, conf);
176}
177
178static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
179{
180 int i;
181
182 for (i = 0; i < conf->copies; i++) {
183 struct bio **bio = & r10_bio->devs[i].bio;
0eb3ff12 184 if (*bio && *bio != IO_BLOCKED)
1da177e4
LT
185 bio_put(*bio);
186 *bio = NULL;
187 }
188}
189
858119e1 190static void free_r10bio(r10bio_t *r10_bio)
1da177e4 191{
070ec55d 192 conf_t *conf = r10_bio->mddev->private;
1da177e4
LT
193
194 /*
195 * Wake up any possible resync thread that waits for the device
196 * to go idle.
197 */
0a27ec96 198 allow_barrier(conf);
1da177e4
LT
199
200 put_all_bios(conf, r10_bio);
201 mempool_free(r10_bio, conf->r10bio_pool);
202}
203
858119e1 204static void put_buf(r10bio_t *r10_bio)
1da177e4 205{
070ec55d 206 conf_t *conf = r10_bio->mddev->private;
1da177e4
LT
207
208 mempool_free(r10_bio, conf->r10buf_pool);
209
0a27ec96 210 lower_barrier(conf);
1da177e4
LT
211}
212
213static void reschedule_retry(r10bio_t *r10_bio)
214{
215 unsigned long flags;
216 mddev_t *mddev = r10_bio->mddev;
070ec55d 217 conf_t *conf = mddev->private;
1da177e4
LT
218
219 spin_lock_irqsave(&conf->device_lock, flags);
220 list_add(&r10_bio->retry_list, &conf->retry_list);
4443ae10 221 conf->nr_queued ++;
1da177e4
LT
222 spin_unlock_irqrestore(&conf->device_lock, flags);
223
388667be
AJ
224 /* wake up frozen array... */
225 wake_up(&conf->wait_barrier);
226
1da177e4
LT
227 md_wakeup_thread(mddev->thread);
228}
229
230/*
231 * raid_end_bio_io() is called when we have finished servicing a mirrored
232 * operation and are ready to return a success/failure code to the buffer
233 * cache layer.
234 */
235static void raid_end_bio_io(r10bio_t *r10_bio)
236{
237 struct bio *bio = r10_bio->master_bio;
238
6712ecf8 239 bio_endio(bio,
1da177e4
LT
240 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
241 free_r10bio(r10_bio);
242}
243
244/*
245 * Update disk head position estimator based on IRQ completion info.
246 */
247static inline void update_head_pos(int slot, r10bio_t *r10_bio)
248{
070ec55d 249 conf_t *conf = r10_bio->mddev->private;
1da177e4
LT
250
251 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
252 r10_bio->devs[slot].addr + (r10_bio->sectors);
253}
254
6712ecf8 255static void raid10_end_read_request(struct bio *bio, int error)
1da177e4
LT
256{
257 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
258 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
259 int slot, dev;
070ec55d 260 conf_t *conf = r10_bio->mddev->private;
1da177e4 261
1da177e4
LT
262
263 slot = r10_bio->read_slot;
264 dev = r10_bio->devs[slot].devnum;
265 /*
266 * this branch is our 'one mirror IO has finished' event handler:
267 */
4443ae10
N
268 update_head_pos(slot, r10_bio);
269
270 if (uptodate) {
1da177e4
LT
271 /*
272 * Set R10BIO_Uptodate in our master bio, so that
273 * we will return a good error code to the higher
274 * levels even if IO on some other mirrored buffer fails.
275 *
276 * The 'master' represents the composite IO operation to
277 * user-side. So if something waits for IO, then it will
278 * wait for the 'master' bio.
279 */
280 set_bit(R10BIO_Uptodate, &r10_bio->state);
1da177e4 281 raid_end_bio_io(r10_bio);
4443ae10 282 } else {
1da177e4
LT
283 /*
284 * oops, read error:
285 */
286 char b[BDEVNAME_SIZE];
287 if (printk_ratelimit())
288 printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
289 bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
290 reschedule_retry(r10_bio);
291 }
292
293 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
1da177e4
LT
294}
295
6712ecf8 296static void raid10_end_write_request(struct bio *bio, int error)
1da177e4
LT
297{
298 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
299 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
300 int slot, dev;
070ec55d 301 conf_t *conf = r10_bio->mddev->private;
1da177e4 302
1da177e4
LT
303 for (slot = 0; slot < conf->copies; slot++)
304 if (r10_bio->devs[slot].bio == bio)
305 break;
306 dev = r10_bio->devs[slot].devnum;
307
308 /*
309 * this branch is our 'one mirror IO has finished' event handler:
310 */
6cce3b23 311 if (!uptodate) {
1da177e4 312 md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
6cce3b23
N
313 /* an I/O failed, we can't clear the bitmap */
314 set_bit(R10BIO_Degraded, &r10_bio->state);
315 } else
1da177e4
LT
316 /*
317 * Set R10BIO_Uptodate in our master bio, so that
318 * we will return a good error code to the higher
319 * levels even if IO on some other mirrored buffer fails.
320 *
321 * The 'master' represents the composite IO operation to
322 * user-side. So if something waits for IO, then it will
323 * wait for the 'master' bio.
324 */
325 set_bit(R10BIO_Uptodate, &r10_bio->state);
326
327 update_head_pos(slot, r10_bio);
328
329 /*
330 *
331 * Let's see if all mirrored write operations have finished
332 * already.
333 */
334 if (atomic_dec_and_test(&r10_bio->remaining)) {
6cce3b23
N
335 /* clear the bitmap if all writes complete successfully */
336 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
337 r10_bio->sectors,
338 !test_bit(R10BIO_Degraded, &r10_bio->state),
339 0);
1da177e4
LT
340 md_write_end(r10_bio->mddev);
341 raid_end_bio_io(r10_bio);
342 }
343
344 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
1da177e4
LT
345}
346
347
348/*
349 * RAID10 layout manager
350 * As well as the chunksize and raid_disks count, there are two
351 * parameters: near_copies and far_copies.
352 * near_copies * far_copies must be <= raid_disks.
353 * Normally one of these will be 1.
354 * If both are 1, we get raid0.
355 * If near_copies == raid_disks, we get raid1.
356 *
357 * Chunks are laid out in raid0 style with near_copies copies of the
358 * first chunk, followed by near_copies copies of the next chunk and
359 * so on.
360 * If far_copies > 1, then after 1/far_copies of the array has been assigned
361 * as described above, we start again with a device offset of near_copies.
362 * So we effectively have another copy of the whole array further down all
363 * the drives, but with blocks on different drives.
364 * With this layout, a block is never stored twice on the same device.
365 *
366 * raid10_find_phys finds the sector offset of a given virtual sector
c93983bf 367 * on each device that it is on.
1da177e4
LT
368 *
369 * raid10_find_virt does the reverse mapping, from a device and a
370 * sector offset to a virtual address
371 */
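/*
 * Worked example (illustrative, assuming the geometry described above):
 * with raid_disks = 4, near_copies = 2 and far_copies = 1, chunk 0 is
 * stored on devices 0 and 1, chunk 1 on devices 2 and 3, and chunk 2
 * again on devices 0 and 1 one stripe further in.  With raid_disks = 4,
 * near_copies = 1 and far_copies = 2, the first half of each device holds
 * a plain raid0 layout and the second half repeats the same chunks
 * shifted by one device, so no block is ever stored twice on one device.
 */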
372
373static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
374{
375 int n,f;
376 sector_t sector;
377 sector_t chunk;
378 sector_t stripe;
379 int dev;
380
381 int slot = 0;
382
383 /* now calculate first sector/dev */
384 chunk = r10bio->sector >> conf->chunk_shift;
385 sector = r10bio->sector & conf->chunk_mask;
386
387 chunk *= conf->near_copies;
388 stripe = chunk;
389 dev = sector_div(stripe, conf->raid_disks);
c93983bf
N
390 if (conf->far_offset)
391 stripe *= conf->far_copies;
1da177e4
LT
392
393 sector += stripe << conf->chunk_shift;
394
395 /* and calculate all the others */
396 for (n=0; n < conf->near_copies; n++) {
397 int d = dev;
398 sector_t s = sector;
399 r10bio->devs[slot].addr = sector;
400 r10bio->devs[slot].devnum = d;
401 slot++;
402
403 for (f = 1; f < conf->far_copies; f++) {
404 d += conf->near_copies;
405 if (d >= conf->raid_disks)
406 d -= conf->raid_disks;
407 s += conf->stride;
408 r10bio->devs[slot].devnum = d;
409 r10bio->devs[slot].addr = s;
410 slot++;
411 }
412 dev++;
413 if (dev >= conf->raid_disks) {
414 dev = 0;
415 sector += (conf->chunk_mask + 1);
416 }
417 }
418 BUG_ON(slot != conf->copies);
419}
420
421static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
422{
423 sector_t offset, chunk, vchunk;
424
1da177e4 425 offset = sector & conf->chunk_mask;
c93983bf
N
426 if (conf->far_offset) {
427 int fc;
428 chunk = sector >> conf->chunk_shift;
429 fc = sector_div(chunk, conf->far_copies);
430 dev -= fc * conf->near_copies;
431 if (dev < 0)
432 dev += conf->raid_disks;
433 } else {
64a742bc 434 while (sector >= conf->stride) {
c93983bf
N
435 sector -= conf->stride;
436 if (dev < conf->near_copies)
437 dev += conf->raid_disks - conf->near_copies;
438 else
439 dev -= conf->near_copies;
440 }
441 chunk = sector >> conf->chunk_shift;
442 }
1da177e4
LT
443 vchunk = chunk * conf->raid_disks + dev;
444 sector_div(vchunk, conf->near_copies);
445 return (vchunk << conf->chunk_shift) + offset;
446}
447
448/**
449 * raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
450 * @q: request queue
cc371e66 451 * @bvm: properties of new bio
1da177e4
LT
452 * @biovec: the request that could be merged to it.
453 *
454 * Return amount of bytes we can accept at this offset
455 * If near_copies == raid_disks, there are no striping issues,
456 * but in that case, the function isn't called at all.
457 */
cc371e66
AK
458static int raid10_mergeable_bvec(struct request_queue *q,
459 struct bvec_merge_data *bvm,
460 struct bio_vec *biovec)
1da177e4
LT
461{
462 mddev_t *mddev = q->queuedata;
cc371e66 463 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
1da177e4 464 int max;
9d8f0363 465 unsigned int chunk_sectors = mddev->chunk_sectors;
cc371e66 466 unsigned int bio_sectors = bvm->bi_size >> 9;
1da177e4
LT
467
468 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
469 if (max < 0) max = 0; /* bio_add cannot handle a negative return */
cc371e66
AK
470 if (max <= biovec->bv_len && bio_sectors == 0)
471 return biovec->bv_len;
1da177e4
LT
472 else
473 return max;
474}
475
476/*
477 * This routine returns the disk from which the requested read should
478 * be done. There is a per-array 'next expected sequential IO' sector
479 * number - if this matches on the next IO then we use the last disk.
480 * There is also a per-disk 'last known head position' sector that is
481 * maintained from IRQ contexts; both the normal and the resync IO
482 * completion handlers update this position correctly. If there is no
483 * perfect sequential match then we pick the disk whose head is closest.
484 *
485 * If there are 2 mirrors in the same 2 devices, performance degrades
486 * because position is mirror, not device based.
487 *
488 * The rdev for the device selected will have nr_pending incremented.
489 */
490
491/*
492 * FIXME: possibly should rethink readbalancing and do it differently
493 * depending on near_copies / far_copies geometry.
494 */
495static int read_balance(conf_t *conf, r10bio_t *r10_bio)
496{
497 const unsigned long this_sector = r10_bio->sector;
498 int disk, slot, nslot;
499 const int sectors = r10_bio->sectors;
500 sector_t new_distance, current_distance;
d6065f7b 501 mdk_rdev_t *rdev;
1da177e4
LT
502
503 raid10_find_phys(conf, r10_bio);
504 rcu_read_lock();
505 /*
506 * Check if we can balance. We can balance on the whole
6cce3b23
N
507 * device if no resync is going on (recovery is ok), or below
508 * the resync window. We take the first readable disk when
509 * above the resync window.
1da177e4
LT
510 */
511 if (conf->mddev->recovery_cp < MaxSector
512 && (this_sector + sectors >= conf->next_resync)) {
513 /* make sure that disk is operational */
514 slot = 0;
515 disk = r10_bio->devs[slot].devnum;
516
d6065f7b 517 while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
0eb3ff12 518 r10_bio->devs[slot].bio == IO_BLOCKED ||
b2d444d7 519 !test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
520 slot++;
521 if (slot == conf->copies) {
522 slot = 0;
523 disk = -1;
524 break;
525 }
526 disk = r10_bio->devs[slot].devnum;
527 }
528 goto rb_out;
529 }
530
531
532 /* make sure the disk is operational */
533 slot = 0;
534 disk = r10_bio->devs[slot].devnum;
d6065f7b 535 while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
0eb3ff12 536 r10_bio->devs[slot].bio == IO_BLOCKED ||
b2d444d7 537 !test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
538 slot ++;
539 if (slot == conf->copies) {
540 disk = -1;
541 goto rb_out;
542 }
543 disk = r10_bio->devs[slot].devnum;
544 }
545
546
3ec67ac1
N
547 current_distance = abs(r10_bio->devs[slot].addr -
548 conf->mirrors[disk].head_position);
1da177e4 549
8ed3a195
KS
550 /* Find the disk whose head is closest,
551 * or - for far > 1 - find the closest to partition beginning */
1da177e4
LT
552
553 for (nslot = slot; nslot < conf->copies; nslot++) {
554 int ndisk = r10_bio->devs[nslot].devnum;
555
556
d6065f7b 557 if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
0eb3ff12 558 r10_bio->devs[nslot].bio == IO_BLOCKED ||
b2d444d7 559 !test_bit(In_sync, &rdev->flags))
1da177e4
LT
560 continue;
561
22dfdf52
N
562 /* This optimisation is debatable, and completely destroys
563 * sequential read speed for 'far copies' arrays. So only
564 * keep it for 'near' arrays, and review those later.
565 */
566 if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
1da177e4
LT
567 disk = ndisk;
568 slot = nslot;
569 break;
570 }
8ed3a195
KS
571
572 /* for far > 1 always use the lowest address */
573 if (conf->far_copies > 1)
574 new_distance = r10_bio->devs[nslot].addr;
575 else
576 new_distance = abs(r10_bio->devs[nslot].addr -
577 conf->mirrors[ndisk].head_position);
1da177e4
LT
578 if (new_distance < current_distance) {
579 current_distance = new_distance;
580 disk = ndisk;
581 slot = nslot;
582 }
583 }
584
585rb_out:
586 r10_bio->read_slot = slot;
587/* conf->next_seq_sect = this_sector + sectors;*/
588
d6065f7b 589 if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
1da177e4 590 atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
29fc7e3e
N
591 else
592 disk = -1;
1da177e4
LT
593 rcu_read_unlock();
594
595 return disk;
596}
597
598static void unplug_slaves(mddev_t *mddev)
599{
070ec55d 600 conf_t *conf = mddev->private;
1da177e4
LT
601 int i;
602
603 rcu_read_lock();
604 for (i=0; i<mddev->raid_disks; i++) {
d6065f7b 605 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
b2d444d7 606 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
165125e1 607 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
1da177e4
LT
608
609 atomic_inc(&rdev->nr_pending);
610 rcu_read_unlock();
611
2ad8b1ef 612 blk_unplug(r_queue);
1da177e4
LT
613
614 rdev_dec_pending(rdev, mddev);
615 rcu_read_lock();
616 }
617 }
618 rcu_read_unlock();
619}
620
165125e1 621static void raid10_unplug(struct request_queue *q)
1da177e4 622{
6cce3b23
N
623 mddev_t *mddev = q->queuedata;
624
1da177e4 625 unplug_slaves(q->queuedata);
6cce3b23 626 md_wakeup_thread(mddev->thread);
1da177e4
LT
627}
628
0d129228
N
629static int raid10_congested(void *data, int bits)
630{
631 mddev_t *mddev = data;
070ec55d 632 conf_t *conf = mddev->private;
0d129228
N
633 int i, ret = 0;
634
3fa841d7
N
635 if (mddev_congested(mddev, bits))
636 return 1;
0d129228
N
637 rcu_read_lock();
638 for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
639 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
640 if (rdev && !test_bit(Faulty, &rdev->flags)) {
165125e1 641 struct request_queue *q = bdev_get_queue(rdev->bdev);
0d129228
N
642
643 ret |= bdi_congested(&q->backing_dev_info, bits);
644 }
645 }
646 rcu_read_unlock();
647 return ret;
648}
649
a35e63ef
N
650static int flush_pending_writes(conf_t *conf)
651{
652 /* Any writes that have been queued but are awaiting
653 * bitmap updates get flushed here.
654 * We return 1 if any requests were actually submitted.
655 */
656 int rv = 0;
657
658 spin_lock_irq(&conf->device_lock);
659
660 if (conf->pending_bio_list.head) {
661 struct bio *bio;
662 bio = bio_list_get(&conf->pending_bio_list);
663 blk_remove_plug(conf->mddev->queue);
664 spin_unlock_irq(&conf->device_lock);
665 /* flush any pending bitmap writes to disk
666 * before proceeding w/ I/O */
667 bitmap_unplug(conf->mddev->bitmap);
668
669 while (bio) { /* submit pending writes */
670 struct bio *next = bio->bi_next;
671 bio->bi_next = NULL;
672 generic_make_request(bio);
673 bio = next;
674 }
675 rv = 1;
676 } else
677 spin_unlock_irq(&conf->device_lock);
678 return rv;
679}
0a27ec96
N
680/* Barriers....
681 * Sometimes we need to suspend IO while we do something else,
682 * either some resync/recovery, or reconfigure the array.
683 * To do this we raise a 'barrier'.
684 * The 'barrier' is a counter that can be raised multiple times
685 * to count how many activities are happening which preclude
686 * normal IO.
687 * We can only raise the barrier if there is no pending IO.
688 * i.e. if nr_pending == 0.
689 * We choose only to raise the barrier if no-one is waiting for the
690 * barrier to go down. This means that as soon as an IO request
691 * is ready, no other operations which require a barrier will start
692 * until the IO request has had a chance.
693 *
694 * So: regular IO calls 'wait_barrier'. When that returns there
695 * is no background IO happening. It must arrange to call
696 * allow_barrier when it has finished its IO.
697 * Background IO calls must call raise_barrier. Once that returns
698 * there is no normal IO happening. It must arrange to call
699 * lower_barrier when the particular background IO completes.
1da177e4 700 */
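/*
 * Illustrative usage sketch, restating the rules above (not new behaviour):
 *
 *   regular IO path:                  resync/recovery path:
 *     wait_barrier(conf);               raise_barrier(conf, 0);
 *     ... submit the request ...        ... do the background IO ...
 *     allow_barrier(conf);              lower_barrier(conf);
 */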
1da177e4 701
6cce3b23 702static void raise_barrier(conf_t *conf, int force)
1da177e4 703{
6cce3b23 704 BUG_ON(force && !conf->barrier);
1da177e4 705 spin_lock_irq(&conf->resync_lock);
0a27ec96 706
6cce3b23
N
707 /* Wait until no block IO is waiting (unless 'force') */
708 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
0a27ec96
N
709 conf->resync_lock,
710 raid10_unplug(conf->mddev->queue));
711
712 /* block any new IO from starting */
713 conf->barrier++;
714
715 /* Now wait for all pending IO to complete */
716 wait_event_lock_irq(conf->wait_barrier,
717 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
718 conf->resync_lock,
719 raid10_unplug(conf->mddev->queue));
720
721 spin_unlock_irq(&conf->resync_lock);
722}
723
724static void lower_barrier(conf_t *conf)
725{
726 unsigned long flags;
727 spin_lock_irqsave(&conf->resync_lock, flags);
728 conf->barrier--;
729 spin_unlock_irqrestore(&conf->resync_lock, flags);
730 wake_up(&conf->wait_barrier);
731}
732
733static void wait_barrier(conf_t *conf)
734{
735 spin_lock_irq(&conf->resync_lock);
736 if (conf->barrier) {
737 conf->nr_waiting++;
738 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
739 conf->resync_lock,
740 raid10_unplug(conf->mddev->queue));
741 conf->nr_waiting--;
1da177e4 742 }
0a27ec96 743 conf->nr_pending++;
1da177e4
LT
744 spin_unlock_irq(&conf->resync_lock);
745}
746
0a27ec96
N
747static void allow_barrier(conf_t *conf)
748{
749 unsigned long flags;
750 spin_lock_irqsave(&conf->resync_lock, flags);
751 conf->nr_pending--;
752 spin_unlock_irqrestore(&conf->resync_lock, flags);
753 wake_up(&conf->wait_barrier);
754}
755
4443ae10
N
756static void freeze_array(conf_t *conf)
757{
758 /* stop syncio and normal IO and wait for everything to
f188593e 759 * go quiet.
4443ae10 760 * We increment barrier and nr_waiting, and then
1c830532
N
761 * wait until nr_pending matches nr_queued+1
762 * This is called in the context of one normal IO request
763 * that has failed. Thus any sync request that might be pending
764 * will be blocked by nr_pending, and we need to wait for
765 * pending IO requests to complete or be queued for re-try.
766 * Thus the number queued (nr_queued) plus this request (1)
767 * must match the number of pending IOs (nr_pending) before
768 * we continue.
4443ae10
N
769 */
770 spin_lock_irq(&conf->resync_lock);
771 conf->barrier++;
772 conf->nr_waiting++;
773 wait_event_lock_irq(conf->wait_barrier,
1c830532 774 conf->nr_pending == conf->nr_queued+1,
4443ae10 775 conf->resync_lock,
a35e63ef
N
776 ({ flush_pending_writes(conf);
777 raid10_unplug(conf->mddev->queue); }));
4443ae10
N
778 spin_unlock_irq(&conf->resync_lock);
779}
780
781static void unfreeze_array(conf_t *conf)
782{
783 /* reverse the effect of the freeze */
784 spin_lock_irq(&conf->resync_lock);
785 conf->barrier--;
786 conf->nr_waiting--;
787 wake_up(&conf->wait_barrier);
788 spin_unlock_irq(&conf->resync_lock);
789}
790
165125e1 791static int make_request(struct request_queue *q, struct bio * bio)
1da177e4
LT
792{
793 mddev_t *mddev = q->queuedata;
070ec55d 794 conf_t *conf = mddev->private;
1da177e4
LT
795 mirror_info_t *mirror;
796 r10bio_t *r10_bio;
797 struct bio *read_bio;
c9959059 798 int cpu;
1da177e4
LT
799 int i;
800 int chunk_sects = conf->chunk_mask + 1;
a362357b 801 const int rw = bio_data_dir(bio);
1f98a13f 802 const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
6cce3b23
N
803 struct bio_list bl;
804 unsigned long flags;
6bfe0b49 805 mdk_rdev_t *blocked_rdev;
1da177e4 806
1f98a13f 807 if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
a2826aa9 808 md_barrier_request(mddev, bio);
e5dcdd80
N
809 return 0;
810 }
811
1da177e4
LT
812 /* If this request crosses a chunk boundary, we need to
813 * split it. This will only happen for 1 PAGE (or less) requests.
814 */
815 if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
816 > chunk_sects &&
817 conf->near_copies < conf->raid_disks)) {
818 struct bio_pair *bp;
819 /* Sanity check -- queue functions should prevent this happening */
820 if (bio->bi_vcnt != 1 ||
821 bio->bi_idx != 0)
822 goto bad_map;
823 /* This is a one page bio that upper layers
824 * refuse to split for us, so we need to split it.
825 */
6feef531 826 bp = bio_split(bio,
1da177e4
LT
827 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
828 if (make_request(q, &bp->bio1))
829 generic_make_request(&bp->bio1);
830 if (make_request(q, &bp->bio2))
831 generic_make_request(&bp->bio2);
832
833 bio_pair_release(bp);
834 return 0;
835 bad_map:
836 printk("raid10_make_request bug: can't convert block across chunks"
837 " or bigger than %dk %llu %d\n", chunk_sects/2,
838 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
839
6712ecf8 840 bio_io_error(bio);
1da177e4
LT
841 return 0;
842 }
843
3d310eb7 844 md_write_start(mddev, bio);
06d91a5f 845
1da177e4
LT
846 /*
847 * Register the new request and wait if the reconstruction
848 * thread has put up a bar for new requests.
849 * Continue immediately if no resync is active currently.
850 */
0a27ec96 851 wait_barrier(conf);
1da177e4 852
074a7aca
TH
853 cpu = part_stat_lock();
854 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
855 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
856 bio_sectors(bio));
857 part_stat_unlock();
1da177e4
LT
858
859 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
860
861 r10_bio->master_bio = bio;
862 r10_bio->sectors = bio->bi_size >> 9;
863
864 r10_bio->mddev = mddev;
865 r10_bio->sector = bio->bi_sector;
6cce3b23 866 r10_bio->state = 0;
1da177e4 867
a362357b 868 if (rw == READ) {
1da177e4
LT
869 /*
870 * read balancing logic:
871 */
872 int disk = read_balance(conf, r10_bio);
873 int slot = r10_bio->read_slot;
874 if (disk < 0) {
875 raid_end_bio_io(r10_bio);
876 return 0;
877 }
878 mirror = conf->mirrors + disk;
879
880 read_bio = bio_clone(bio, GFP_NOIO);
881
882 r10_bio->devs[slot].bio = read_bio;
883
884 read_bio->bi_sector = r10_bio->devs[slot].addr +
885 mirror->rdev->data_offset;
886 read_bio->bi_bdev = mirror->rdev->bdev;
887 read_bio->bi_end_io = raid10_end_read_request;
1ef04fef 888 read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
1da177e4
LT
889 read_bio->bi_private = r10_bio;
890
891 generic_make_request(read_bio);
892 return 0;
893 }
894
895 /*
896 * WRITE:
897 */
6bfe0b49 898 /* first select target devices under rcu_lock and
1da177e4
LT
899 * inc refcount on their rdev. Record them by setting
900 * bios[x] to bio
901 */
902 raid10_find_phys(conf, r10_bio);
6bfe0b49 903 retry_write:
cb6969e8 904 blocked_rdev = NULL;
1da177e4
LT
905 rcu_read_lock();
906 for (i = 0; i < conf->copies; i++) {
907 int d = r10_bio->devs[i].devnum;
d6065f7b 908 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
6bfe0b49
DW
909 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
910 atomic_inc(&rdev->nr_pending);
911 blocked_rdev = rdev;
912 break;
913 }
914 if (rdev && !test_bit(Faulty, &rdev->flags)) {
d6065f7b 915 atomic_inc(&rdev->nr_pending);
1da177e4 916 r10_bio->devs[i].bio = bio;
6cce3b23 917 } else {
1da177e4 918 r10_bio->devs[i].bio = NULL;
6cce3b23
N
919 set_bit(R10BIO_Degraded, &r10_bio->state);
920 }
1da177e4
LT
921 }
922 rcu_read_unlock();
923
6bfe0b49
DW
924 if (unlikely(blocked_rdev)) {
925 /* Have to wait for this device to get unblocked, then retry */
926 int j;
927 int d;
928
929 for (j = 0; j < i; j++)
930 if (r10_bio->devs[j].bio) {
931 d = r10_bio->devs[j].devnum;
932 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
933 }
934 allow_barrier(conf);
935 md_wait_for_blocked_rdev(blocked_rdev, mddev);
936 wait_barrier(conf);
937 goto retry_write;
938 }
939
6cce3b23 940 atomic_set(&r10_bio->remaining, 0);
06d91a5f 941
6cce3b23 942 bio_list_init(&bl);
1da177e4
LT
943 for (i = 0; i < conf->copies; i++) {
944 struct bio *mbio;
945 int d = r10_bio->devs[i].devnum;
946 if (!r10_bio->devs[i].bio)
947 continue;
948
949 mbio = bio_clone(bio, GFP_NOIO);
950 r10_bio->devs[i].bio = mbio;
951
952 mbio->bi_sector = r10_bio->devs[i].addr+
953 conf->mirrors[d].rdev->data_offset;
954 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
955 mbio->bi_end_io = raid10_end_write_request;
1ef04fef 956 mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
1da177e4
LT
957 mbio->bi_private = r10_bio;
958
959 atomic_inc(&r10_bio->remaining);
6cce3b23 960 bio_list_add(&bl, mbio);
1da177e4
LT
961 }
962
f6f953aa
AR
963 if (unlikely(!atomic_read(&r10_bio->remaining))) {
964 /* the array is dead */
965 md_write_end(mddev);
966 raid_end_bio_io(r10_bio);
967 return 0;
968 }
969
6cce3b23
N
970 bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
971 spin_lock_irqsave(&conf->device_lock, flags);
972 bio_list_merge(&conf->pending_bio_list, &bl);
973 blk_plug_device(mddev->queue);
974 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 975
a35e63ef
N
976 /* In case raid10d snuck in to freeze_array */
977 wake_up(&conf->wait_barrier);
978
e3881a68
LE
979 if (do_sync)
980 md_wakeup_thread(mddev->thread);
981
1da177e4
LT
982 return 0;
983}
984
985static void status(struct seq_file *seq, mddev_t *mddev)
986{
070ec55d 987 conf_t *conf = mddev->private;
1da177e4
LT
988 int i;
989
990 if (conf->near_copies < conf->raid_disks)
9d8f0363 991 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1da177e4
LT
992 if (conf->near_copies > 1)
993 seq_printf(seq, " %d near-copies", conf->near_copies);
c93983bf
N
994 if (conf->far_copies > 1) {
995 if (conf->far_offset)
996 seq_printf(seq, " %d offset-copies", conf->far_copies);
997 else
998 seq_printf(seq, " %d far-copies", conf->far_copies);
999 }
1da177e4 1000 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
76186dd8 1001 conf->raid_disks - mddev->degraded);
1da177e4
LT
1002 for (i = 0; i < conf->raid_disks; i++)
1003 seq_printf(seq, "%s",
1004 conf->mirrors[i].rdev &&
b2d444d7 1005 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1da177e4
LT
1006 seq_printf(seq, "]");
1007}
1008
1009static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1010{
1011 char b[BDEVNAME_SIZE];
070ec55d 1012 conf_t *conf = mddev->private;
1da177e4
LT
1013
1014 /*
1015 * If it is not operational, then we have already marked it as dead
1016 * else if it is the last working disk, ignore the error, let the
1017 * next level up know.
1018 * else mark the drive as failed
1019 */
b2d444d7 1020 if (test_bit(In_sync, &rdev->flags)
76186dd8 1021 && conf->raid_disks-mddev->degraded == 1)
1da177e4
LT
1022 /*
1023 * Don't fail the drive, just return an IO error.
1024 * The test should really be more sophisticated than
1025 * "working_disks == 1", but it isn't critical, and
1026 * can wait until we do more sophisticated "is the drive
1027 * really dead" tests...
1028 */
1029 return;
c04be0aa
N
1030 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1031 unsigned long flags;
1032 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 1033 mddev->degraded++;
c04be0aa 1034 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4
LT
1035 /*
1036 * if recovery is running, make sure it aborts.
1037 */
dfc70645 1038 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1da177e4 1039 }
b2d444d7 1040 set_bit(Faulty, &rdev->flags);
850b2b42 1041 set_bit(MD_CHANGE_DEVS, &mddev->flags);
d7a420c9
NA
1042 printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
1043 "raid10: Operation continuing on %d devices.\n",
76186dd8 1044 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1da177e4
LT
1045}
1046
1047static void print_conf(conf_t *conf)
1048{
1049 int i;
1050 mirror_info_t *tmp;
1051
1052 printk("RAID10 conf printout:\n");
1053 if (!conf) {
1054 printk("(!conf)\n");
1055 return;
1056 }
76186dd8 1057 printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1da177e4
LT
1058 conf->raid_disks);
1059
1060 for (i = 0; i < conf->raid_disks; i++) {
1061 char b[BDEVNAME_SIZE];
1062 tmp = conf->mirrors + i;
1063 if (tmp->rdev)
1064 printk(" disk %d, wo:%d, o:%d, dev:%s\n",
b2d444d7
N
1065 i, !test_bit(In_sync, &tmp->rdev->flags),
1066 !test_bit(Faulty, &tmp->rdev->flags),
1da177e4
LT
1067 bdevname(tmp->rdev->bdev,b));
1068 }
1069}
1070
1071static void close_sync(conf_t *conf)
1072{
0a27ec96
N
1073 wait_barrier(conf);
1074 allow_barrier(conf);
1da177e4
LT
1075
1076 mempool_destroy(conf->r10buf_pool);
1077 conf->r10buf_pool = NULL;
1078}
1079
6d508242
N
1080/* check if there are enough drives for
1081 * every block to appear on at least one
1082 */
1083static int enough(conf_t *conf)
1084{
1085 int first = 0;
1086
1087 do {
1088 int n = conf->copies;
1089 int cnt = 0;
1090 while (n--) {
1091 if (conf->mirrors[first].rdev)
1092 cnt++;
1093 first = (first+1) % conf->raid_disks;
1094 }
1095 if (cnt == 0)
1096 return 0;
1097 } while (first != 0);
1098 return 1;
1099}
1100
1da177e4
LT
1101static int raid10_spare_active(mddev_t *mddev)
1102{
1103 int i;
1104 conf_t *conf = mddev->private;
1105 mirror_info_t *tmp;
1106
1107 /*
1108 * Find all non-in_sync disks within the RAID10 configuration
1109 * and mark them in_sync
1110 */
1111 for (i = 0; i < conf->raid_disks; i++) {
1112 tmp = conf->mirrors + i;
1113 if (tmp->rdev
b2d444d7 1114 && !test_bit(Faulty, &tmp->rdev->flags)
c04be0aa
N
1115 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1116 unsigned long flags;
1117 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 1118 mddev->degraded--;
c04be0aa 1119 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4
LT
1120 }
1121 }
1122
1123 print_conf(conf);
1124 return 0;
1125}
1126
1127
1128static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1129{
1130 conf_t *conf = mddev->private;
199050ea 1131 int err = -EEXIST;
1da177e4
LT
1132 int mirror;
1133 mirror_info_t *p;
6c2fce2e
NB
1134 int first = 0;
1135 int last = mddev->raid_disks - 1;
1da177e4
LT
1136
1137 if (mddev->recovery_cp < MaxSector)
1138 /* only hot-add to in-sync arrays, as recovery is
1139 * very different from resync
1140 */
199050ea 1141 return -EBUSY;
6d508242 1142 if (!enough(conf))
199050ea 1143 return -EINVAL;
1da177e4 1144
a53a6c85 1145 if (rdev->raid_disk >= 0)
6c2fce2e 1146 first = last = rdev->raid_disk;
1da177e4 1147
6cce3b23 1148 if (rdev->saved_raid_disk >= 0 &&
6c2fce2e 1149 rdev->saved_raid_disk >= first &&
6cce3b23
N
1150 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1151 mirror = rdev->saved_raid_disk;
1152 else
6c2fce2e
NB
1153 mirror = first;
1154 for ( ; mirror <= last ; mirror++)
1da177e4
LT
1155 if ( !(p=conf->mirrors+mirror)->rdev) {
1156
8f6c2e4b
MP
1157 disk_stack_limits(mddev->gendisk, rdev->bdev,
1158 rdev->data_offset << 9);
627a2d3c
N
1159 /* as we don't honour merge_bvec_fn, we must
1160 * never risk violating it, so limit
1161 * ->max_segments to one lying within a single
1162 * page, as a one page request is never in
1163 * violation.
1da177e4 1164 */
627a2d3c
N
1165 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1166 blk_queue_max_segments(mddev->queue, 1);
1167 blk_queue_segment_boundary(mddev->queue,
1168 PAGE_CACHE_SIZE - 1);
1169 }
1da177e4
LT
1170
1171 p->head_position = 0;
1172 rdev->raid_disk = mirror;
199050ea 1173 err = 0;
6cce3b23
N
1174 if (rdev->saved_raid_disk != mirror)
1175 conf->fullsync = 1;
d6065f7b 1176 rcu_assign_pointer(p->rdev, rdev);
1da177e4
LT
1177 break;
1178 }
1179
ac5e7113 1180 md_integrity_add_rdev(rdev, mddev);
1da177e4 1181 print_conf(conf);
199050ea 1182 return err;
1da177e4
LT
1183}
1184
1185static int raid10_remove_disk(mddev_t *mddev, int number)
1186{
1187 conf_t *conf = mddev->private;
1188 int err = 0;
1189 mdk_rdev_t *rdev;
1190 mirror_info_t *p = conf->mirrors+ number;
1191
1192 print_conf(conf);
1193 rdev = p->rdev;
1194 if (rdev) {
b2d444d7 1195 if (test_bit(In_sync, &rdev->flags) ||
1da177e4
LT
1196 atomic_read(&rdev->nr_pending)) {
1197 err = -EBUSY;
1198 goto abort;
1199 }
dfc70645
N
1200 /* Only remove faulty devices if recovery
1201 * is not possible.
1202 */
1203 if (!test_bit(Faulty, &rdev->flags) &&
1204 enough(conf)) {
1205 err = -EBUSY;
1206 goto abort;
1207 }
1da177e4 1208 p->rdev = NULL;
fbd568a3 1209 synchronize_rcu();
1da177e4
LT
1210 if (atomic_read(&rdev->nr_pending)) {
1211 /* lost the race, try later */
1212 err = -EBUSY;
1213 p->rdev = rdev;
ac5e7113 1214 goto abort;
1da177e4 1215 }
ac5e7113 1216 md_integrity_register(mddev);
1da177e4
LT
1217 }
1218abort:
1219
1220 print_conf(conf);
1221 return err;
1222}
1223
1224
6712ecf8 1225static void end_sync_read(struct bio *bio, int error)
1da177e4 1226{
1da177e4 1227 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
070ec55d 1228 conf_t *conf = r10_bio->mddev->private;
1da177e4
LT
1229 int i,d;
1230
1da177e4
LT
1231 for (i=0; i<conf->copies; i++)
1232 if (r10_bio->devs[i].bio == bio)
1233 break;
b6385483 1234 BUG_ON(i == conf->copies);
1da177e4
LT
1235 update_head_pos(i, r10_bio);
1236 d = r10_bio->devs[i].devnum;
0eb3ff12
N
1237
1238 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1239 set_bit(R10BIO_Uptodate, &r10_bio->state);
4dbcdc75
N
1240 else {
1241 atomic_add(r10_bio->sectors,
1242 &conf->mirrors[d].rdev->corrected_errors);
1243 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1244 md_error(r10_bio->mddev,
1245 conf->mirrors[d].rdev);
1246 }
1da177e4
LT
1247
1248 /* for reconstruct, we always reschedule after a read.
1249 * for resync, only after all reads
1250 */
73d5c38a 1251 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1da177e4
LT
1252 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1253 atomic_dec_and_test(&r10_bio->remaining)) {
1254 /* we have read all the blocks,
1255 * do the comparison in process context in raid10d
1256 */
1257 reschedule_retry(r10_bio);
1258 }
1da177e4
LT
1259}
1260
6712ecf8 1261static void end_sync_write(struct bio *bio, int error)
1da177e4
LT
1262{
1263 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1264 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1265 mddev_t *mddev = r10_bio->mddev;
070ec55d 1266 conf_t *conf = mddev->private;
1da177e4
LT
1267 int i,d;
1268
1da177e4
LT
1269 for (i = 0; i < conf->copies; i++)
1270 if (r10_bio->devs[i].bio == bio)
1271 break;
1272 d = r10_bio->devs[i].devnum;
1273
1274 if (!uptodate)
1275 md_error(mddev, conf->mirrors[d].rdev);
dfc70645 1276
1da177e4
LT
1277 update_head_pos(i, r10_bio);
1278
73d5c38a 1279 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1da177e4
LT
1280 while (atomic_dec_and_test(&r10_bio->remaining)) {
1281 if (r10_bio->master_bio == NULL) {
1282 /* the primary of several recovery bios */
73d5c38a 1283 sector_t s = r10_bio->sectors;
1da177e4 1284 put_buf(r10_bio);
73d5c38a 1285 md_done_sync(mddev, s, 1);
1da177e4
LT
1286 break;
1287 } else {
1288 r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1289 put_buf(r10_bio);
1290 r10_bio = r10_bio2;
1291 }
1292 }
1da177e4
LT
1293}
1294
1295/*
1296 * Note: sync and recovery are handled very differently for raid10.
1297 * This code is for resync.
1298 * For resync, we read through virtual addresses and read all blocks.
1299 * If there is any error, we schedule a write. The lowest numbered
1300 * drive is authoritative.
1301 * However, requests come in for physical addresses, so we need to map.
1302 * For every physical address there are raid_disks/copies virtual addresses,
1303 * which is always at least one, but is not necessarily an integer.
1304 * This means that a physical address can span multiple chunks, so we may
1305 * have to submit multiple io requests for a single sync request.
1306 */
1307/*
1308 * We check if all blocks are in-sync and only write to blocks that
1309 * aren't in sync
1310 */
1311static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1312{
070ec55d 1313 conf_t *conf = mddev->private;
1da177e4
LT
1314 int i, first;
1315 struct bio *tbio, *fbio;
1316
1317 atomic_set(&r10_bio->remaining, 1);
1318
1319 /* find the first device with a block */
1320 for (i=0; i<conf->copies; i++)
1321 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1322 break;
1323
1324 if (i == conf->copies)
1325 goto done;
1326
1327 first = i;
1328 fbio = r10_bio->devs[i].bio;
1329
1330 /* now find blocks with errors */
0eb3ff12
N
1331 for (i=0 ; i < conf->copies ; i++) {
1332 int j, d;
1333 int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1da177e4 1334
1da177e4 1335 tbio = r10_bio->devs[i].bio;
0eb3ff12
N
1336
1337 if (tbio->bi_end_io != end_sync_read)
1338 continue;
1339 if (i == first)
1da177e4 1340 continue;
0eb3ff12
N
1341 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1342 /* We know that the bi_io_vec layout is the same for
1343 * both 'first' and 'i', so we just compare them.
1344 * All vec entries are PAGE_SIZE;
1345 */
1346 for (j = 0; j < vcnt; j++)
1347 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1348 page_address(tbio->bi_io_vec[j].bv_page),
1349 PAGE_SIZE))
1350 break;
1351 if (j == vcnt)
1352 continue;
1353 mddev->resync_mismatches += r10_bio->sectors;
1354 }
18f08819
N
1355 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1356 /* Don't fix anything. */
1357 continue;
1da177e4
LT
1358 /* Ok, we need to write this bio
1359 * First we need to fixup bv_offset, bv_len and
1360 * bi_vecs, as the read request might have corrupted these
1361 */
1362 tbio->bi_vcnt = vcnt;
1363 tbio->bi_size = r10_bio->sectors << 9;
1364 tbio->bi_idx = 0;
1365 tbio->bi_phys_segments = 0;
1da177e4
LT
1366 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1367 tbio->bi_flags |= 1 << BIO_UPTODATE;
1368 tbio->bi_next = NULL;
1369 tbio->bi_rw = WRITE;
1370 tbio->bi_private = r10_bio;
1371 tbio->bi_sector = r10_bio->devs[i].addr;
1372
1373 for (j=0; j < vcnt ; j++) {
1374 tbio->bi_io_vec[j].bv_offset = 0;
1375 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1376
1377 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1378 page_address(fbio->bi_io_vec[j].bv_page),
1379 PAGE_SIZE);
1380 }
1381 tbio->bi_end_io = end_sync_write;
1382
1383 d = r10_bio->devs[i].devnum;
1384 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1385 atomic_inc(&r10_bio->remaining);
1386 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1387
1388 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1389 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1390 generic_make_request(tbio);
1391 }
1392
1393done:
1394 if (atomic_dec_and_test(&r10_bio->remaining)) {
1395 md_done_sync(mddev, r10_bio->sectors, 1);
1396 put_buf(r10_bio);
1397 }
1398}
1399
1400/*
1401 * Now for the recovery code.
1402 * Recovery happens across physical sectors.
1403 * We recover all non-in_sync drives by finding the virtual address of
1404 * each, and then choose a working drive that also has that virt address.
1405 * There is a separate r10_bio for each non-in_sync drive.
1406 * Only the first two slots are in use. The first for reading,
1407 * The second for writing.
1408 *
1409 */
1410
1411static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1412{
070ec55d 1413 conf_t *conf = mddev->private;
1da177e4
LT
1414 int i, d;
1415 struct bio *bio, *wbio;
1416
1417
1418 /* move the pages across to the second bio
1419 * and submit the write request
1420 */
1421 bio = r10_bio->devs[0].bio;
1422 wbio = r10_bio->devs[1].bio;
1423 for (i=0; i < wbio->bi_vcnt; i++) {
1424 struct page *p = bio->bi_io_vec[i].bv_page;
1425 bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1426 wbio->bi_io_vec[i].bv_page = p;
1427 }
1428 d = r10_bio->devs[1].devnum;
1429
1430 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1431 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
0eb3ff12
N
1432 if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1433 generic_make_request(wbio);
1434 else
6712ecf8 1435 bio_endio(wbio, -EIO);
1da177e4
LT
1436}
1437
1438
1e50915f
RB
1439/*
1440 * Used by fix_read_error() to decay the per rdev read_errors.
1441 * We halve the read error count for every hour that has elapsed
1442 * since the last recorded read error.
1443 *
1444 */
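/*
 * Worked example (illustrative): a device with 40 recorded read errors
 * whose last error was 3 hours ago comes out of the decay below with
 * 40 >> 3 = 5 errors remaining; once hours_since_last reaches
 * 8 * sizeof(read_errors) the count is simply reset to 0.
 */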
1445static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
1446{
1447 struct timespec cur_time_mon;
1448 unsigned long hours_since_last;
1449 unsigned int read_errors = atomic_read(&rdev->read_errors);
1450
1451 ktime_get_ts(&cur_time_mon);
1452
1453 if (rdev->last_read_error.tv_sec == 0 &&
1454 rdev->last_read_error.tv_nsec == 0) {
1455 /* first time we've seen a read error */
1456 rdev->last_read_error = cur_time_mon;
1457 return;
1458 }
1459
1460 hours_since_last = (cur_time_mon.tv_sec -
1461 rdev->last_read_error.tv_sec) / 3600;
1462
1463 rdev->last_read_error = cur_time_mon;
1464
1465 /*
1466 * if hours_since_last is > the number of bits in read_errors
1467 * just set read errors to 0. We do this to avoid
1468 * overflowing the shift of read_errors by hours_since_last.
1469 */
1470 if (hours_since_last >= 8 * sizeof(read_errors))
1471 atomic_set(&rdev->read_errors, 0);
1472 else
1473 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
1474}
1475
1da177e4
LT
1476/*
1477 * This is a kernel thread which:
1478 *
1479 * 1. Retries failed read operations on working mirrors.
1480 * 2. Updates the raid superblock when problems are encountered.
6814d536 1481 * 3. Performs writes following reads for array synchronising.
1da177e4
LT
1482 */
1483
6814d536
N
1484static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1485{
1486 int sect = 0; /* Offset from r10_bio->sector */
1487 int sectors = r10_bio->sectors;
1488 mdk_rdev_t*rdev;
1e50915f
RB
1489 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
1490
1491 rcu_read_lock();
1492 {
1493 int d = r10_bio->devs[r10_bio->read_slot].devnum;
1494 char b[BDEVNAME_SIZE];
1495 int cur_read_error_count = 0;
1496
1497 rdev = rcu_dereference(conf->mirrors[d].rdev);
1498 bdevname(rdev->bdev, b);
1499
1500 if (test_bit(Faulty, &rdev->flags)) {
1501 rcu_read_unlock();
1502 /* drive has already been failed, just ignore any
1503 more fix_read_error() attempts */
1504 return;
1505 }
1506
1507 check_decay_read_errors(mddev, rdev);
1508 atomic_inc(&rdev->read_errors);
1509 cur_read_error_count = atomic_read(&rdev->read_errors);
1510 if (cur_read_error_count > max_read_errors) {
1511 rcu_read_unlock();
1512 printk(KERN_NOTICE
1513 "raid10: %s: Raid device exceeded "
1514 "read_error threshold "
1515 "[cur %d:max %d]\n",
1516 b, cur_read_error_count, max_read_errors);
1517 printk(KERN_NOTICE
1518 "raid10: %s: Failing raid "
1519 "device\n", b);
1520 md_error(mddev, conf->mirrors[d].rdev);
1521 return;
1522 }
1523 }
1524 rcu_read_unlock();
1525
6814d536
N
1526 while(sectors) {
1527 int s = sectors;
1528 int sl = r10_bio->read_slot;
1529 int success = 0;
1530 int start;
1531
1532 if (s > (PAGE_SIZE>>9))
1533 s = PAGE_SIZE >> 9;
1534
1535 rcu_read_lock();
1536 do {
1537 int d = r10_bio->devs[sl].devnum;
1538 rdev = rcu_dereference(conf->mirrors[d].rdev);
1539 if (rdev &&
1540 test_bit(In_sync, &rdev->flags)) {
1541 atomic_inc(&rdev->nr_pending);
1542 rcu_read_unlock();
1543 success = sync_page_io(rdev->bdev,
1544 r10_bio->devs[sl].addr +
1545 sect + rdev->data_offset,
1546 s<<9,
1547 conf->tmppage, READ);
1548 rdev_dec_pending(rdev, mddev);
1549 rcu_read_lock();
1550 if (success)
1551 break;
1552 }
1553 sl++;
1554 if (sl == conf->copies)
1555 sl = 0;
1556 } while (!success && sl != r10_bio->read_slot);
1557 rcu_read_unlock();
1558
1559 if (!success) {
1560 /* Cannot read from anywhere -- bye bye array */
1561 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
1562 md_error(mddev, conf->mirrors[dn].rdev);
1563 break;
1564 }
1565
1566 start = sl;
1567 /* write it back and re-read */
1568 rcu_read_lock();
1569 while (sl != r10_bio->read_slot) {
67b8dc4b 1570 char b[BDEVNAME_SIZE];
6814d536
N
1571 int d;
1572 if (sl==0)
1573 sl = conf->copies;
1574 sl--;
1575 d = r10_bio->devs[sl].devnum;
1576 rdev = rcu_dereference(conf->mirrors[d].rdev);
1577 if (rdev &&
1578 test_bit(In_sync, &rdev->flags)) {
1579 atomic_inc(&rdev->nr_pending);
1580 rcu_read_unlock();
1581 atomic_add(s, &rdev->corrected_errors);
1582 if (sync_page_io(rdev->bdev,
1583 r10_bio->devs[sl].addr +
1584 sect + rdev->data_offset,
1585 s<<9, conf->tmppage, WRITE)
67b8dc4b 1586 == 0) {
6814d536 1587 /* Well, this device is dead */
67b8dc4b
RB
1588 printk(KERN_NOTICE
1589 "raid10:%s: read correction "
1590 "write failed"
1591 " (%d sectors at %llu on %s)\n",
1592 mdname(mddev), s,
1593 (unsigned long long)(sect+
1594 rdev->data_offset),
1595 bdevname(rdev->bdev, b));
1596 printk(KERN_NOTICE "raid10:%s: failing "
1597 "drive\n",
1598 bdevname(rdev->bdev, b));
6814d536 1599 md_error(mddev, rdev);
67b8dc4b 1600 }
6814d536
N
1601 rdev_dec_pending(rdev, mddev);
1602 rcu_read_lock();
1603 }
1604 }
1605 sl = start;
1606 while (sl != r10_bio->read_slot) {
1607 int d;
1608 if (sl==0)
1609 sl = conf->copies;
1610 sl--;
1611 d = r10_bio->devs[sl].devnum;
1612 rdev = rcu_dereference(conf->mirrors[d].rdev);
1613 if (rdev &&
1614 test_bit(In_sync, &rdev->flags)) {
1615 char b[BDEVNAME_SIZE];
1616 atomic_inc(&rdev->nr_pending);
1617 rcu_read_unlock();
1618 if (sync_page_io(rdev->bdev,
1619 r10_bio->devs[sl].addr +
1620 sect + rdev->data_offset,
67b8dc4b
RB
1621 s<<9, conf->tmppage,
1622 READ) == 0) {
6814d536 1623 /* Well, this device is dead */
67b8dc4b
RB
1624 printk(KERN_NOTICE
1625 "raid10:%s: unable to read back "
1626 "corrected sectors"
1627 " (%d sectors at %llu on %s)\n",
1628 mdname(mddev), s,
1629 (unsigned long long)(sect+
1630 rdev->data_offset),
1631 bdevname(rdev->bdev, b));
1632 printk(KERN_NOTICE "raid10:%s: failing drive\n",
1633 bdevname(rdev->bdev, b));
1634
6814d536 1635 md_error(mddev, rdev);
67b8dc4b 1636 } else {
6814d536
N
1637 printk(KERN_INFO
1638 "raid10:%s: read error corrected"
1639 " (%d sectors at %llu on %s)\n",
1640 mdname(mddev), s,
969b755a
RD
1641 (unsigned long long)(sect+
1642 rdev->data_offset),
6814d536 1643 bdevname(rdev->bdev, b));
67b8dc4b 1644 }
6814d536
N
1645
1646 rdev_dec_pending(rdev, mddev);
1647 rcu_read_lock();
1648 }
1649 }
1650 rcu_read_unlock();
1651
1652 sectors -= s;
1653 sect += s;
1654 }
1655}
1656
1da177e4
LT
1657static void raid10d(mddev_t *mddev)
1658{
1659 r10bio_t *r10_bio;
1660 struct bio *bio;
1661 unsigned long flags;
070ec55d 1662 conf_t *conf = mddev->private;
1da177e4
LT
1663 struct list_head *head = &conf->retry_list;
1664 int unplug=0;
1665 mdk_rdev_t *rdev;
1666
1667 md_check_recovery(mddev);
1da177e4
LT
1668
1669 for (;;) {
1670 char b[BDEVNAME_SIZE];
6cce3b23 1671
a35e63ef 1672 unplug += flush_pending_writes(conf);
6cce3b23 1673
a35e63ef
N
1674 spin_lock_irqsave(&conf->device_lock, flags);
1675 if (list_empty(head)) {
1676 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 1677 break;
a35e63ef 1678 }
1da177e4
LT
1679 r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1680 list_del(head->prev);
4443ae10 1681 conf->nr_queued--;
1da177e4
LT
1682 spin_unlock_irqrestore(&conf->device_lock, flags);
1683
1684 mddev = r10_bio->mddev;
070ec55d 1685 conf = mddev->private;
1da177e4
LT
1686 if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
1687 sync_request_write(mddev, r10_bio);
1688 unplug = 1;
1689 } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1690 recovery_request_write(mddev, r10_bio);
1691 unplug = 1;
1692 } else {
1693 int mirror;
4443ae10
N
1694 /* we got a read error. Maybe the drive is bad. Maybe just
1695 * the block and we can fix it.
1696 * We freeze all other IO, and try reading the block from
1697 * other devices. When we find one, we re-write
1698 * and check if that fixes the read error.
1699 * This is all done synchronously while the array is
1700 * frozen.
1701 */
6814d536
N
1702 if (mddev->ro == 0) {
1703 freeze_array(conf);
1704 fix_read_error(conf, mddev, r10_bio);
1705 unfreeze_array(conf);
4443ae10
N
1706 }
1707
1da177e4 1708 bio = r10_bio->devs[r10_bio->read_slot].bio;
0eb3ff12
N
1709 r10_bio->devs[r10_bio->read_slot].bio =
1710 mddev->ro ? IO_BLOCKED : NULL;
1da177e4
LT
1711 mirror = read_balance(conf, r10_bio);
1712 if (mirror == -1) {
1713 printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
1714 " read error for block %llu\n",
1715 bdevname(bio->bi_bdev,b),
1716 (unsigned long long)r10_bio->sector);
1717 raid_end_bio_io(r10_bio);
14e71344 1718 bio_put(bio);
1da177e4 1719 } else {
1f98a13f 1720 const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
14e71344 1721 bio_put(bio);
1da177e4
LT
1722 rdev = conf->mirrors[mirror].rdev;
1723 if (printk_ratelimit())
1724 printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
1725 " another mirror\n",
1726 bdevname(rdev->bdev,b),
1727 (unsigned long long)r10_bio->sector);
1728 bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1729 r10_bio->devs[r10_bio->read_slot].bio = bio;
1730 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1731 + rdev->data_offset;
1732 bio->bi_bdev = rdev->bdev;
1ef04fef 1733 bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
1da177e4
LT
1734 bio->bi_private = r10_bio;
1735 bio->bi_end_io = raid10_end_read_request;
1736 unplug = 1;
1737 generic_make_request(bio);
1738 }
1739 }
1d9d5241 1740 cond_resched();
1da177e4 1741 }
1da177e4
LT
1742 if (unplug)
1743 unplug_slaves(mddev);
1744}
1745
1746
1747static int init_resync(conf_t *conf)
1748{
1749 int buffs;
1750
1751 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
b6385483 1752 BUG_ON(conf->r10buf_pool);
1da177e4
LT
1753 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1754 if (!conf->r10buf_pool)
1755 return -ENOMEM;
1756 conf->next_resync = 0;
1757 return 0;
1758}
1759
1760/*
1761 * perform a "sync" on one "block"
1762 *
1763 * We need to make sure that no normal I/O request - particularly write
1764 * requests - conflict with active sync requests.
1765 *
1766 * This is achieved by tracking pending requests and a 'barrier' concept
1767 * that can be installed to exclude normal IO requests.
1768 *
1769 * Resync and recovery are handled very differently.
1770 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1771 *
1772 * For resync, we iterate over virtual addresses, read all copies,
1773 * and update if there are differences. If only one copy is live,
1774 * skip it.
1775 * For recovery, we iterate over physical addresses, read a good
1776 * value for each non-in_sync drive, and over-write.
1777 *
1778 * So, for recovery we may have several outstanding complex requests for a
1779 * given address, one for each out-of-sync device. We model this by allocating
1780 * a number of r10_bio structures, one for each out-of-sync device.
1781 * As we set up these structures, we collect all the bios together into a list
1782 * which we then process collectively to add pages, and then process again
1783 * to pass to generic_make_request.
1784 *
1785 * The r10_bio structures are linked using a borrowed master_bio pointer.
1786 * This link is counted in ->remaining. When the r10_bio that points to NULL
1787 * has its remaining count decremented to 0, the whole complex operation
1788 * is complete.
1789 *
1790 */
1791
57afd89f 1792static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1da177e4 1793{
070ec55d 1794 conf_t *conf = mddev->private;
1795 r10bio_t *r10_bio;
1796 struct bio *biolist = NULL, *bio;
1797 sector_t max_sector, nr_sectors;
1798 int disk;
1799 int i;
1800 int max_sync;
1801 int sync_blocks;
1802
1803 sector_t sectors_skipped = 0;
1804 int chunks_skipped = 0;
1805
1806 if (!conf->r10buf_pool)
1807 if (init_resync(conf))
57afd89f 1808 return 0;
1809
1810 skipped:
58c0fed4 1811 max_sector = mddev->dev_sectors;
1812 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1813 max_sector = mddev->resync_max_sectors;
1814 if (sector_nr >= max_sector) {
1815 /* If we aborted, we need to abort the
1816 * sync on the 'current' bitmap chunks (there can
1817 * be several when recovering multiple devices),
1818 * as we may have started syncing them but not finished.
1819 * We can find the current address in
1820 * mddev->curr_resync, but for recovery,
1821 * we need to convert that to several
1822 * virtual addresses.
1823 */
1824 if (mddev->curr_resync < max_sector) { /* aborted */
1825 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1826 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1827 &sync_blocks, 1);
1828 else for (i=0; i<conf->raid_disks; i++) {
1829 sector_t sect =
1830 raid10_find_virt(conf, mddev->curr_resync, i);
1831 bitmap_end_sync(mddev->bitmap, sect,
1832 &sync_blocks, 1);
1833 }
1834 } else /* completed sync */
1835 conf->fullsync = 0;
1836
1837 bitmap_close_sync(mddev->bitmap);
1da177e4 1838 close_sync(conf);
57afd89f 1839 *skipped = 1;
1840 return sectors_skipped;
1841 }
1842 if (chunks_skipped >= conf->raid_disks) {
1843 /* if there has been nothing to do on any drive,
1844 * then there is nothing to do at all..
1845 */
1846 *skipped = 1;
1847 return (max_sector - sector_nr) + sectors_skipped;
1848 }
1849
1850 if (max_sector > mddev->resync_max)
1851 max_sector = mddev->resync_max; /* Don't do IO beyond here */
1852
1853 /* make sure whole request will fit in a chunk - if chunks
1854 * are meaningful
1855 */
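/* e.g. with 64 KiB chunks (chunk_mask = 127 sectors) a pass starting
 * at sector 100 is cut short at sector 128, the next chunk boundary.
 */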
1856 if (conf->near_copies < conf->raid_disks &&
1857 max_sector > (sector_nr | conf->chunk_mask))
1858 max_sector = (sector_nr | conf->chunk_mask) + 1;
1859 /*
1860 * If there is non-resync activity waiting for us then
1861 * put in a delay to throttle resync.
1862 */
0a27ec96 1863 if (!go_faster && conf->nr_waiting)
1da177e4 1864 msleep_interruptible(1000);
1865
1866 /* Again, very different code for resync and recovery.
1867 * Both must result in an r10bio with a list of bios that
1868 * have bi_end_io, bi_sector, bi_bdev set,
1869 * and bi_private set to the r10bio.
1870 * For recovery, we may actually create several r10bios
1871 * with 2 bios in each, that correspond to the bios in the main one.
1872 * In this case, the subordinate r10bios link back through a
1873 * borrowed master_bio pointer, and the counter in the master
1874 * includes a ref from each subordinate.
1875 */
1876 /* First, we decide what to do and set ->bi_end_io
1877 * To end_sync_read if we want to read, and
1878 * end_sync_write if we will want to write.
1879 */
1880
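/* One pass covers at most RESYNC_PAGES pages; with 4 KiB pages that is
 * 16 << 3 = 128 sectors (64 KiB).
 */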
6cce3b23 1881 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1882 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1883 /* recovery... the complicated one */
a9f326eb 1884 int j, k;
1885 r10_bio = NULL;
1886
1887 for (i=0 ; i<conf->raid_disks; i++)
1888 if (conf->mirrors[i].rdev &&
b2d444d7 1889 !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
6cce3b23 1890 int still_degraded = 0;
1891 /* want to reconstruct this device */
1892 r10bio_t *rb2 = r10_bio;
1893 sector_t sect = raid10_find_virt(conf, sector_nr, i);
1894 int must_sync;
1895 /* Unless we are doing a full sync, we only need
1896 * to recover the block if it is set in the bitmap
1897 */
1898 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1899 &sync_blocks, 1);
1900 if (sync_blocks < max_sync)
1901 max_sync = sync_blocks;
1902 if (!must_sync &&
1903 !conf->fullsync) {
1904 /* yep, skip the sync_blocks here, but don't assume
1905 * that there will never be anything to do here
1906 */
1907 chunks_skipped = -1;
1908 continue;
1909 }
1910
1911 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
6cce3b23 1912 raise_barrier(conf, rb2 != NULL);
1913 atomic_set(&r10_bio->remaining, 0);
1914
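/* Chain this r10_bio to the previous one through the borrowed
 * master_bio pointer (see the comment above sync_request());
 * ->remaining counts the links.
 */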
1915 r10_bio->master_bio = (struct bio*)rb2;
1916 if (rb2)
1917 atomic_inc(&rb2->remaining);
1918 r10_bio->mddev = mddev;
1919 set_bit(R10BIO_IsRecover, &r10_bio->state);
1920 r10_bio->sector = sect;
1921
1da177e4 1922 raid10_find_phys(conf, r10_bio);
1923
1924 /* Need to check if the array will still be
1925 * degraded
1926 */
1927 for (j=0; j<conf->raid_disks; j++)
1928 if (conf->mirrors[j].rdev == NULL ||
1929 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
6cce3b23 1930 still_degraded = 1;
1931 break;
1932 }
18055569 1933
1934 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1935 &sync_blocks, still_degraded);
1936
1937 for (j=0; j<conf->copies;j++) {
1938 int d = r10_bio->devs[j].devnum;
1939 if (conf->mirrors[d].rdev &&
b2d444d7 1940 test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
1941 /* This is where we read from */
1942 bio = r10_bio->devs[0].bio;
1943 bio->bi_next = biolist;
1944 biolist = bio;
1945 bio->bi_private = r10_bio;
1946 bio->bi_end_io = end_sync_read;
802ba064 1947 bio->bi_rw = READ;
1948 bio->bi_sector = r10_bio->devs[j].addr +
1949 conf->mirrors[d].rdev->data_offset;
1950 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1951 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1952 atomic_inc(&r10_bio->remaining);
1953 /* and we write to 'i' */
1954
1955 for (k=0; k<conf->copies; k++)
1956 if (r10_bio->devs[k].devnum == i)
1957 break;
64a742bc 1958 BUG_ON(k == conf->copies);
1959 bio = r10_bio->devs[1].bio;
1960 bio->bi_next = biolist;
1961 biolist = bio;
1962 bio->bi_private = r10_bio;
1963 bio->bi_end_io = end_sync_write;
802ba064 1964 bio->bi_rw = WRITE;
1965 bio->bi_sector = r10_bio->devs[k].addr +
1966 conf->mirrors[i].rdev->data_offset;
1967 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1968
1969 r10_bio->devs[0].devnum = d;
1970 r10_bio->devs[1].devnum = i;
1971
1972 break;
1973 }
1974 }
1975 if (j == conf->copies) {
1976 /* Cannot recover, so abort the recovery */
1977 put_buf(r10_bio);
1978 if (rb2)
1979 atomic_dec(&rb2->remaining);
87fc767b 1980 r10_bio = rb2;
1981 if (!test_and_set_bit(MD_RECOVERY_INTR,
1982 &mddev->recovery))
1983 printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
1984 mdname(mddev));
1985 break;
1986 }
1987 }
1988 if (biolist == NULL) {
1989 while (r10_bio) {
1990 r10bio_t *rb2 = r10_bio;
1991 r10_bio = (r10bio_t*) rb2->master_bio;
1992 rb2->master_bio = NULL;
1993 put_buf(rb2);
1994 }
1995 goto giveup;
1996 }
1997 } else {
1998 /* resync. Schedule a read for every block at this virt offset */
1999 int count = 0;
6cce3b23 2000
2001 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2002
2003 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2004 &sync_blocks, mddev->degraded) &&
2005 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2006 /* We can skip this block */
2007 *skipped = 1;
2008 return sync_blocks + sectors_skipped;
2009 }
2010 if (sync_blocks < max_sync)
2011 max_sync = sync_blocks;
2012 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2013
2014 r10_bio->mddev = mddev;
2015 atomic_set(&r10_bio->remaining, 0);
2016 raise_barrier(conf, 0);
2017 conf->next_resync = sector_nr;
2018
2019 r10_bio->master_bio = NULL;
2020 r10_bio->sector = sector_nr;
2021 set_bit(R10BIO_IsSync, &r10_bio->state);
2022 raid10_find_phys(conf, r10_bio);
2023 r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
2024
2025 for (i=0; i<conf->copies; i++) {
2026 int d = r10_bio->devs[i].devnum;
2027 bio = r10_bio->devs[i].bio;
2028 bio->bi_end_io = NULL;
af03b8e4 2029 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1da177e4 2030 if (conf->mirrors[d].rdev == NULL ||
b2d444d7 2031 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
2032 continue;
2033 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2034 atomic_inc(&r10_bio->remaining);
2035 bio->bi_next = biolist;
2036 biolist = bio;
2037 bio->bi_private = r10_bio;
2038 bio->bi_end_io = end_sync_read;
802ba064 2039 bio->bi_rw = READ;
2040 bio->bi_sector = r10_bio->devs[i].addr +
2041 conf->mirrors[d].rdev->data_offset;
2042 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
2043 count++;
2044 }
2045
2046 if (count < 2) {
2047 for (i=0; i<conf->copies; i++) {
2048 int d = r10_bio->devs[i].devnum;
2049 if (r10_bio->devs[i].bio->bi_end_io)
2050 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
2051 }
2052 put_buf(r10_bio);
2053 biolist = NULL;
2054 goto giveup;
2055 }
2056 }
2057
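/* The bios in 'biolist' come from the r10buf mempool and are reused,
 * so reset every field that a previous request may have modified.
 */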
2058 for (bio = biolist; bio ; bio=bio->bi_next) {
2059
2060 bio->bi_flags &= ~(BIO_POOL_MASK - 1);
2061 if (bio->bi_end_io)
2062 bio->bi_flags |= 1 << BIO_UPTODATE;
2063 bio->bi_vcnt = 0;
2064 bio->bi_idx = 0;
2065 bio->bi_phys_segments = 0;
2066 bio->bi_size = 0;
2067 }
2068
2069 nr_sectors = 0;
2070 if (sector_nr + max_sync < max_sector)
2071 max_sector = sector_nr + max_sync;
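/* Add the pre-allocated pages to every bio in the list, one page per
 * iteration. If any bio cannot take another page, back that page out
 * of the bios that already got it, so all devices see requests of the
 * same size.
 */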
2072 do {
2073 struct page *page;
2074 int len = PAGE_SIZE;
2075 disk = 0;
2076 if (sector_nr + (len>>9) > max_sector)
2077 len = (max_sector - sector_nr) << 9;
2078 if (len == 0)
2079 break;
2080 for (bio= biolist ; bio ; bio=bio->bi_next) {
2081 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2082 if (bio_add_page(bio, page, len, 0) == 0) {
2083 /* stop here */
2084 struct bio *bio2;
2085 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2086 for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
2087 /* remove last page from this bio */
2088 bio2->bi_vcnt--;
2089 bio2->bi_size -= len;
2090 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
2091 }
2092 goto bio_full;
2093 }
2094 disk = i;
2095 }
2096 nr_sectors += len>>9;
2097 sector_nr += len>>9;
2098 } while (biolist->bi_vcnt < RESYNC_PAGES);
2099 bio_full:
2100 r10_bio->sectors = nr_sectors;
2101
2102 while (biolist) {
2103 bio = biolist;
2104 biolist = biolist->bi_next;
2105
2106 bio->bi_next = NULL;
2107 r10_bio = bio->bi_private;
2108 r10_bio->sectors = nr_sectors;
2109
2110 if (bio->bi_end_io == end_sync_read) {
2111 md_sync_acct(bio->bi_bdev, nr_sectors);
2112 generic_make_request(bio);
2113 }
2114 }
2115
2116 if (sectors_skipped)
2117 /* pretend they weren't skipped, it makes
2118 * no important difference in this case
2119 */
2120 md_done_sync(mddev, sectors_skipped, 1);
2121
2122 return sectors_skipped + nr_sectors;
2123 giveup:
2124 /* There is nowhere to write, so all non-sync
2125 * drives must have failed, so try the next chunk...
2126 */
2127 if (sector_nr + max_sync < max_sector)
2128 max_sector = sector_nr + max_sync;
2129
2130 sectors_skipped += (max_sector - sector_nr);
2131 chunks_skipped ++;
2132 sector_nr = max_sector;
1da177e4 2133 goto skipped;
2134}
2135
2136static sector_t
2137raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2138{
2139 sector_t size;
070ec55d 2140 conf_t *conf = mddev->private;
2141
2142 if (!raid_disks)
2143 raid_disks = mddev->raid_disks;
2144 if (!sectors)
2145 sectors = mddev->dev_sectors;
2146
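/* Capacity is (sectors / far_copies) summed over raid_disks, divided by
 * near_copies and rounded down to a whole chunk: e.g. four 1 TiB devices
 * in the common 'n2' layout (near=2, far=1) give a 2 TiB array.
 */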
2147 size = sectors >> conf->chunk_shift;
2148 sector_div(size, conf->far_copies);
2149 size = size * raid_disks;
2150 sector_div(size, conf->near_copies);
2151
2152 return size << conf->chunk_shift;
2153}
2154
2155static int run(mddev_t *mddev)
2156{
2157 conf_t *conf;
8f6c2e4b 2158 int i, disk_idx, chunk_size;
2159 mirror_info_t *disk;
2160 mdk_rdev_t *rdev;
c93983bf 2161 int nc, fc, fo;
2162 sector_t stride, size;
2163
2164 if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
2165 !is_power_of_2(mddev->chunk_sectors)) {
4bbf3771 2166 printk(KERN_ERR "md/raid10: chunk size must be "
964e7913 2167 "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
2604b703 2168 return -EINVAL;
1da177e4 2169 }
2604b703 2170
2171 nc = mddev->layout & 255;
2172 fc = (mddev->layout >> 8) & 255;
c93983bf 2173 fo = mddev->layout & (1<<16);
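/* e.g. the usual 'n2' layout is 0x102: near_copies = 2, far_copies = 1,
 * far_offset clear.
 */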
1da177e4 2174 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
c93983bf 2175 (mddev->layout >> 17)) {
2176 printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
2177 mdname(mddev), mddev->layout);
2178 goto out;
2179 }
2180 /*
2181 * copy the already verified devices into our private RAID10
2182 * bookkeeping area. [whatever we allocate in run(),
2183 * should be freed in stop()]
2184 */
4443ae10 2185 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
2186 mddev->private = conf;
2187 if (!conf) {
2188 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2189 mdname(mddev));
2190 goto out;
2191 }
4443ae10 2192 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2193 GFP_KERNEL);
2194 if (!conf->mirrors) {
2195 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2196 mdname(mddev));
2197 goto out_free_conf;
2198 }
2199
2200 conf->tmppage = alloc_page(GFP_KERNEL);
2201 if (!conf->tmppage)
2202 goto out_free_conf;
1da177e4 2203
64a742bc 2204 conf->raid_disks = mddev->raid_disks;
2205 conf->near_copies = nc;
2206 conf->far_copies = fc;
2207 conf->copies = nc*fc;
c93983bf 2208 conf->far_offset = fo;
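/* chunk_sectors is a power of two, so e.g. 512 KiB chunks (1024 sectors)
 * give chunk_mask = 1023 and chunk_shift = 10.
 */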
2209 conf->chunk_mask = mddev->chunk_sectors - 1;
2210 conf->chunk_shift = ffz(~mddev->chunk_sectors);
58c0fed4 2211 size = mddev->dev_sectors >> conf->chunk_shift;
2212 sector_div(size, fc);
2213 size = size * conf->raid_disks;
2214 sector_div(size, nc);
2215 /* 'size' is now the number of chunks in the array */
2216 /* calculate "used chunks per device" in 'stride' */
2217 stride = size * conf->copies;
2218
2219 /* We need to round up when dividing by raid_disks to
2220 * get the stride size.
2221 */
2222 stride += conf->raid_disks - 1;
64a742bc 2223 sector_div(stride, conf->raid_disks);
58c0fed4 2224 mddev->dev_sectors = stride << conf->chunk_shift;
64a742bc 2225
c93983bf 2226 if (fo)
2227 stride = 1;
2228 else
c93983bf 2229 sector_div(stride, fc);
2230 conf->stride = stride << conf->chunk_shift;
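/* conf->stride is now the sector span of one far-copy section on each
 * device; with far_offset set the copies sit on adjacent stripes, so the
 * stride is a single chunk.
 */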
2231
2232 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
2233 r10bio_pool_free, conf);
2234 if (!conf->r10bio_pool) {
2235 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2236 mdname(mddev));
2237 goto out_free_conf;
2238 }
1da177e4 2239
ed9bfdf1 2240 conf->mddev = mddev;
2241 spin_lock_init(&conf->device_lock);
2242 mddev->queue->queue_lock = &conf->device_lock;
2243
2244 chunk_size = mddev->chunk_sectors << 9;
2245 blk_queue_io_min(mddev->queue, chunk_size);
2246 if (conf->raid_disks % conf->near_copies)
2247 blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2248 else
2249 blk_queue_io_opt(mddev->queue, chunk_size *
2250 (conf->raid_disks / conf->near_copies));
2251
159ec1fc 2252 list_for_each_entry(rdev, &mddev->disks, same_set) {
2253 disk_idx = rdev->raid_disk;
2254 if (disk_idx >= mddev->raid_disks
2255 || disk_idx < 0)
2256 continue;
2257 disk = conf->mirrors + disk_idx;
2258
2259 disk->rdev = rdev;
2260 disk_stack_limits(mddev->gendisk, rdev->bdev,
2261 rdev->data_offset << 9);
1da177e4 2262 /* as we don't honour merge_bvec_fn, we must never risk
2263 * violating it, so limit max_segments to 1 lying
2264 * within a single page.
1da177e4 2265 */
2266 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2267 blk_queue_max_segments(mddev->queue, 1);
2268 blk_queue_segment_boundary(mddev->queue,
2269 PAGE_CACHE_SIZE - 1);
2270 }
2271
2272 disk->head_position = 0;
1da177e4 2273 }
2274 INIT_LIST_HEAD(&conf->retry_list);
2275
2276 spin_lock_init(&conf->resync_lock);
0a27ec96 2277 init_waitqueue_head(&conf->wait_barrier);
1da177e4 2278
2279 /* need to check that every block has at least one working mirror */
2280 if (!enough(conf)) {
2281 printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
2282 mdname(mddev));
2283 goto out_free_conf;
2284 }
2285
2286 mddev->degraded = 0;
2287 for (i = 0; i < conf->raid_disks; i++) {
2288
2289 disk = conf->mirrors + i;
2290
5fd6c1dc 2291 if (!disk->rdev ||
2e333e89 2292 !test_bit(In_sync, &disk->rdev->flags)) {
2293 disk->head_position = 0;
2294 mddev->degraded++;
2295 if (disk->rdev)
2296 conf->fullsync = 1;
2297 }
2298 }
2299
2300
0da3c619 2301 mddev->thread = md_register_thread(raid10d, mddev, NULL);
2302 if (!mddev->thread) {
2303 printk(KERN_ERR
2304 "raid10: couldn't allocate thread for %s\n",
2305 mdname(mddev));
2306 goto out_free_conf;
2307 }
2308
2309 if (mddev->recovery_cp != MaxSector)
2310 printk(KERN_NOTICE "raid10: %s is not clean"
2311 " -- starting background reconstruction\n",
2312 mdname(mddev));
2313 printk(KERN_INFO
2314 "raid10: raid set %s active with %d out of %d devices\n",
2315 mdname(mddev), mddev->raid_disks - mddev->degraded,
2316 mddev->raid_disks);
2317 /*
2318 * Ok, everything is just fine now
2319 */
1f403624 2320 md_set_array_sectors(mddev, raid10_size(mddev, 0, 0));
b522adcd 2321 mddev->resync_max_sectors = raid10_size(mddev, 0, 0);
1da177e4 2322
7a5febe9 2323 mddev->queue->unplug_fn = raid10_unplug;
2324 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2325 mddev->queue->backing_dev_info.congested_data = mddev;
7a5febe9 2326
2327 /* Calculate max read-ahead size.
2328 * We need to readahead at least twice a whole stripe....
2329 * maybe...
2330 */
2331 {
2332 int stripe = conf->raid_disks *
2333 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
2334 stripe /= conf->near_copies;
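/* e.g. 4 disks, 512 KiB chunks, near_copies = 2 and 4 KiB pages:
 * stripe = 4 * 128 / 2 = 256 pages, so read-ahead is raised to at
 * least 512 pages (2 MiB).
 */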
2335 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2336 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2337 }
2338
2339 if (conf->near_copies < mddev->raid_disks)
2340 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
ac5e7113 2341 md_integrity_register(mddev);
2342 return 0;
2343
2344out_free_conf:
2345 if (conf->r10bio_pool)
2346 mempool_destroy(conf->r10bio_pool);
1345b1d8 2347 safe_put_page(conf->tmppage);
990a8baf 2348 kfree(conf->mirrors);
2349 kfree(conf);
2350 mddev->private = NULL;
2351out:
2352 return -EIO;
2353}
2354
2355static int stop(mddev_t *mddev)
2356{
070ec55d 2357 conf_t *conf = mddev->private;
1da177e4 2358
2359 raise_barrier(conf, 0);
2360 lower_barrier(conf);
2361
2362 md_unregister_thread(mddev->thread);
2363 mddev->thread = NULL;
2364 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2365 if (conf->r10bio_pool)
2366 mempool_destroy(conf->r10bio_pool);
990a8baf 2367 kfree(conf->mirrors);
2368 kfree(conf);
2369 mddev->private = NULL;
2370 return 0;
2371}
2372
2373static void raid10_quiesce(mddev_t *mddev, int state)
2374{
070ec55d 2375 conf_t *conf = mddev->private;
2376
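/* state 1: raise the barrier so normal I/O drains and is held off;
 * state 0: drop it again and let I/O resume.
 */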
2377 switch(state) {
2378 case 1:
2379 raise_barrier(conf, 0);
2380 break;
2381 case 0:
2382 lower_barrier(conf);
2383 break;
2384 }
6cce3b23 2385}
1da177e4 2386
2604b703 2387static struct mdk_personality raid10_personality =
2388{
2389 .name = "raid10",
2604b703 2390 .level = 10,
2391 .owner = THIS_MODULE,
2392 .make_request = make_request,
2393 .run = run,
2394 .stop = stop,
2395 .status = status,
2396 .error_handler = error,
2397 .hot_add_disk = raid10_add_disk,
2398 .hot_remove_disk= raid10_remove_disk,
2399 .spare_active = raid10_spare_active,
2400 .sync_request = sync_request,
6cce3b23 2401 .quiesce = raid10_quiesce,
80c3a6ce 2402 .size = raid10_size,
2403};
2404
2405static int __init raid_init(void)
2406{
2604b703 2407 return register_md_personality(&raid10_personality);
2408}
2409
2410static void raid_exit(void)
2411{
2604b703 2412 unregister_md_personality(&raid10_personality);
2413}
2414
2415module_init(raid_init);
2416module_exit(raid_exit);
2417MODULE_LICENSE("GPL");
0efb9e61 2418MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
1da177e4 2419MODULE_ALIAS("md-personality-9"); /* RAID10 */
d9d166c2 2420MODULE_ALIAS("md-raid10");
2604b703 2421MODULE_ALIAS("md-level-10");