drivers/md/md.c
1/*
2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5 completely rewritten, based on the MD driver code from Marc Zyngier
6
7 Changes:
8
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13 - kmod support by: Cyrus Durgin
14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17 - lots of fixes and improvements to the RAID1/RAID5 and generic
18 RAID code (such as request based resynchronization):
19
20 Neil Brown <neilb@cse.unsw.edu.au>.
21
22 - persistent bitmap code
23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25 This program is free software; you can redistribute it and/or modify
26 it under the terms of the GNU General Public License as published by
27 the Free Software Foundation; either version 2, or (at your option)
28 any later version.
29
30 You should have received a copy of the GNU General Public License
31 (for example /usr/src/linux/COPYING); if not, write to the Free
32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33*/
34
35#include <linux/module.h>
36#include <linux/config.h>
37#include <linux/kthread.h>
38#include <linux/linkage.h>
39#include <linux/raid/md.h>
40#include <linux/raid/bitmap.h>
41#include <linux/sysctl.h>
42#include <linux/devfs_fs_kernel.h>
43#include <linux/buffer_head.h> /* for invalidate_bdev */
44#include <linux/suspend.h>
45#include <linux/poll.h>
46#include <linux/mutex.h>
47#include <linux/ctype.h>
48
49#include <linux/init.h>
50
51#include <linux/file.h>
52
53#ifdef CONFIG_KMOD
54#include <linux/kmod.h>
55#endif
56
57#include <asm/unaligned.h>
58
59#define MAJOR_NR MD_MAJOR
60#define MD_DRIVER
61
62/* 63 partitions with the alternate major number (mdp) */
63#define MdpMinorShift 6
64
65#define DEBUG 0
66#define dprintk(x...) ((void)(DEBUG && printk(x)))
67
68
69#ifndef MODULE
70static void autostart_arrays (int part);
71#endif
72
73static LIST_HEAD(pers_list);
74static DEFINE_SPINLOCK(pers_lock);
75
76static void md_print_devices(void);
77
78#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
79
80/*
81 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
82 * is 1000 KB/sec, so the extra system load does not show up that much.
83 * Increase it if you want to have more _guaranteed_ speed. Note that
84 * the RAID driver will use the maximum available bandwidth if the IO
85 * subsystem is idle. There is also an 'absolute maximum' reconstruction
86 * speed limit - in case reconstruction slows down your system despite
87 * idle IO detection.
88 *
89 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
90 * or /sys/block/mdX/md/sync_speed_{min,max}
91 */
92
93static int sysctl_speed_limit_min = 1000;
94static int sysctl_speed_limit_max = 200000;
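/*
 * Per-array overrides: the sync_speed_min/sync_speed_max sysfs attributes take
 * precedence, and a stored value of 0 means "unset", so the helpers below fall
 * back to the system-wide sysctl defaults above.
 */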
95static inline int speed_min(mddev_t *mddev)
96{
97 return mddev->sync_speed_min ?
98 mddev->sync_speed_min : sysctl_speed_limit_min;
99}
100
101static inline int speed_max(mddev_t *mddev)
102{
103 return mddev->sync_speed_max ?
104 mddev->sync_speed_max : sysctl_speed_limit_max;
105}
106
107static struct ctl_table_header *raid_table_header;
108
109static ctl_table raid_table[] = {
110 {
111 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN,
112 .procname = "speed_limit_min",
113 .data = &sysctl_speed_limit_min,
114 .maxlen = sizeof(int),
115 .mode = 0644,
116 .proc_handler = &proc_dointvec,
117 },
118 {
119 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX,
120 .procname = "speed_limit_max",
121 .data = &sysctl_speed_limit_max,
122 .maxlen = sizeof(int),
123 .mode = 0644,
124 .proc_handler = &proc_dointvec,
125 },
126 { .ctl_name = 0 }
127};
128
129static ctl_table raid_dir_table[] = {
130 {
131 .ctl_name = DEV_RAID,
132 .procname = "raid",
133 .maxlen = 0,
134 .mode = 0555,
135 .child = raid_table,
136 },
137 { .ctl_name = 0 }
138};
139
140static ctl_table raid_root_table[] = {
141 {
142 .ctl_name = CTL_DEV,
143 .procname = "dev",
144 .maxlen = 0,
145 .mode = 0555,
146 .child = raid_dir_table,
147 },
148 { .ctl_name = 0 }
149};
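/*
 * The three tables above nest to create /proc/sys/dev/raid/speed_limit_min and
 * /proc/sys/dev/raid/speed_limit_max; only the leaf entries are writable (0644).
 */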
150
151static struct block_device_operations md_fops;
152
153static int start_readonly;
154
155/*
156 * We have a system wide 'event count' that is incremented
157 * on any 'interesting' event, and readers of /proc/mdstat
158 * can use 'poll' or 'select' to find out when the event
159 * count increases.
160 *
161 * Events are:
162 * start array, stop array, error, add device, remove device,
163 * start build, activate spare
164 */
165static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
166static atomic_t md_event_count;
167void md_new_event(mddev_t *mddev)
168{
169 atomic_inc(&md_event_count);
170 wake_up(&md_event_waiters);
171 sysfs_notify(&mddev->kobj, NULL, "sync_action");
172}
173EXPORT_SYMBOL_GPL(md_new_event);
174
175/* Alternate version that can be called from interrupts
176 * when calling sysfs_notify isn't needed.
177 */
178void md_new_event_inintr(mddev_t *mddev)
179{
180 atomic_inc(&md_event_count);
181 wake_up(&md_event_waiters);
182}
183
184/*
185 * Enables iteration over all existing md arrays.
186 * all_mddevs_lock protects this list.
187 */
188static LIST_HEAD(all_mddevs);
189static DEFINE_SPINLOCK(all_mddevs_lock);
190
191
192/*
193 * iterates through all used mddevs in the system.
194 * We take care to grab the all_mddevs_lock whenever navigating
195 * the list, and to always hold a refcount when unlocked.
196 * Any code which breaks out of this loop while still owning
197 * a reference to the current mddev must mddev_put() it.
198 */
199#define ITERATE_MDDEV(mddev,tmp) \
200 \
201 for (({ spin_lock(&all_mddevs_lock); \
202 tmp = all_mddevs.next; \
203 mddev = NULL;}); \
204 ({ if (tmp != &all_mddevs) \
205 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
206 spin_unlock(&all_mddevs_lock); \
207 if (mddev) mddev_put(mddev); \
208 mddev = list_entry(tmp, mddev_t, all_mddevs); \
209 tmp != &all_mddevs;}); \
210 ({ spin_lock(&all_mddevs_lock); \
211 tmp = tmp->next;}) \
212 )
213
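/*
 * Note how ITERATE_MDDEV uses the three for() expressions: each is a statement
 * block that retakes all_mddevs_lock, advances the cursor, and trades the
 * reference held on the previous mddev for one on the next, so the loop body
 * runs unlocked but always with a counted reference.
 */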
214
215static int md_fail_request (request_queue_t *q, struct bio *bio)
216{
217 bio_io_error(bio, bio->bi_size);
218 return 0;
219}
220
221static inline mddev_t *mddev_get(mddev_t *mddev)
222{
223 atomic_inc(&mddev->active);
224 return mddev;
225}
226
227static void mddev_put(mddev_t *mddev)
228{
229 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
230 return;
231 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
232 list_del(&mddev->all_mddevs);
233 spin_unlock(&all_mddevs_lock);
234 blk_cleanup_queue(mddev->queue);
235 kobject_unregister(&mddev->kobj);
236 } else
237 spin_unlock(&all_mddevs_lock);
238}
239
240static mddev_t * mddev_find(dev_t unit)
241{
242 mddev_t *mddev, *new = NULL;
243
244 retry:
245 spin_lock(&all_mddevs_lock);
246 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
247 if (mddev->unit == unit) {
248 mddev_get(mddev);
249 spin_unlock(&all_mddevs_lock);
250 kfree(new);
251 return mddev;
252 }
253
254 if (new) {
255 list_add(&new->all_mddevs, &all_mddevs);
256 spin_unlock(&all_mddevs_lock);
257 return new;
258 }
259 spin_unlock(&all_mddevs_lock);
260
261 new = kzalloc(sizeof(*new), GFP_KERNEL);
262 if (!new)
263 return NULL;
264
265 new->unit = unit;
266 if (MAJOR(unit) == MD_MAJOR)
267 new->md_minor = MINOR(unit);
268 else
269 new->md_minor = MINOR(unit) >> MdpMinorShift;
270
271 mutex_init(&new->reconfig_mutex);
272 INIT_LIST_HEAD(&new->disks);
273 INIT_LIST_HEAD(&new->all_mddevs);
274 init_timer(&new->safemode_timer);
275 atomic_set(&new->active, 1);
276 spin_lock_init(&new->write_lock);
277 init_waitqueue_head(&new->sb_wait);
278
279 new->queue = blk_alloc_queue(GFP_KERNEL);
280 if (!new->queue) {
281 kfree(new);
282 return NULL;
283 }
284 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
285
286 blk_queue_make_request(new->queue, md_fail_request);
287
288 goto retry;
289}
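/*
 * mddev_find() allocates a candidate mddev outside the spinlock and then
 * retries the lookup; if another thread registered the same unit first, the
 * extra allocation is simply kfree()d on the next pass through the list.
 */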
290
291static inline int mddev_lock(mddev_t * mddev)
292{
293 return mutex_lock_interruptible(&mddev->reconfig_mutex);
294}
295
296static inline int mddev_trylock(mddev_t * mddev)
297{
298 return mutex_trylock(&mddev->reconfig_mutex);
299}
300
301static inline void mddev_unlock(mddev_t * mddev)
302{
303 mutex_unlock(&mddev->reconfig_mutex);
304
305 md_wakeup_thread(mddev->thread);
306}
307
308static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
309{
310 mdk_rdev_t * rdev;
311 struct list_head *tmp;
312
313 ITERATE_RDEV(mddev,rdev,tmp) {
314 if (rdev->desc_nr == nr)
315 return rdev;
316 }
317 return NULL;
318}
319
320static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
321{
322 struct list_head *tmp;
323 mdk_rdev_t *rdev;
324
325 ITERATE_RDEV(mddev,rdev,tmp) {
326 if (rdev->bdev->bd_dev == dev)
327 return rdev;
328 }
329 return NULL;
330}
331
332static struct mdk_personality *find_pers(int level, char *clevel)
333{
334 struct mdk_personality *pers;
335 list_for_each_entry(pers, &pers_list, list) {
336 if (level != LEVEL_NONE && pers->level == level)
337 return pers;
338 if (strcmp(pers->name, clevel)==0)
339 return pers;
340 }
341 return NULL;
342}
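/*
 * A personality matches either by numeric level or by name (clevel), so a
 * caller may pass LEVEL_NONE together with a string such as "raid5".
 */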
343
344static inline sector_t calc_dev_sboffset(struct block_device *bdev)
345{
346 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
347 return MD_NEW_SIZE_BLOCKS(size);
348}
349
350static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
351{
352 sector_t size;
353
354 size = rdev->sb_offset;
355
356 if (chunk_size)
357 size &= ~((sector_t)chunk_size/1024 - 1);
358 return size;
359}
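/*
 * For 0.90 superblocks the metadata sits near the end of the device:
 * MD_NEW_SIZE_BLOCKS() (see md_p.h) rounds the size down to a 64KiB boundary
 * and steps back one 64KiB block, and calc_dev_size() then truncates the
 * usable data area to a whole number of chunks.
 */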
360
361static int alloc_disk_sb(mdk_rdev_t * rdev)
362{
363 if (rdev->sb_page)
364 MD_BUG();
365
366 rdev->sb_page = alloc_page(GFP_KERNEL);
367 if (!rdev->sb_page) {
368 printk(KERN_ALERT "md: out of memory.\n");
369 return -EINVAL;
370 }
371
372 return 0;
373}
374
375static void free_disk_sb(mdk_rdev_t * rdev)
376{
377 if (rdev->sb_page) {
378 put_page(rdev->sb_page);
379 rdev->sb_loaded = 0;
380 rdev->sb_page = NULL;
381 rdev->sb_offset = 0;
382 rdev->size = 0;
383 }
384}
385
386
387static int super_written(struct bio *bio, unsigned int bytes_done, int error)
388{
389 mdk_rdev_t *rdev = bio->bi_private;
390 mddev_t *mddev = rdev->mddev;
391 if (bio->bi_size)
392 return 1;
393
394 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
395 md_error(mddev, rdev);
396
397 if (atomic_dec_and_test(&mddev->pending_writes))
398 wake_up(&mddev->sb_wait);
399 bio_put(bio);
400 return 0;
401}
402
403static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
404{
405 struct bio *bio2 = bio->bi_private;
406 mdk_rdev_t *rdev = bio2->bi_private;
407 mddev_t *mddev = rdev->mddev;
408 if (bio->bi_size)
409 return 1;
410
411 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
412 error == -EOPNOTSUPP) {
413 unsigned long flags;
414 /* barriers don't appear to be supported :-( */
415 set_bit(BarriersNotsupp, &rdev->flags);
416 mddev->barriers_work = 0;
417 spin_lock_irqsave(&mddev->write_lock, flags);
418 bio2->bi_next = mddev->biolist;
419 mddev->biolist = bio2;
420 spin_unlock_irqrestore(&mddev->write_lock, flags);
421 wake_up(&mddev->sb_wait);
422 bio_put(bio);
423 return 0;
424 }
425 bio_put(bio2);
426 bio->bi_private = rdev;
427 return super_written(bio, bytes_done, error);
428}
429
430void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
431 sector_t sector, int size, struct page *page)
432{
433 /* write first size bytes of page to sector of rdev
434 * Increment mddev->pending_writes before returning
435 * and decrement it on completion, waking up sb_wait
436 * if zero is reached.
437 * If an error occurred, call md_error
438 *
439 * As we might need to resubmit the request if BIO_RW_BARRIER
440 * causes ENOTSUPP, we allocate a spare bio...
441 */
442 struct bio *bio = bio_alloc(GFP_NOIO, 1);
443 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
444
445 bio->bi_bdev = rdev->bdev;
446 bio->bi_sector = sector;
447 bio_add_page(bio, page, size, 0);
448 bio->bi_private = rdev;
449 bio->bi_end_io = super_written;
450 bio->bi_rw = rw;
451
452 atomic_inc(&mddev->pending_writes);
453 if (!test_bit(BarriersNotsupp, &rdev->flags)) {
454 struct bio *rbio;
455 rw |= (1<<BIO_RW_BARRIER);
456 rbio = bio_clone(bio, GFP_NOIO);
457 rbio->bi_private = bio;
458 rbio->bi_end_io = super_written_barrier;
459 submit_bio(rw, rbio);
460 } else
461 submit_bio(rw, bio);
462}
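/*
 * If a barrier write fails with -EOPNOTSUPP, super_written_barrier() queues
 * the plain (non-barrier) bio on mddev->biolist; md_super_wait() below
 * resubmits anything on that list before sleeping, so the superblock update
 * still reaches the disk on hardware without barrier support.
 */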
463
464void md_super_wait(mddev_t *mddev)
465{
466 /* wait for all superblock writes that were scheduled to complete.
467 * if any had to be retried (due to BARRIER problems), retry them
468 */
469 DEFINE_WAIT(wq);
470 for(;;) {
471 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
472 if (atomic_read(&mddev->pending_writes)==0)
473 break;
474 while (mddev->biolist) {
475 struct bio *bio;
476 spin_lock_irq(&mddev->write_lock);
477 bio = mddev->biolist;
478 mddev->biolist = bio->bi_next ;
479 bio->bi_next = NULL;
480 spin_unlock_irq(&mddev->write_lock);
481 submit_bio(bio->bi_rw, bio);
482 }
483 schedule();
484 }
485 finish_wait(&mddev->sb_wait, &wq);
486}
487
488static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
489{
490 if (bio->bi_size)
491 return 1;
492
493 complete((struct completion*)bio->bi_private);
494 return 0;
495}
496
497int sync_page_io(struct block_device *bdev, sector_t sector, int size,
498 struct page *page, int rw)
499{
500 struct bio *bio = bio_alloc(GFP_NOIO, 1);
501 struct completion event;
502 int ret;
503
504 rw |= (1 << BIO_RW_SYNC);
505
506 bio->bi_bdev = bdev;
507 bio->bi_sector = sector;
508 bio_add_page(bio, page, size, 0);
509 init_completion(&event);
510 bio->bi_private = &event;
511 bio->bi_end_io = bi_complete;
512 submit_bio(rw, bio);
513 wait_for_completion(&event);
514
515 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
516 bio_put(bio);
517 return ret;
518}
519EXPORT_SYMBOL_GPL(sync_page_io);
520
521static int read_disk_sb(mdk_rdev_t * rdev, int size)
522{
523 char b[BDEVNAME_SIZE];
524 if (!rdev->sb_page) {
525 MD_BUG();
526 return -EINVAL;
527 }
528 if (rdev->sb_loaded)
529 return 0;
530
531
532 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
533 goto fail;
534 rdev->sb_loaded = 1;
535 return 0;
536
537fail:
538 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
539 bdevname(rdev->bdev,b));
540 return -EINVAL;
541}
542
543static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
544{
545 if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
546 (sb1->set_uuid1 == sb2->set_uuid1) &&
547 (sb1->set_uuid2 == sb2->set_uuid2) &&
548 (sb1->set_uuid3 == sb2->set_uuid3))
549
550 return 1;
551
552 return 0;
553}
554
555
556static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
557{
558 int ret;
559 mdp_super_t *tmp1, *tmp2;
560
561 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
562 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
563
564 if (!tmp1 || !tmp2) {
565 ret = 0;
566 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
567 goto abort;
568 }
569
570 *tmp1 = *sb1;
571 *tmp2 = *sb2;
572
573 /*
574 * nr_disks is not constant
575 */
576 tmp1->nr_disks = 0;
577 tmp2->nr_disks = 0;
578
579 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
580 ret = 0;
581 else
582 ret = 1;
583
584abort:
585 kfree(tmp1);
586 kfree(tmp2);
587 return ret;
588}
589
590static unsigned int calc_sb_csum(mdp_super_t * sb)
591{
592 unsigned int disk_csum, csum;
593
594 disk_csum = sb->sb_csum;
595 sb->sb_csum = 0;
596 csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
597 sb->sb_csum = disk_csum;
598 return csum;
599}
600
601
602/*
603 * Handle superblock details.
604 * We want to be able to handle multiple superblock formats
605 * so we have a common interface to them all, and an array of
606 * different handlers.
607 * We rely on user-space to write the initial superblock, and support
608 * reading and updating of superblocks.
609 * Interface methods are:
610 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
611 * loads and validates a superblock on dev.
612 * if refdev != NULL, compare superblocks on both devices
613 * Return:
614 * 0 - dev has a superblock that is compatible with refdev
615 * 1 - dev has a superblock that is compatible and newer than refdev
616 * so dev should be used as the refdev in future
617 * -EINVAL superblock incompatible or invalid
618 * -othererror e.g. -EIO
619 *
620 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
621 * Verify that dev is acceptable into mddev.
622 * The first time, mddev->raid_disks will be 0, and data from
623 * dev should be merged in. Subsequent calls check that dev
624 * is new enough. Return 0 or -EINVAL
625 *
626 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
627 * Update the superblock for rdev with data in mddev
628 * This does not write to disc.
629 *
630 */
631
632struct super_type {
633 char *name;
634 struct module *owner;
635 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
636 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
637 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
638};
639
640/*
641 * load_super for 0.90.0
642 */
643static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
644{
645 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
646 mdp_super_t *sb;
647 int ret;
648 sector_t sb_offset;
649
650 /*
651 * Calculate the position of the superblock,
652 * it's at the end of the disk.
653 *
654 * It also happens to be a multiple of 4Kb.
655 */
656 sb_offset = calc_dev_sboffset(rdev->bdev);
657 rdev->sb_offset = sb_offset;
658
659 ret = read_disk_sb(rdev, MD_SB_BYTES);
660 if (ret) return ret;
661
662 ret = -EINVAL;
663
664 bdevname(rdev->bdev, b);
665 sb = (mdp_super_t*)page_address(rdev->sb_page);
666
667 if (sb->md_magic != MD_SB_MAGIC) {
668 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
669 b);
670 goto abort;
671 }
672
673 if (sb->major_version != 0 ||
674 sb->minor_version < 90 ||
675 sb->minor_version > 91) {
676 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
677 sb->major_version, sb->minor_version,
678 b);
679 goto abort;
680 }
681
682 if (sb->raid_disks <= 0)
683 goto abort;
684
685 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
686 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
687 b);
688 goto abort;
689 }
690
691 rdev->preferred_minor = sb->md_minor;
692 rdev->data_offset = 0;
693 rdev->sb_size = MD_SB_BYTES;
694
695 if (sb->level == LEVEL_MULTIPATH)
696 rdev->desc_nr = -1;
697 else
698 rdev->desc_nr = sb->this_disk.number;
699
700 if (refdev == 0)
701 ret = 1;
702 else {
703 __u64 ev1, ev2;
704 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
705 if (!uuid_equal(refsb, sb)) {
706 printk(KERN_WARNING "md: %s has different UUID to %s\n",
707 b, bdevname(refdev->bdev,b2));
708 goto abort;
709 }
710 if (!sb_equal(refsb, sb)) {
711 printk(KERN_WARNING "md: %s has same UUID"
712 " but different superblock to %s\n",
713 b, bdevname(refdev->bdev, b2));
714 goto abort;
715 }
716 ev1 = md_event(sb);
717 ev2 = md_event(refsb);
718 if (ev1 > ev2)
719 ret = 1;
720 else
721 ret = 0;
722 }
723 rdev->size = calc_dev_size(rdev, sb->chunk_size);
724
725 if (rdev->size < sb->size && sb->level > 1)
726 /* "this cannot possibly happen" ... */
727 ret = -EINVAL;
728
729 abort:
730 return ret;
731}
732
733/*
734 * validate_super for 0.90.0
735 */
736static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
737{
738 mdp_disk_t *desc;
739 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
740
741 rdev->raid_disk = -1;
742 rdev->flags = 0;
743 if (mddev->raid_disks == 0) {
744 mddev->major_version = 0;
745 mddev->minor_version = sb->minor_version;
746 mddev->patch_version = sb->patch_version;
747 mddev->persistent = ! sb->not_persistent;
748 mddev->chunk_size = sb->chunk_size;
749 mddev->ctime = sb->ctime;
750 mddev->utime = sb->utime;
751 mddev->level = sb->level;
752 mddev->clevel[0] = 0;
753 mddev->layout = sb->layout;
754 mddev->raid_disks = sb->raid_disks;
755 mddev->size = sb->size;
756 mddev->events = md_event(sb);
757 mddev->bitmap_offset = 0;
758 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
759
760 if (mddev->minor_version >= 91) {
761 mddev->reshape_position = sb->reshape_position;
762 mddev->delta_disks = sb->delta_disks;
763 mddev->new_level = sb->new_level;
764 mddev->new_layout = sb->new_layout;
765 mddev->new_chunk = sb->new_chunk;
766 } else {
767 mddev->reshape_position = MaxSector;
768 mddev->delta_disks = 0;
769 mddev->new_level = mddev->level;
770 mddev->new_layout = mddev->layout;
771 mddev->new_chunk = mddev->chunk_size;
772 }
773
774 if (sb->state & (1<<MD_SB_CLEAN))
775 mddev->recovery_cp = MaxSector;
776 else {
777 if (sb->events_hi == sb->cp_events_hi &&
778 sb->events_lo == sb->cp_events_lo) {
779 mddev->recovery_cp = sb->recovery_cp;
780 } else
781 mddev->recovery_cp = 0;
782 }
783
784 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
785 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
786 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
787 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
788
789 mddev->max_disks = MD_SB_DISKS;
790
791 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
792 mddev->bitmap_file == NULL) {
793 if (mddev->level != 1 && mddev->level != 4
794 && mddev->level != 5 && mddev->level != 6
795 && mddev->level != 10) {
796 /* FIXME use a better test */
797 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
798 return -EINVAL;
799 }
800 mddev->bitmap_offset = mddev->default_bitmap_offset;
801 }
802
803 } else if (mddev->pers == NULL) {
804 /* Insist on good event counter while assembling */
805 __u64 ev1 = md_event(sb);
806 ++ev1;
807 if (ev1 < mddev->events)
808 return -EINVAL;
809 } else if (mddev->bitmap) {
810 /* if adding to array with a bitmap, then we can accept an
811 * older device ... but not too old.
812 */
813 __u64 ev1 = md_event(sb);
814 if (ev1 < mddev->bitmap->events_cleared)
815 return 0;
816 } else /* just a hot-add of a new device, leave raid_disk at -1 */
817 return 0;
818
819 if (mddev->level != LEVEL_MULTIPATH) {
820 desc = sb->disks + rdev->desc_nr;
821
822 if (desc->state & (1<<MD_DISK_FAULTY))
823 set_bit(Faulty, &rdev->flags);
824 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
825 desc->raid_disk < mddev->raid_disks */) {
826 set_bit(In_sync, &rdev->flags);
827 rdev->raid_disk = desc->raid_disk;
828 }
829 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
830 set_bit(WriteMostly, &rdev->flags);
831 } else /* MULTIPATH are always insync */
832 set_bit(In_sync, &rdev->flags);
833 return 0;
834}
835
836/*
837 * sync_super for 0.90.0
838 */
839static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
840{
841 mdp_super_t *sb;
842 struct list_head *tmp;
843 mdk_rdev_t *rdev2;
844 int next_spare = mddev->raid_disks;
845
846
847 /* make rdev->sb match mddev data..
848 *
849 * 1/ zero out disks
850 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
851 * 3/ any empty disks < next_spare become removed
852 *
853 * disks[0] gets initialised to REMOVED because
854 * we cannot be sure from other fields if it has
855 * been initialised or not.
856 */
857 int i;
858 int active=0, working=0,failed=0,spare=0,nr_disks=0;
859
860 rdev->sb_size = MD_SB_BYTES;
861
862 sb = (mdp_super_t*)page_address(rdev->sb_page);
863
864 memset(sb, 0, sizeof(*sb));
865
866 sb->md_magic = MD_SB_MAGIC;
867 sb->major_version = mddev->major_version;
868 sb->patch_version = mddev->patch_version;
869 sb->gvalid_words = 0; /* ignored */
870 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
871 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
872 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
873 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
874
875 sb->ctime = mddev->ctime;
876 sb->level = mddev->level;
877 sb->size = mddev->size;
878 sb->raid_disks = mddev->raid_disks;
879 sb->md_minor = mddev->md_minor;
880 sb->not_persistent = !mddev->persistent;
881 sb->utime = mddev->utime;
882 sb->state = 0;
883 sb->events_hi = (mddev->events>>32);
884 sb->events_lo = (u32)mddev->events;
885
886 if (mddev->reshape_position == MaxSector)
887 sb->minor_version = 90;
888 else {
889 sb->minor_version = 91;
890 sb->reshape_position = mddev->reshape_position;
891 sb->new_level = mddev->new_level;
892 sb->delta_disks = mddev->delta_disks;
893 sb->new_layout = mddev->new_layout;
894 sb->new_chunk = mddev->new_chunk;
895 }
896 mddev->minor_version = sb->minor_version;
897 if (mddev->in_sync)
898 {
899 sb->recovery_cp = mddev->recovery_cp;
900 sb->cp_events_hi = (mddev->events>>32);
901 sb->cp_events_lo = (u32)mddev->events;
902 if (mddev->recovery_cp == MaxSector)
903 sb->state = (1<< MD_SB_CLEAN);
904 } else
905 sb->recovery_cp = 0;
906
907 sb->layout = mddev->layout;
908 sb->chunk_size = mddev->chunk_size;
909
910 if (mddev->bitmap && mddev->bitmap_file == NULL)
911 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
912
913 sb->disks[0].state = (1<<MD_DISK_REMOVED);
914 ITERATE_RDEV(mddev,rdev2,tmp) {
915 mdp_disk_t *d;
916 int desc_nr;
917 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
918 && !test_bit(Faulty, &rdev2->flags))
919 desc_nr = rdev2->raid_disk;
920 else
921 desc_nr = next_spare++;
922 rdev2->desc_nr = desc_nr;
923 d = &sb->disks[rdev2->desc_nr];
924 nr_disks++;
925 d->number = rdev2->desc_nr;
926 d->major = MAJOR(rdev2->bdev->bd_dev);
927 d->minor = MINOR(rdev2->bdev->bd_dev);
928 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
929 && !test_bit(Faulty, &rdev2->flags))
930 d->raid_disk = rdev2->raid_disk;
931 else
932 d->raid_disk = rdev2->desc_nr; /* compatibility */
933 if (test_bit(Faulty, &rdev2->flags))
934 d->state = (1<<MD_DISK_FAULTY);
935 else if (test_bit(In_sync, &rdev2->flags)) {
936 d->state = (1<<MD_DISK_ACTIVE);
937 d->state |= (1<<MD_DISK_SYNC);
938 active++;
939 working++;
940 } else {
941 d->state = 0;
942 spare++;
943 working++;
944 }
945 if (test_bit(WriteMostly, &rdev2->flags))
946 d->state |= (1<<MD_DISK_WRITEMOSTLY);
947 }
948 /* now set the "removed" and "faulty" bits on any missing devices */
949 for (i=0 ; i < mddev->raid_disks ; i++) {
950 mdp_disk_t *d = &sb->disks[i];
951 if (d->state == 0 && d->number == 0) {
952 d->number = i;
953 d->raid_disk = i;
954 d->state = (1<<MD_DISK_REMOVED);
955 d->state |= (1<<MD_DISK_FAULTY);
956 failed++;
957 }
958 }
959 sb->nr_disks = nr_disks;
960 sb->active_disks = active;
961 sb->working_disks = working;
962 sb->failed_disks = failed;
963 sb->spare_disks = spare;
964
965 sb->this_disk = sb->disks[rdev->desc_nr];
966 sb->sb_csum = calc_sb_csum(sb);
967}
968
969/*
970 * version 1 superblock
971 */
972
973static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
974{
975 unsigned int disk_csum, csum;
976 unsigned long long newcsum;
977 int size = 256 + le32_to_cpu(sb->max_dev)*2;
978 unsigned int *isuper = (unsigned int*)sb;
979 int i;
980
981 disk_csum = sb->sb_csum;
982 sb->sb_csum = 0;
983 newcsum = 0;
984 for (i=0; size>=4; size -= 4 )
985 newcsum += le32_to_cpu(*isuper++);
986
987 if (size == 2)
988 newcsum += le16_to_cpu(*(unsigned short*) isuper);
989
990 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
991 sb->sb_csum = disk_csum;
992 return cpu_to_le32(csum);
993}
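/*
 * The version-1 checksum covers the 256-byte fixed header plus two bytes per
 * dev_roles[] entry (max_dev), summed as little-endian 32-bit words with
 * sb_csum treated as zero, then folded from 64 to 32 bits.
 */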
994
995static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
996{
997 struct mdp_superblock_1 *sb;
998 int ret;
999 sector_t sb_offset;
1000 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1001 int bmask;
1002
1003 /*
1004 * Calculate the position of the superblock.
1005 * It is always aligned to a 4K boundary and
1006 * depending on minor_version, it can be:
1007 * 0: At least 8K, but less than 12K, from end of device
1008 * 1: At start of device
1009 * 2: 4K from start of device.
1010 */
1011 switch(minor_version) {
1012 case 0:
1013 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1014 sb_offset -= 8*2;
1015 sb_offset &= ~(sector_t)(4*2-1);
1016 /* convert from sectors to K */
1017 sb_offset /= 2;
1018 break;
1019 case 1:
1020 sb_offset = 0;
1021 break;
1022 case 2:
1023 sb_offset = 4;
1024 break;
1025 default:
1026 return -EINVAL;
1027 }
1028 rdev->sb_offset = sb_offset;
1029
1030 /* superblock is rarely larger than 1K, but it can be larger,
1031 * and it is safe to read 4k, so we do that
1032 */
1033 ret = read_disk_sb(rdev, 4096);
1034 if (ret) return ret;
1035
1036
1037 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1038
1039 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1040 sb->major_version != cpu_to_le32(1) ||
1041 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1042 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1043 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1044 return -EINVAL;
1045
1046 if (calc_sb_1_csum(sb) != sb->sb_csum) {
1047 printk("md: invalid superblock checksum on %s\n",
1048 bdevname(rdev->bdev,b));
1049 return -EINVAL;
1050 }
1051 if (le64_to_cpu(sb->data_size) < 10) {
1052 printk("md: data_size too small on %s\n",
1053 bdevname(rdev->bdev,b));
1054 return -EINVAL;
1055 }
1056 rdev->preferred_minor = 0xffff;
1057 rdev->data_offset = le64_to_cpu(sb->data_offset);
1058 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1059
1060 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1061 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1062 if (rdev->sb_size & bmask)
1063 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1064
1065 if (refdev == 0)
1066 ret = 1;
1067 else {
1068 __u64 ev1, ev2;
1069 struct mdp_superblock_1 *refsb =
1070 (struct mdp_superblock_1*)page_address(refdev->sb_page);
1071
1072 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1073 sb->level != refsb->level ||
1074 sb->layout != refsb->layout ||
1075 sb->chunksize != refsb->chunksize) {
1076 printk(KERN_WARNING "md: %s has strangely different"
1077 " superblock to %s\n",
1078 bdevname(rdev->bdev,b),
1079 bdevname(refdev->bdev,b2));
1080 return -EINVAL;
1081 }
1082 ev1 = le64_to_cpu(sb->events);
1083 ev2 = le64_to_cpu(refsb->events);
1084
1085 if (ev1 > ev2)
1086 ret = 1;
1087 else
1088 ret = 0;
1089 }
1090 if (minor_version)
1091 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1092 else
1093 rdev->size = rdev->sb_offset;
1094 if (rdev->size < le64_to_cpu(sb->data_size)/2)
1095 return -EINVAL;
1096 rdev->size = le64_to_cpu(sb->data_size)/2;
1097 if (le32_to_cpu(sb->chunksize))
1098 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1099
1100 if (le32_to_cpu(sb->size) > rdev->size*2)
1101 return -EINVAL;
1102 return ret;
1103}
1104
1105static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1106{
1107 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1108
1109 rdev->raid_disk = -1;
1110 rdev->flags = 0;
1111 if (mddev->raid_disks == 0) {
1112 mddev->major_version = 1;
1113 mddev->patch_version = 0;
1114 mddev->persistent = 1;
1115 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1116 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1117 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1118 mddev->level = le32_to_cpu(sb->level);
1119 mddev->clevel[0] = 0;
1120 mddev->layout = le32_to_cpu(sb->layout);
1121 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1122 mddev->size = le64_to_cpu(sb->size)/2;
1123 mddev->events = le64_to_cpu(sb->events);
1124 mddev->bitmap_offset = 0;
1125 mddev->default_bitmap_offset = 1024 >> 9;
1126
1127 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1128 memcpy(mddev->uuid, sb->set_uuid, 16);
1129
1130 mddev->max_disks = (4096-256)/2;
1131
1132 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1133 mddev->bitmap_file == NULL ) {
1134 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1135 && mddev->level != 10) {
1136 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1137 return -EINVAL;
1138 }
1139 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1140 }
1141 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1142 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1143 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1144 mddev->new_level = le32_to_cpu(sb->new_level);
1145 mddev->new_layout = le32_to_cpu(sb->new_layout);
1146 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1147 } else {
1148 mddev->reshape_position = MaxSector;
1149 mddev->delta_disks = 0;
1150 mddev->new_level = mddev->level;
1151 mddev->new_layout = mddev->layout;
1152 mddev->new_chunk = mddev->chunk_size;
1153 }
1154
1155 } else if (mddev->pers == NULL) {
1156 /* Insist on a good event counter while assembling */
1157 __u64 ev1 = le64_to_cpu(sb->events);
1158 ++ev1;
1159 if (ev1 < mddev->events)
1160 return -EINVAL;
1161 } else if (mddev->bitmap) {
1162 /* If adding to array with a bitmap, then we can accept an
1163 * older device, but not too old.
1164 */
1165 __u64 ev1 = le64_to_cpu(sb->events);
1166 if (ev1 < mddev->bitmap->events_cleared)
1167 return 0;
1168 } else /* just a hot-add of a new device, leave raid_disk at -1 */
1169 return 0;
1170
1171 if (mddev->level != LEVEL_MULTIPATH) {
1172 int role;
1173 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1174 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1175 switch(role) {
1176 case 0xffff: /* spare */
1177 break;
1178 case 0xfffe: /* faulty */
1179 set_bit(Faulty, &rdev->flags);
1180 break;
1181 default:
1182 if ((le32_to_cpu(sb->feature_map) &
1183 MD_FEATURE_RECOVERY_OFFSET))
1184 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1185 else
1186 set_bit(In_sync, &rdev->flags);
1187 rdev->raid_disk = role;
1188 break;
1189 }
1190 if (sb->devflags & WriteMostly1)
1191 set_bit(WriteMostly, &rdev->flags);
1192 } else /* MULTIPATH are always insync */
1193 set_bit(In_sync, &rdev->flags);
1194
1195 return 0;
1196}
1197
1198static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1199{
1200 struct mdp_superblock_1 *sb;
1201 struct list_head *tmp;
1202 mdk_rdev_t *rdev2;
1203 int max_dev, i;
1204 /* make rdev->sb match mddev and rdev data. */
1205
1206 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1207
1208 sb->feature_map = 0;
1209 sb->pad0 = 0;
1210 sb->recovery_offset = cpu_to_le64(0);
1211 memset(sb->pad1, 0, sizeof(sb->pad1));
1212 memset(sb->pad2, 0, sizeof(sb->pad2));
1213 memset(sb->pad3, 0, sizeof(sb->pad3));
1214
1215 sb->utime = cpu_to_le64((__u64)mddev->utime);
1216 sb->events = cpu_to_le64(mddev->events);
1217 if (mddev->in_sync)
1218 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1219 else
1220 sb->resync_offset = cpu_to_le64(0);
1221
1222 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
1223
1224 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1225 sb->size = cpu_to_le64(mddev->size<<1);
1226
1227 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1228 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1229 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1230 }
1231
1232 if (rdev->raid_disk >= 0 &&
1233 !test_bit(In_sync, &rdev->flags) &&
1234 rdev->recovery_offset > 0) {
1235 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1236 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1237 }
1238
1239 if (mddev->reshape_position != MaxSector) {
1240 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1241 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1242 sb->new_layout = cpu_to_le32(mddev->new_layout);
1243 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1244 sb->new_level = cpu_to_le32(mddev->new_level);
1245 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1246 }
1247
1248 max_dev = 0;
1249 ITERATE_RDEV(mddev,rdev2,tmp)
1250 if (rdev2->desc_nr+1 > max_dev)
1251 max_dev = rdev2->desc_nr+1;
1252
1253 sb->max_dev = cpu_to_le32(max_dev);
1254 for (i=0; i<max_dev;i++)
1255 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1256
1257 ITERATE_RDEV(mddev,rdev2,tmp) {
1258 i = rdev2->desc_nr;
1259 if (test_bit(Faulty, &rdev2->flags))
1260 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1261 else if (test_bit(In_sync, &rdev2->flags))
1262 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1263 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1264 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1265 else
1266 sb->dev_roles[i] = cpu_to_le16(0xffff);
1267 }
1268
1269 sb->sb_csum = calc_sb_1_csum(sb);
1270}
1271
1272
1273static struct super_type super_types[] = {
1274 [0] = {
1275 .name = "0.90.0",
1276 .owner = THIS_MODULE,
1277 .load_super = super_90_load,
1278 .validate_super = super_90_validate,
1279 .sync_super = super_90_sync,
1280 },
1281 [1] = {
1282 .name = "md-1",
1283 .owner = THIS_MODULE,
1284 .load_super = super_1_load,
1285 .validate_super = super_1_validate,
1286 .sync_super = super_1_sync,
1287 },
1288};
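/*
 * Indexed by mddev->major_version: entry 0 handles the classic 0.90 format,
 * entry 1 the version-1.x ("md-1") format; see sync_sbs() and analyze_sbs().
 */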
1289
1290static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
1291{
1292 struct list_head *tmp;
1293 mdk_rdev_t *rdev;
1294
1295 ITERATE_RDEV(mddev,rdev,tmp)
1296 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
1297 return rdev;
1298
1299 return NULL;
1300}
1301
1302static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1303{
1304 struct list_head *tmp;
1305 mdk_rdev_t *rdev;
1306
1307 ITERATE_RDEV(mddev1,rdev,tmp)
1308 if (match_dev_unit(mddev2, rdev))
1309 return 1;
1310
1311 return 0;
1312}
1313
1314static LIST_HEAD(pending_raid_disks);
1315
1316static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1317{
1318 mdk_rdev_t *same_pdev;
1319 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1320 struct kobject *ko;
1321 char *s;
1322
1323 if (rdev->mddev) {
1324 MD_BUG();
1325 return -EINVAL;
1326 }
1327 /* make sure rdev->size exceeds mddev->size */
1328 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1329 if (mddev->pers)
1330 /* Cannot change size, so fail */
1331 return -ENOSPC;
1332 else
1333 mddev->size = rdev->size;
1334 }
1335 same_pdev = match_dev_unit(mddev, rdev);
1336 if (same_pdev)
1337 printk(KERN_WARNING
1338 "%s: WARNING: %s appears to be on the same physical"
1339 " disk as %s. True protection against single-disk"
1340 " failure might be compromised.\n",
1341 mdname(mddev), bdevname(rdev->bdev,b),
1342 bdevname(same_pdev->bdev,b2));
1343
1344 /* Verify rdev->desc_nr is unique.
1345 * If it is -1, assign a free number, else
1346 * check number is not in use
1347 */
1348 if (rdev->desc_nr < 0) {
1349 int choice = 0;
1350 if (mddev->pers) choice = mddev->raid_disks;
1351 while (find_rdev_nr(mddev, choice))
1352 choice++;
1353 rdev->desc_nr = choice;
1354 } else {
1355 if (find_rdev_nr(mddev, rdev->desc_nr))
1356 return -EBUSY;
1357 }
1358 bdevname(rdev->bdev,b);
1359 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1360 return -ENOMEM;
1361 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1362 *s = '!';
1363
1364 list_add(&rdev->same_set, &mddev->disks);
1365 rdev->mddev = mddev;
1366 printk(KERN_INFO "md: bind<%s>\n", b);
1367
1368 rdev->kobj.parent = &mddev->kobj;
1369 kobject_add(&rdev->kobj);
1370
1371 if (rdev->bdev->bd_part)
1372 ko = &rdev->bdev->bd_part->kobj;
1373 else
1374 ko = &rdev->bdev->bd_disk->kobj;
1375 sysfs_create_link(&rdev->kobj, ko, "block");
1376 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1377 return 0;
1378}
1379
1380static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1381{
1382 char b[BDEVNAME_SIZE];
1383 if (!rdev->mddev) {
1384 MD_BUG();
1385 return;
1386 }
1387 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1388 list_del_init(&rdev->same_set);
1389 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1390 rdev->mddev = NULL;
1391 sysfs_remove_link(&rdev->kobj, "block");
1392 kobject_del(&rdev->kobj);
1393}
1394
1395/*
1396 * prevent the device from being mounted, repartitioned or
1397 * otherwise reused by a RAID array (or any other kernel
1398 * subsystem), by bd_claiming the device.
1399 */
1400static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1401{
1402 int err = 0;
1403 struct block_device *bdev;
1404 char b[BDEVNAME_SIZE];
1405
1406 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1407 if (IS_ERR(bdev)) {
1408 printk(KERN_ERR "md: could not open %s.\n",
1409 __bdevname(dev, b));
1410 return PTR_ERR(bdev);
1411 }
1412 err = bd_claim(bdev, rdev);
1413 if (err) {
1414 printk(KERN_ERR "md: could not bd_claim %s.\n",
1415 bdevname(bdev, b));
1416 blkdev_put(bdev);
1417 return err;
1418 }
1419 rdev->bdev = bdev;
1420 return err;
1421}
1422
1423static void unlock_rdev(mdk_rdev_t *rdev)
1424{
1425 struct block_device *bdev = rdev->bdev;
1426 rdev->bdev = NULL;
1427 if (!bdev)
1428 MD_BUG();
1429 bd_release(bdev);
1430 blkdev_put(bdev);
1431}
1432
1433void md_autodetect_dev(dev_t dev);
1434
1435static void export_rdev(mdk_rdev_t * rdev)
1436{
1437 char b[BDEVNAME_SIZE];
1438 printk(KERN_INFO "md: export_rdev(%s)\n",
1439 bdevname(rdev->bdev,b));
1440 if (rdev->mddev)
1441 MD_BUG();
1442 free_disk_sb(rdev);
1443 list_del_init(&rdev->same_set);
1444#ifndef MODULE
1445 md_autodetect_dev(rdev->bdev->bd_dev);
1446#endif
1447 unlock_rdev(rdev);
1448 kobject_put(&rdev->kobj);
1449}
1450
1451static void kick_rdev_from_array(mdk_rdev_t * rdev)
1452{
1453 unbind_rdev_from_array(rdev);
1454 export_rdev(rdev);
1455}
1456
1457static void export_array(mddev_t *mddev)
1458{
1459 struct list_head *tmp;
1460 mdk_rdev_t *rdev;
1461
1462 ITERATE_RDEV(mddev,rdev,tmp) {
1463 if (!rdev->mddev) {
1464 MD_BUG();
1465 continue;
1466 }
1467 kick_rdev_from_array(rdev);
1468 }
1469 if (!list_empty(&mddev->disks))
1470 MD_BUG();
1471 mddev->raid_disks = 0;
1472 mddev->major_version = 0;
1473}
1474
1475static void print_desc(mdp_disk_t *desc)
1476{
1477 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1478 desc->major,desc->minor,desc->raid_disk,desc->state);
1479}
1480
1481static void print_sb(mdp_super_t *sb)
1482{
1483 int i;
1484
1485 printk(KERN_INFO
1486 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1487 sb->major_version, sb->minor_version, sb->patch_version,
1488 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1489 sb->ctime);
1490 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1491 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1492 sb->md_minor, sb->layout, sb->chunk_size);
1493 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
1494 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1495 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1496 sb->failed_disks, sb->spare_disks,
1497 sb->sb_csum, (unsigned long)sb->events_lo);
1498
1499 printk(KERN_INFO);
1500 for (i = 0; i < MD_SB_DISKS; i++) {
1501 mdp_disk_t *desc;
1502
1503 desc = sb->disks + i;
1504 if (desc->number || desc->major || desc->minor ||
1505 desc->raid_disk || (desc->state && (desc->state != 4))) {
1506 printk(" D %2d: ", i);
1507 print_desc(desc);
1508 }
1509 }
1510 printk(KERN_INFO "md: THIS: ");
1511 print_desc(&sb->this_disk);
1512
1513}
1514
1515static void print_rdev(mdk_rdev_t *rdev)
1516{
1517 char b[BDEVNAME_SIZE];
1518 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1519 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1520 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1521 rdev->desc_nr);
1522 if (rdev->sb_loaded) {
1523 printk(KERN_INFO "md: rdev superblock:\n");
1524 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1525 } else
1526 printk(KERN_INFO "md: no rdev superblock!\n");
1527}
1528
1529static void md_print_devices(void)
1530{
1531 struct list_head *tmp, *tmp2;
1532 mdk_rdev_t *rdev;
1533 mddev_t *mddev;
1534 char b[BDEVNAME_SIZE];
1535
1536 printk("\n");
1537 printk("md: **********************************\n");
1538 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1539 printk("md: **********************************\n");
1540 ITERATE_MDDEV(mddev,tmp) {
1541
1542 if (mddev->bitmap)
1543 bitmap_print_sb(mddev->bitmap);
1544 else
1545 printk("%s: ", mdname(mddev));
1546 ITERATE_RDEV(mddev,rdev,tmp2)
1547 printk("<%s>", bdevname(rdev->bdev,b));
1548 printk("\n");
1549
1550 ITERATE_RDEV(mddev,rdev,tmp2)
1551 print_rdev(rdev);
1552 }
1553 printk("md: **********************************\n");
1554 printk("\n");
1555}
1556
1557
1558static void sync_sbs(mddev_t * mddev)
1559{
1560 mdk_rdev_t *rdev;
1561 struct list_head *tmp;
1562
1563 ITERATE_RDEV(mddev,rdev,tmp) {
1564 super_types[mddev->major_version].
1565 sync_super(mddev, rdev);
1566 rdev->sb_loaded = 1;
1567 }
1568}
1569
1570void md_update_sb(mddev_t * mddev)
1571{
1572 int err;
1573 struct list_head *tmp;
1574 mdk_rdev_t *rdev;
1575 int sync_req;
1576
1577repeat:
1578 spin_lock_irq(&mddev->write_lock);
1579 sync_req = mddev->in_sync;
1580 mddev->utime = get_seconds();
1581 mddev->events ++;
1582
1583 if (!mddev->events) {
1584 /*
1585 * oops, this 64-bit counter should never wrap.
1586 * Either we are in around ~1 trillion A.C., assuming
1587 * 1 reboot per second, or we have a bug:
1588 */
1589 MD_BUG();
1590 mddev->events --;
1591 }
1592 mddev->sb_dirty = 2;
1593 sync_sbs(mddev);
1594
1595 /*
1596 * do not write anything to disk if using
1597 * nonpersistent superblocks
1598 */
1599 if (!mddev->persistent) {
1600 mddev->sb_dirty = 0;
1601 spin_unlock_irq(&mddev->write_lock);
1602 wake_up(&mddev->sb_wait);
1603 return;
1604 }
1605 spin_unlock_irq(&mddev->write_lock);
1606
1607 dprintk(KERN_INFO
1608 "md: updating %s RAID superblock on device (in sync %d)\n",
1609 mdname(mddev),mddev->in_sync);
1610
1611 err = bitmap_update_sb(mddev->bitmap);
1612 ITERATE_RDEV(mddev,rdev,tmp) {
1613 char b[BDEVNAME_SIZE];
1614 dprintk(KERN_INFO "md: ");
1615 if (test_bit(Faulty, &rdev->flags))
1616 dprintk("(skipping faulty ");
1617
1618 dprintk("%s ", bdevname(rdev->bdev,b));
1619 if (!test_bit(Faulty, &rdev->flags)) {
1620 md_super_write(mddev,rdev,
1621 rdev->sb_offset<<1, rdev->sb_size,
1622 rdev->sb_page);
1623 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1624 bdevname(rdev->bdev,b),
1625 (unsigned long long)rdev->sb_offset);
1626
1627 } else
1628 dprintk(")\n");
1629 if (mddev->level == LEVEL_MULTIPATH)
1630 /* only need to write one superblock... */
1631 break;
1632 }
1633 md_super_wait(mddev);
1634 /* if there was a failure, sb_dirty was set to 1, and we re-write super */
1635
1636 spin_lock_irq(&mddev->write_lock);
1637 if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
1638 /* have to write it out again */
1639 spin_unlock_irq(&mddev->write_lock);
1640 goto repeat;
1641 }
1642 mddev->sb_dirty = 0;
1643 spin_unlock_irq(&mddev->write_lock);
1644 wake_up(&mddev->sb_wait);
1645
1646}
1647EXPORT_SYMBOL_GPL(md_update_sb);
1648
1649/* words written to sysfs files may, or may not, be \n terminated.
1650 * We want to accept either case. For this we use cmd_match.
1651 */
1652static int cmd_match(const char *cmd, const char *str)
1653{
1654 /* See if cmd, written into a sysfs file, matches
1655 * str. They must either be the same, or cmd can
1656 * have a trailing newline
1657 */
1658 while (*cmd && *str && *cmd == *str) {
1659 cmd++;
1660 str++;
1661 }
1662 if (*cmd == '\n')
1663 cmd++;
1664 if (*str || *cmd)
1665 return 0;
1666 return 1;
1667}
1668
1669struct rdev_sysfs_entry {
1670 struct attribute attr;
1671 ssize_t (*show)(mdk_rdev_t *, char *);
1672 ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1673};
1674
1675static ssize_t
1676state_show(mdk_rdev_t *rdev, char *page)
1677{
1678 char *sep = "";
1679 int len=0;
1680
1681 if (test_bit(Faulty, &rdev->flags)) {
1682 len+= sprintf(page+len, "%sfaulty",sep);
1683 sep = ",";
1684 }
1685 if (test_bit(In_sync, &rdev->flags)) {
1686 len += sprintf(page+len, "%sin_sync",sep);
1687 sep = ",";
1688 }
1689 if (!test_bit(Faulty, &rdev->flags) &&
1690 !test_bit(In_sync, &rdev->flags)) {
1691 len += sprintf(page+len, "%sspare", sep);
1692 sep = ",";
1693 }
1694 return len+sprintf(page+len, "\n");
1695}
1696
96de1e66
N
1697static struct rdev_sysfs_entry
1698rdev_state = __ATTR_RO(state);
1699
1700static ssize_t
1701super_show(mdk_rdev_t *rdev, char *page)
1702{
1703 if (rdev->sb_loaded && rdev->sb_size) {
1704 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1705 return rdev->sb_size;
1706 } else
1707 return 0;
1708}
1709static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1710
1711static ssize_t
1712errors_show(mdk_rdev_t *rdev, char *page)
1713{
1714 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1715}
1716
1717static ssize_t
1718errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1719{
1720 char *e;
1721 unsigned long n = simple_strtoul(buf, &e, 10);
1722 if (*buf && (*e == 0 || *e == '\n')) {
1723 atomic_set(&rdev->corrected_errors, n);
1724 return len;
1725 }
1726 return -EINVAL;
1727}
1728static struct rdev_sysfs_entry rdev_errors =
1729__ATTR(errors, 0644, errors_show, errors_store);
1730
1731static ssize_t
1732slot_show(mdk_rdev_t *rdev, char *page)
1733{
1734 if (rdev->raid_disk < 0)
1735 return sprintf(page, "none\n");
1736 else
1737 return sprintf(page, "%d\n", rdev->raid_disk);
1738}
1739
1740static ssize_t
1741slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1742{
1743 char *e;
1744 int slot = simple_strtoul(buf, &e, 10);
1745 if (strncmp(buf, "none", 4)==0)
1746 slot = -1;
1747 else if (e==buf || (*e && *e!= '\n'))
1748 return -EINVAL;
1749 if (rdev->mddev->pers)
1750 /* Cannot set slot in active array (yet) */
1751 return -EBUSY;
1752 if (slot >= rdev->mddev->raid_disks)
1753 return -ENOSPC;
1754 rdev->raid_disk = slot;
1755 /* assume it is working */
1756 rdev->flags = 0;
1757 set_bit(In_sync, &rdev->flags);
1758 return len;
1759}
1760
1761
1762static struct rdev_sysfs_entry rdev_slot =
1763__ATTR(slot, 0644, slot_show, slot_store);
1764
1765static ssize_t
1766offset_show(mdk_rdev_t *rdev, char *page)
1767{
1768 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1769}
1770
1771static ssize_t
1772offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1773{
1774 char *e;
1775 unsigned long long offset = simple_strtoull(buf, &e, 10);
1776 if (e==buf || (*e && *e != '\n'))
1777 return -EINVAL;
1778 if (rdev->mddev->pers)
1779 return -EBUSY;
1780 rdev->data_offset = offset;
1781 return len;
1782}
1783
1784static struct rdev_sysfs_entry rdev_offset =
1785__ATTR(offset, 0644, offset_show, offset_store);
1786
1787static ssize_t
1788rdev_size_show(mdk_rdev_t *rdev, char *page)
1789{
1790 return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1791}
1792
1793static ssize_t
1794rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1795{
1796 char *e;
1797 unsigned long long size = simple_strtoull(buf, &e, 10);
1798 if (e==buf || (*e && *e != '\n'))
1799 return -EINVAL;
1800 if (rdev->mddev->pers)
1801 return -EBUSY;
1802 rdev->size = size;
1803 if (size < rdev->mddev->size || rdev->mddev->size == 0)
1804 rdev->mddev->size = size;
1805 return len;
1806}
1807
1808static struct rdev_sysfs_entry rdev_size =
1809__ATTR(size, 0644, rdev_size_show, rdev_size_store);
1810
1811static struct attribute *rdev_default_attrs[] = {
1812 &rdev_state.attr,
1813 &rdev_super.attr,
1814 &rdev_errors.attr,
1815 &rdev_slot.attr,
1816 &rdev_offset.attr,
1817 &rdev_size.attr,
1818 NULL,
1819};
1820static ssize_t
1821rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1822{
1823 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1824 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1825
1826 if (!entry->show)
1827 return -EIO;
1828 return entry->show(rdev, page);
1829}
1830
1831static ssize_t
1832rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1833 const char *page, size_t length)
1834{
1835 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1836 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1837
1838 if (!entry->store)
1839 return -EIO;
1840 return entry->store(rdev, page, length);
1841}
1842
1843static void rdev_free(struct kobject *ko)
1844{
1845 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1846 kfree(rdev);
1847}
1848static struct sysfs_ops rdev_sysfs_ops = {
1849 .show = rdev_attr_show,
1850 .store = rdev_attr_store,
1851};
1852static struct kobj_type rdev_ktype = {
1853 .release = rdev_free,
1854 .sysfs_ops = &rdev_sysfs_ops,
1855 .default_attrs = rdev_default_attrs,
1856};
1857
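/*
 * rdev_ktype wires the attribute files above into each rdev's kobject; when
 * the last reference is dropped (kobject_put() in export_rdev()), rdev_free()
 * releases the mdk_rdev_t itself.
 */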
1858/*
1859 * Import a device. If 'super_format' >= 0, then sanity check the superblock
1860 *
1861 * mark the device faulty if:
1862 *
1863 * - the device is nonexistent (zero size)
1864 * - the device has no valid superblock
1865 *
1866 * a faulty rdev _never_ has rdev->sb set.
1867 */
1868static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1869{
1870 char b[BDEVNAME_SIZE];
1871 int err;
1872 mdk_rdev_t *rdev;
1873 sector_t size;
1874
1875 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1876 if (!rdev) {
1877 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1878 return ERR_PTR(-ENOMEM);
1879 }
1880
1881 if ((err = alloc_disk_sb(rdev)))
1882 goto abort_free;
1883
1884 err = lock_rdev(rdev, newdev);
1885 if (err)
1886 goto abort_free;
1887
1888 rdev->kobj.parent = NULL;
1889 rdev->kobj.ktype = &rdev_ktype;
1890 kobject_init(&rdev->kobj);
1891
1892 rdev->desc_nr = -1;
1893 rdev->flags = 0;
1894 rdev->data_offset = 0;
1895 atomic_set(&rdev->nr_pending, 0);
ba22dcbf 1896 atomic_set(&rdev->read_errors, 0);
4dbcdc75 1897 atomic_set(&rdev->corrected_errors, 0);
1da177e4
LT
1898
1899 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1900 if (!size) {
1901 printk(KERN_WARNING
1902 "md: %s has zero or unknown size, marking faulty!\n",
1903 bdevname(rdev->bdev,b));
1904 err = -EINVAL;
1905 goto abort_free;
1906 }
1907
1908 if (super_format >= 0) {
1909 err = super_types[super_format].
1910 load_super(rdev, NULL, super_minor);
1911 if (err == -EINVAL) {
1912 printk(KERN_WARNING
1913 "md: %s has invalid sb, not importing!\n",
1914 bdevname(rdev->bdev,b));
1915 goto abort_free;
1916 }
1917 if (err < 0) {
1918 printk(KERN_WARNING
1919 "md: could not read %s's sb, not importing!\n",
1920 bdevname(rdev->bdev,b));
1921 goto abort_free;
1922 }
1923 }
1924 INIT_LIST_HEAD(&rdev->same_set);
1925
1926 return rdev;
1927
1928abort_free:
1929 if (rdev->sb_page) {
1930 if (rdev->bdev)
1931 unlock_rdev(rdev);
1932 free_disk_sb(rdev);
1933 }
1934 kfree(rdev);
1935 return ERR_PTR(err);
1936}
1937
1938/*
1939 * Check a full RAID array for plausibility
1940 */
1941
1942
a757e64c 1943static void analyze_sbs(mddev_t * mddev)
1da177e4
LT
1944{
1945 int i;
1946 struct list_head *tmp;
1947 mdk_rdev_t *rdev, *freshest;
1948 char b[BDEVNAME_SIZE];
1949
1950 freshest = NULL;
1951 ITERATE_RDEV(mddev,rdev,tmp)
1952 switch (super_types[mddev->major_version].
1953 load_super(rdev, freshest, mddev->minor_version)) {
1954 case 1:
1955 freshest = rdev;
1956 break;
1957 case 0:
1958 break;
1959 default:
1960 printk( KERN_ERR \
1961 "md: fatal superblock inconsistency in %s"
1962 " -- removing from array\n",
1963 bdevname(rdev->bdev,b));
1964 kick_rdev_from_array(rdev);
1965 }
1966
1967
1968 super_types[mddev->major_version].
1969 validate_super(mddev, freshest);
1970
1971 i = 0;
1972 ITERATE_RDEV(mddev,rdev,tmp) {
1973 if (rdev != freshest)
1974 if (super_types[mddev->major_version].
1975 validate_super(mddev, rdev)) {
1976 printk(KERN_WARNING "md: kicking non-fresh %s"
1977 " from array!\n",
1978 bdevname(rdev->bdev,b));
1979 kick_rdev_from_array(rdev);
1980 continue;
1981 }
1982 if (mddev->level == LEVEL_MULTIPATH) {
1983 rdev->desc_nr = i++;
1984 rdev->raid_disk = rdev->desc_nr;
b2d444d7 1985 set_bit(In_sync, &rdev->flags);
1da177e4
LT
1986 }
1987 }
1988
1989
1990
1991 if (mddev->recovery_cp != MaxSector &&
1992 mddev->level >= 1)
1993 printk(KERN_ERR "md: %s: raid array is not clean"
1994 " -- starting background reconstruction\n",
1995 mdname(mddev));
1996
1da177e4
LT
1997}
1998
16f17b39
N
1999static ssize_t
2000safe_delay_show(mddev_t *mddev, char *page)
2001{
2002 int msec = (mddev->safemode_delay*1000)/HZ;
2003 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2004}
2005static ssize_t
2006safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2007{
2008 int scale=1;
2009 int dot=0;
2010 int i;
2011 unsigned long msec;
2012 char buf[30];
2013 char *e;
2014 /* remove a period, and count digits after it */
2015 if (len >= sizeof(buf))
2016 return -EINVAL;
2017 strlcpy(buf, cbuf, len);
2018 buf[len] = 0;
2019 for (i=0; i<len; i++) {
2020 if (dot) {
2021 if (isdigit(buf[i])) {
2022 buf[i-1] = buf[i];
2023 scale *= 10;
2024 }
2025 buf[i] = 0;
2026 } else if (buf[i] == '.') {
2027 dot=1;
2028 buf[i] = 0;
2029 }
2030 }
2031 msec = simple_strtoul(buf, &e, 10);
2032 if (e == buf || (*e && *e != '\n'))
2033 return -EINVAL;
2034 msec = (msec * 1000) / scale;
2035 if (msec == 0)
2036 mddev->safemode_delay = 0;
2037 else {
2038 mddev->safemode_delay = (msec*HZ)/1000;
2039 if (mddev->safemode_delay == 0)
2040 mddev->safemode_delay = 1;
2041 }
2042 return len;
2043}
2044static struct md_sysfs_entry md_safe_delay =
2045__ATTR(safe_mode_delay, 0644,safe_delay_show, safe_delay_store);
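/* Illustrative usage, not part of the original source (array name assumed):
 *
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay  # mark the array clean ~200ms after writes stop
 *   echo 0     > /sys/block/md0/md/safe_mode_delay  # disable the delayed safe-mode transition
 *   cat /sys/block/md0/md/safe_mode_delay           # reported back as e.g. "0.200"
 */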
2046
eae1701f 2047static ssize_t
96de1e66 2048level_show(mddev_t *mddev, char *page)
eae1701f 2049{
2604b703 2050 struct mdk_personality *p = mddev->pers;
d9d166c2 2051 if (p)
eae1701f 2052 return sprintf(page, "%s\n", p->name);
d9d166c2
N
2053 else if (mddev->clevel[0])
2054 return sprintf(page, "%s\n", mddev->clevel);
2055 else if (mddev->level != LEVEL_NONE)
2056 return sprintf(page, "%d\n", mddev->level);
2057 else
2058 return 0;
eae1701f
N
2059}
2060
d9d166c2
N
2061static ssize_t
2062level_store(mddev_t *mddev, const char *buf, size_t len)
2063{
2064 int rv = len;
2065 if (mddev->pers)
2066 return -EBUSY;
2067 if (len == 0)
2068 return 0;
2069 if (len >= sizeof(mddev->clevel))
2070 return -ENOSPC;
2071 strncpy(mddev->clevel, buf, len);
2072 if (mddev->clevel[len-1] == '\n')
2073 len--;
2074 mddev->clevel[len] = 0;
2075 mddev->level = LEVEL_NONE;
2076 return rv;
2077}
2078
2079static struct md_sysfs_entry md_level =
2080__ATTR(level, 0644, level_show, level_store);
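/* Illustrative usage, not part of the original source (array name assumed):
 *
 *   cat /sys/block/md0/md/level            # e.g. "raid5" once a personality is attached
 *   echo raid1 > /sys/block/md0/md/level   # only accepted before the array is started (EBUSY otherwise)
 */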
eae1701f
N
2081
2082static ssize_t
96de1e66 2083raid_disks_show(mddev_t *mddev, char *page)
eae1701f 2084{
bb636547
N
2085 if (mddev->raid_disks == 0)
2086 return 0;
eae1701f
N
2087 return sprintf(page, "%d\n", mddev->raid_disks);
2088}
2089
da943b99
N
2090static int update_raid_disks(mddev_t *mddev, int raid_disks);
2091
2092static ssize_t
2093raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2094{
2095 /* can only set raid_disks if array is not yet active */
2096 char *e;
2097 int rv = 0;
2098 unsigned long n = simple_strtoul(buf, &e, 10);
2099
2100 if (!*buf || (*e && *e != '\n'))
2101 return -EINVAL;
2102
2103 if (mddev->pers)
2104 rv = update_raid_disks(mddev, n);
2105 else
2106 mddev->raid_disks = n;
2107 return rv ? rv : len;
2108}
2109static struct md_sysfs_entry md_raid_disks =
2110__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
eae1701f 2111
3b34380a
N
2112static ssize_t
2113chunk_size_show(mddev_t *mddev, char *page)
2114{
2115 return sprintf(page, "%d\n", mddev->chunk_size);
2116}
2117
2118static ssize_t
2119chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2120{
2121 /* can only set chunk_size if array is not yet active */
2122 char *e;
2123 unsigned long n = simple_strtoul(buf, &e, 10);
2124
2125 if (mddev->pers)
2126 return -EBUSY;
2127 if (!*buf || (*e && *e != '\n'))
2128 return -EINVAL;
2129
2130 mddev->chunk_size = n;
2131 return len;
2132}
2133static struct md_sysfs_entry md_chunk_size =
2134__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
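/* Illustrative usage, not part of the original source (array name assumed).
 * The value is in bytes and is validated later in do_md_run():
 *
 *   echo 131072 > /sys/block/md0/md/chunk_size   # 128 KiB chunks; refused with EBUSY once active
 */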
2135
6d7ff738
N
2136static ssize_t
2137null_show(mddev_t *mddev, char *page)
2138{
2139 return -EINVAL;
2140}
2141
2142static ssize_t
2143new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2144{
2145 /* buf must be of the form %d:%d, optionally followed by a newline, giving major and minor numbers */
2146 /* The new device is added to the array.
2147 * If the array has a persistent superblock, we read the
2148 * superblock to initialise info and check validity.
2149 * Otherwise, the only checking done is that in bind_rdev_to_array,
2150 * which mainly checks size.
2151 */
2152 char *e;
2153 int major = simple_strtoul(buf, &e, 10);
2154 int minor;
2155 dev_t dev;
2156 mdk_rdev_t *rdev;
2157 int err;
2158
2159 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2160 return -EINVAL;
2161 minor = simple_strtoul(e+1, &e, 10);
2162 if (*e && *e != '\n')
2163 return -EINVAL;
2164 dev = MKDEV(major, minor);
2165 if (major != MAJOR(dev) ||
2166 minor != MINOR(dev))
2167 return -EOVERFLOW;
2168
2169
2170 if (mddev->persistent) {
2171 rdev = md_import_device(dev, mddev->major_version,
2172 mddev->minor_version);
2173 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2174 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2175 mdk_rdev_t, same_set);
2176 err = super_types[mddev->major_version]
2177 .load_super(rdev, rdev0, mddev->minor_version);
2178 if (err < 0)
2179 goto out;
2180 }
2181 } else
2182 rdev = md_import_device(dev, -1, -1);
2183
2184 if (IS_ERR(rdev))
2185 return PTR_ERR(rdev);
2186 err = bind_rdev_to_array(rdev, mddev);
2187 out:
2188 if (err)
2189 export_rdev(rdev);
2190 return err ? err : len;
2191}
2192
2193static struct md_sysfs_entry md_new_device =
2194__ATTR(new_dev, 0200, null_show, new_dev_store);
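/* Illustrative usage, not part of the original source (device numbers assumed):
 *
 *   echo 8:16 > /sys/block/md0/md/new_dev   # add the block device with major 8, minor 16 (e.g. /dev/sdb)
 */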
3b34380a 2195
a35b0d69
N
2196static ssize_t
2197size_show(mddev_t *mddev, char *page)
2198{
2199 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2200}
2201
2202static int update_size(mddev_t *mddev, unsigned long size);
2203
2204static ssize_t
2205size_store(mddev_t *mddev, const char *buf, size_t len)
2206{
2207 /* If array is inactive, we can reduce the component size, but
2208 * not increase it (except from 0).
2209 * If array is active, we can try an on-line resize
2210 */
2211 char *e;
2212 int err = 0;
2213 unsigned long long size = simple_strtoull(buf, &e, 10);
2214 if (!*buf || *buf == '\n' ||
2215 (*e && *e != '\n'))
2216 return -EINVAL;
2217
2218 if (mddev->pers) {
2219 err = update_size(mddev, size);
2220 md_update_sb(mddev);
2221 } else {
2222 if (mddev->size == 0 ||
2223 mddev->size > size)
2224 mddev->size = size;
2225 else
2226 err = -ENOSPC;
2227 }
2228 return err ? err : len;
2229}
2230
2231static struct md_sysfs_entry md_size =
2232__ATTR(component_size, 0644, size_show, size_store);
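/* Illustrative usage, not part of the original source (array name and size assumed):
 *
 *   cat /sys/block/md0/md/component_size              # amount of each device in use, in 1K blocks
 *   echo 10000000 > /sys/block/md0/md/component_size  # shrink an inactive array, or attempt an on-line resize
 */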
2233
8bb93aac
N
2234
2235/* Metadata version.
2236 * This is either 'none' for arrays with externally managed metadata,
2237 * or N.M for internally known formats
2238 */
2239static ssize_t
2240metadata_show(mddev_t *mddev, char *page)
2241{
2242 if (mddev->persistent)
2243 return sprintf(page, "%d.%d\n",
2244 mddev->major_version, mddev->minor_version);
2245 else
2246 return sprintf(page, "none\n");
2247}
2248
2249static ssize_t
2250metadata_store(mddev_t *mddev, const char *buf, size_t len)
2251{
2252 int major, minor;
2253 char *e;
2254 if (!list_empty(&mddev->disks))
2255 return -EBUSY;
2256
2257 if (cmd_match(buf, "none")) {
2258 mddev->persistent = 0;
2259 mddev->major_version = 0;
2260 mddev->minor_version = 90;
2261 return len;
2262 }
2263 major = simple_strtoul(buf, &e, 10);
2264 if (e==buf || *e != '.')
2265 return -EINVAL;
2266 buf = e+1;
2267 minor = simple_strtoul(buf, &e, 10);
2268 if (e==buf || *e != '\n')
2269 return -EINVAL;
2270 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2271 super_types[major].name == NULL)
2272 return -ENOENT;
2273 mddev->major_version = major;
2274 mddev->minor_version = minor;
2275 mddev->persistent = 1;
2276 return len;
2277}
2278
2279static struct md_sysfs_entry md_metadata =
2280__ATTR(metadata_version, 0644, metadata_show, metadata_store);
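/* Illustrative usage, not part of the original source (array name assumed):
 *
 *   echo 0.90 > /sys/block/md0/md/metadata_version   # select the in-kernel 0.90 superblock format
 *   echo none > /sys/block/md0/md/metadata_version   # metadata managed entirely by userspace
 *
 * Both writes are refused with EBUSY once any device has been added to the array.
 */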
2281
24dd469d 2282static ssize_t
7eec314d 2283action_show(mddev_t *mddev, char *page)
24dd469d 2284{
7eec314d 2285 char *type = "idle";
31399d9e
N
2286 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2287 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
ccfcc3c1
N
2288 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2289 type = "reshape";
2290 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
24dd469d
N
2291 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2292 type = "resync";
2293 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2294 type = "check";
2295 else
2296 type = "repair";
2297 } else
2298 type = "recover";
2299 }
2300 return sprintf(page, "%s\n", type);
2301}
2302
2303static ssize_t
7eec314d 2304action_store(mddev_t *mddev, const char *page, size_t len)
24dd469d 2305{
7eec314d
N
2306 if (!mddev->pers || !mddev->pers->sync_request)
2307 return -EINVAL;
2308
bce74dac 2309 if (cmd_match(page, "idle")) {
7eec314d
N
2310 if (mddev->sync_thread) {
2311 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2312 md_unregister_thread(mddev->sync_thread);
2313 mddev->sync_thread = NULL;
2314 mddev->recovery = 0;
2315 }
03c902e1
N
2316 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2317 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
24dd469d 2318 return -EBUSY;
03c902e1 2319 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
7eec314d 2320 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
16484bf5
N
2321 else if (cmd_match(page, "reshape")) {
2322 int err;
2323 if (mddev->pers->start_reshape == NULL)
2324 return -EINVAL;
2325 err = mddev->pers->start_reshape(mddev);
2326 if (err)
2327 return err;
2328 } else {
bce74dac 2329 if (cmd_match(page, "check"))
7eec314d 2330 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2adc7d47 2331 else if (!cmd_match(page, "repair"))
7eec314d
N
2332 return -EINVAL;
2333 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2334 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7eec314d 2335 }
03c902e1 2336 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
24dd469d
N
2337 md_wakeup_thread(mddev->thread);
2338 return len;
2339}
2340
9d88883e 2341static ssize_t
96de1e66 2342mismatch_cnt_show(mddev_t *mddev, char *page)
9d88883e
N
2343{
2344 return sprintf(page, "%llu\n",
2345 (unsigned long long) mddev->resync_mismatches);
2346}
2347
96de1e66 2348static struct md_sysfs_entry
7eec314d 2349md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
24dd469d 2350
96de1e66
N
2351
2352static struct md_sysfs_entry
2353md_mismatches = __ATTR_RO(mismatch_cnt);
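/* Illustrative usage, not part of the original source (array name assumed):
 *
 *   echo check  > /sys/block/md0/md/sync_action   # read-only scrub; mismatches are only counted
 *   echo repair > /sys/block/md0/md/sync_action   # scrub and rewrite inconsistent stripes
 *   cat /sys/block/md0/md/mismatch_cnt            # sectors found inconsistent by the last check/repair
 *   echo idle   > /sys/block/md0/md/sync_action   # interrupt whatever sync action is running
 */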
9d88883e 2354
88202a0c
N
2355static ssize_t
2356sync_min_show(mddev_t *mddev, char *page)
2357{
2358 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2359 mddev->sync_speed_min ? "local": "system");
2360}
2361
2362static ssize_t
2363sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2364{
2365 int min;
2366 char *e;
2367 if (strncmp(buf, "system", 6)==0) {
2368 mddev->sync_speed_min = 0;
2369 return len;
2370 }
2371 min = simple_strtoul(buf, &e, 10);
2372 if (buf == e || (*e && *e != '\n') || min <= 0)
2373 return -EINVAL;
2374 mddev->sync_speed_min = min;
2375 return len;
2376}
2377
2378static struct md_sysfs_entry md_sync_min =
2379__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2380
2381static ssize_t
2382sync_max_show(mddev_t *mddev, char *page)
2383{
2384 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2385 mddev->sync_speed_max ? "local": "system");
2386}
2387
2388static ssize_t
2389sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2390{
2391 int max;
2392 char *e;
2393 if (strncmp(buf, "system", 6)==0) {
2394 mddev->sync_speed_max = 0;
2395 return len;
2396 }
2397 max = simple_strtoul(buf, &e, 10);
2398 if (buf == e || (*e && *e != '\n') || max <= 0)
2399 return -EINVAL;
2400 mddev->sync_speed_max = max;
2401 return len;
2402}
2403
2404static struct md_sysfs_entry md_sync_max =
2405__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
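/* Illustrative usage, not part of the original source (array name assumed):
 *
 *   echo 5000   > /sys/block/md0/md/sync_speed_min   # per-array floor of 5000 KiB/s
 *   echo system > /sys/block/md0/md/sync_speed_max   # fall back to the global /proc/sys/dev/raid limit
 *   cat /sys/block/md0/md/sync_speed_min             # e.g. "5000 (local)" or "1000 (system)"
 */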
2406
2407
2408static ssize_t
2409sync_speed_show(mddev_t *mddev, char *page)
2410{
2411 unsigned long resync, dt, db;
2412 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2413 dt = ((jiffies - mddev->resync_mark) / HZ);
2414 if (!dt) dt++;
2415 db = resync - (mddev->resync_mark_cnt);
2416 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2417}
2418
2419static struct md_sysfs_entry
2420md_sync_speed = __ATTR_RO(sync_speed);
2421
2422static ssize_t
2423sync_completed_show(mddev_t *mddev, char *page)
2424{
2425 unsigned long max_blocks, resync;
2426
2427 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2428 max_blocks = mddev->resync_max_sectors;
2429 else
2430 max_blocks = mddev->size << 1;
2431
2432 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2433 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2434}
2435
2436static struct md_sysfs_entry
2437md_sync_completed = __ATTR_RO(sync_completed);
2438
e464eafd
N
2439static ssize_t
2440suspend_lo_show(mddev_t *mddev, char *page)
2441{
2442 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2443}
2444
2445static ssize_t
2446suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2447{
2448 char *e;
2449 unsigned long long new = simple_strtoull(buf, &e, 10);
2450
2451 if (mddev->pers->quiesce == NULL)
2452 return -EINVAL;
2453 if (buf == e || (*e && *e != '\n'))
2454 return -EINVAL;
2455 if (new >= mddev->suspend_hi ||
2456 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2457 mddev->suspend_lo = new;
2458 mddev->pers->quiesce(mddev, 2);
2459 return len;
2460 } else
2461 return -EINVAL;
2462}
2463static struct md_sysfs_entry md_suspend_lo =
2464__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2465
2466
2467static ssize_t
2468suspend_hi_show(mddev_t *mddev, char *page)
2469{
2470 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2471}
2472
2473static ssize_t
2474suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2475{
2476 char *e;
2477 unsigned long long new = simple_strtoull(buf, &e, 10);
2478
2479 if (mddev->pers->quiesce == NULL)
2480 return -EINVAL;
2481 if (buf == e || (*e && *e != '\n'))
2482 return -EINVAL;
2483 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2484 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2485 mddev->suspend_hi = new;
2486 mddev->pers->quiesce(mddev, 1);
2487 mddev->pers->quiesce(mddev, 0);
2488 return len;
2489 } else
2490 return -EINVAL;
2491}
2492static struct md_sysfs_entry md_suspend_hi =
2493__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
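/* Illustrative usage, not part of the original source (array name and range assumed).
 * Writing the pair below asks the personality to quiesce I/O to sectors
 * [suspend_lo, suspend_hi), e.g. while externally managed metadata is updated:
 *
 *   echo 0      > /sys/block/md0/md/suspend_lo
 *   echo 100000 > /sys/block/md0/md/suspend_hi
 */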
2494
2495
eae1701f
N
2496static struct attribute *md_default_attrs[] = {
2497 &md_level.attr,
2498 &md_raid_disks.attr,
3b34380a 2499 &md_chunk_size.attr,
a35b0d69 2500 &md_size.attr,
8bb93aac 2501 &md_metadata.attr,
6d7ff738 2502 &md_new_device.attr,
16f17b39 2503 &md_safe_delay.attr,
411036fa
N
2504 NULL,
2505};
2506
2507static struct attribute *md_redundancy_attrs[] = {
24dd469d 2508 &md_scan_mode.attr,
9d88883e 2509 &md_mismatches.attr,
88202a0c
N
2510 &md_sync_min.attr,
2511 &md_sync_max.attr,
2512 &md_sync_speed.attr,
2513 &md_sync_completed.attr,
e464eafd
N
2514 &md_suspend_lo.attr,
2515 &md_suspend_hi.attr,
eae1701f
N
2516 NULL,
2517};
411036fa
N
2518static struct attribute_group md_redundancy_group = {
2519 .name = NULL,
2520 .attrs = md_redundancy_attrs,
2521};
2522
eae1701f
N
2523
2524static ssize_t
2525md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2526{
2527 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2528 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
96de1e66 2529 ssize_t rv;
eae1701f
N
2530
2531 if (!entry->show)
2532 return -EIO;
5dc5cf7d
IM
2533 rv = mddev_lock(mddev);
2534 if (!rv) {
2535 rv = entry->show(mddev, page);
2536 mddev_unlock(mddev);
2537 }
96de1e66 2538 return rv;
eae1701f
N
2539}
2540
2541static ssize_t
2542md_attr_store(struct kobject *kobj, struct attribute *attr,
2543 const char *page, size_t length)
2544{
2545 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2546 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
96de1e66 2547 ssize_t rv;
eae1701f
N
2548
2549 if (!entry->store)
2550 return -EIO;
5dc5cf7d
IM
2551 rv = mddev_lock(mddev);
2552 if (!rv) {
2553 rv = entry->store(mddev, page, length);
2554 mddev_unlock(mddev);
2555 }
96de1e66 2556 return rv;
eae1701f
N
2557}
2558
2559static void md_free(struct kobject *ko)
2560{
2561 mddev_t *mddev = container_of(ko, mddev_t, kobj);
2562 kfree(mddev);
2563}
2564
2565static struct sysfs_ops md_sysfs_ops = {
2566 .show = md_attr_show,
2567 .store = md_attr_store,
2568};
2569static struct kobj_type md_ktype = {
2570 .release = md_free,
2571 .sysfs_ops = &md_sysfs_ops,
2572 .default_attrs = md_default_attrs,
2573};
2574
1da177e4
LT
2575int mdp_major = 0;
2576
2577static struct kobject *md_probe(dev_t dev, int *part, void *data)
2578{
48c9c27b 2579 static DEFINE_MUTEX(disks_mutex);
1da177e4
LT
2580 mddev_t *mddev = mddev_find(dev);
2581 struct gendisk *disk;
2582 int partitioned = (MAJOR(dev) != MD_MAJOR);
2583 int shift = partitioned ? MdpMinorShift : 0;
2584 int unit = MINOR(dev) >> shift;
2585
2586 if (!mddev)
2587 return NULL;
2588
48c9c27b 2589 mutex_lock(&disks_mutex);
1da177e4 2590 if (mddev->gendisk) {
48c9c27b 2591 mutex_unlock(&disks_mutex);
1da177e4
LT
2592 mddev_put(mddev);
2593 return NULL;
2594 }
2595 disk = alloc_disk(1 << shift);
2596 if (!disk) {
48c9c27b 2597 mutex_unlock(&disks_mutex);
1da177e4
LT
2598 mddev_put(mddev);
2599 return NULL;
2600 }
2601 disk->major = MAJOR(dev);
2602 disk->first_minor = unit << shift;
2603 if (partitioned) {
2604 sprintf(disk->disk_name, "md_d%d", unit);
2605 sprintf(disk->devfs_name, "md/d%d", unit);
2606 } else {
2607 sprintf(disk->disk_name, "md%d", unit);
2608 sprintf(disk->devfs_name, "md/%d", unit);
2609 }
2610 disk->fops = &md_fops;
2611 disk->private_data = mddev;
2612 disk->queue = mddev->queue;
2613 add_disk(disk);
2614 mddev->gendisk = disk;
48c9c27b 2615 mutex_unlock(&disks_mutex);
9c791977 2616 mddev->kobj.parent = &disk->kobj;
eae1701f
N
2617 mddev->kobj.k_name = NULL;
2618 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2619 mddev->kobj.ktype = &md_ktype;
2620 kobject_register(&mddev->kobj);
1da177e4
LT
2621 return NULL;
2622}
2623
1da177e4
LT
2624static void md_safemode_timeout(unsigned long data)
2625{
2626 mddev_t *mddev = (mddev_t *) data;
2627
2628 mddev->safemode = 1;
2629 md_wakeup_thread(mddev->thread);
2630}
2631
6ff8d8ec 2632static int start_dirty_degraded;
1da177e4
LT
2633
2634static int do_md_run(mddev_t * mddev)
2635{
2604b703 2636 int err;
1da177e4
LT
2637 int chunk_size;
2638 struct list_head *tmp;
2639 mdk_rdev_t *rdev;
2640 struct gendisk *disk;
2604b703 2641 struct mdk_personality *pers;
1da177e4
LT
2642 char b[BDEVNAME_SIZE];
2643
a757e64c
N
2644 if (list_empty(&mddev->disks))
2645 /* cannot run an array with no devices.. */
1da177e4 2646 return -EINVAL;
1da177e4
LT
2647
2648 if (mddev->pers)
2649 return -EBUSY;
2650
2651 /*
2652 * Analyze all RAID superblock(s)
2653 */
a757e64c
N
2654 if (!mddev->raid_disks)
2655 analyze_sbs(mddev);
1da177e4
LT
2656
2657 chunk_size = mddev->chunk_size;
2604b703
N
2658
2659 if (chunk_size) {
1da177e4
LT
2660 if (chunk_size > MAX_CHUNK_SIZE) {
2661 printk(KERN_ERR "too big chunk_size: %d > %d\n",
2662 chunk_size, MAX_CHUNK_SIZE);
2663 return -EINVAL;
2664 }
2665 /*
2666 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
2667 */
2668 if ( (1 << ffz(~chunk_size)) != chunk_size) {
a757e64c 2669 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
1da177e4
LT
2670 return -EINVAL;
2671 }
2672 if (chunk_size < PAGE_SIZE) {
2673 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
2674 chunk_size, PAGE_SIZE);
2675 return -EINVAL;
2676 }
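		/* Worked example (illustrative): chunk_size = 65536 (64 KiB) passes both
		 * tests above, 96*1024 is rejected as not a power of two, and 2048 is
		 * rejected on systems with 4 KiB pages as smaller than PAGE_SIZE.
		 */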
2677
2678 /* devices must have minimum size of one chunk */
2679 ITERATE_RDEV(mddev,rdev,tmp) {
b2d444d7 2680 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2681 continue;
2682 if (rdev->size < chunk_size / 1024) {
2683 printk(KERN_WARNING
2684 "md: Dev %s smaller than chunk_size:"
2685 " %lluk < %dk\n",
2686 bdevname(rdev->bdev,b),
2687 (unsigned long long)rdev->size,
2688 chunk_size / 1024);
2689 return -EINVAL;
2690 }
2691 }
2692 }
2693
1da177e4 2694#ifdef CONFIG_KMOD
d9d166c2
N
2695 if (mddev->level != LEVEL_NONE)
2696 request_module("md-level-%d", mddev->level);
2697 else if (mddev->clevel[0])
2698 request_module("md-%s", mddev->clevel);
1da177e4
LT
2699#endif
2700
2701 /*
2702 * Drop all container device buffers, from now on
2703 * the only valid external interface is through the md
2704 * device.
2705 * Also find largest hardsector size
2706 */
2707 ITERATE_RDEV(mddev,rdev,tmp) {
b2d444d7 2708 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
2709 continue;
2710 sync_blockdev(rdev->bdev);
2711 invalidate_bdev(rdev->bdev, 0);
2712 }
2713
2714 md_probe(mddev->unit, NULL, NULL);
2715 disk = mddev->gendisk;
2716 if (!disk)
2717 return -ENOMEM;
2718
2719 spin_lock(&pers_lock);
d9d166c2 2720 pers = find_pers(mddev->level, mddev->clevel);
2604b703 2721 if (!pers || !try_module_get(pers->owner)) {
1da177e4 2722 spin_unlock(&pers_lock);
d9d166c2
N
2723 if (mddev->level != LEVEL_NONE)
2724 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
2725 mddev->level);
2726 else
2727 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
2728 mddev->clevel);
1da177e4
LT
2729 return -EINVAL;
2730 }
2604b703 2731 mddev->pers = pers;
1da177e4 2732 spin_unlock(&pers_lock);
d9d166c2
N
2733 mddev->level = pers->level;
2734 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
1da177e4 2735
f6705578 2736 if (mddev->reshape_position != MaxSector &&
63c70c4f 2737 pers->start_reshape == NULL) {
f6705578
N
2738 /* This personality cannot handle reshaping... */
2739 mddev->pers = NULL;
2740 module_put(pers->owner);
2741 return -EINVAL;
2742 }
2743
657390d2 2744 mddev->recovery = 0;
1da177e4 2745 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
a9701a30 2746 mddev->barriers_work = 1;
6ff8d8ec 2747 mddev->ok_start_degraded = start_dirty_degraded;
1da177e4 2748
f91de92e
N
2749 if (start_readonly)
2750 mddev->ro = 2; /* read-only, but switch on first write */
2751
b15c2e57
N
2752 err = mddev->pers->run(mddev);
2753 if (!err && mddev->pers->sync_request) {
2754 err = bitmap_create(mddev);
2755 if (err) {
2756 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
2757 mdname(mddev), err);
2758 mddev->pers->stop(mddev);
2759 }
2760 }
1da177e4
LT
2761 if (err) {
2762 printk(KERN_ERR "md: pers->run() failed ...\n");
2763 module_put(mddev->pers->owner);
2764 mddev->pers = NULL;
32a7627c
N
2765 bitmap_destroy(mddev);
2766 return err;
1da177e4 2767 }
411036fa
N
2768 if (mddev->pers->sync_request)
2769 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
fd9d49ca
N
2770 else if (mddev->ro == 2) /* auto-readonly not meaningful */
2771 mddev->ro = 0;
2772
1da177e4
LT
2773 atomic_set(&mddev->writes_pending,0);
2774 mddev->safemode = 0;
2775 mddev->safemode_timer.function = md_safemode_timeout;
2776 mddev->safemode_timer.data = (unsigned long) mddev;
16f17b39 2777 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
1da177e4 2778 mddev->in_sync = 1;
86e6ffdd
N
2779
2780 ITERATE_RDEV(mddev,rdev,tmp)
2781 if (rdev->raid_disk >= 0) {
2782 char nm[20];
2783 sprintf(nm, "rd%d", rdev->raid_disk);
2784 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
2785 }
1da177e4
LT
2786
2787 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
005eca5e 2788 md_wakeup_thread(mddev->thread);
1da177e4
LT
2789
2790 if (mddev->sb_dirty)
2791 md_update_sb(mddev);
2792
2793 set_capacity(disk, mddev->array_size<<1);
2794
2795 /* If we call blk_queue_make_request here, it will
2796 * re-initialise max_sectors etc which may have been
2797 * refined inside ->run(). So just set the bits we need to set.
2798 * Most initialisation happened when we called
2799 * blk_queue_make_request(..., md_fail_request)
2800 * earlier.
2801 */
2802 mddev->queue->queuedata = mddev;
2803 mddev->queue->make_request_fn = mddev->pers->make_request;
2804
5fd6c1dc
N
2805 /* If there is a partially-recovered drive we need to
2806 * start recovery here. If we leave it to md_check_recovery,
2807 * it will remove the drives and not do the right thing
2808 */
2809 if (mddev->degraded) {
2810 struct list_head *rtmp;
2811 int spares = 0;
2812 ITERATE_RDEV(mddev,rdev,rtmp)
2813 if (rdev->raid_disk >= 0 &&
2814 !test_bit(In_sync, &rdev->flags) &&
2815 !test_bit(Faulty, &rdev->flags))
2816 /* complete an interrupted recovery */
2817 spares++;
2818 if (spares && mddev->pers->sync_request) {
2819 mddev->recovery = 0;
2820 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
2821 mddev->sync_thread = md_register_thread(md_do_sync,
2822 mddev,
2823 "%s_resync");
2824 if (!mddev->sync_thread) {
2825 printk(KERN_ERR "%s: could not start resync"
2826 " thread...\n",
2827 mdname(mddev));
2828 /* leave the spares where they are, it shouldn't hurt */
2829 mddev->recovery = 0;
2830 } else
2831 md_wakeup_thread(mddev->sync_thread);
2832 }
2833 }
2834
1da177e4 2835 mddev->changed = 1;
d7603b7e 2836 md_new_event(mddev);
1da177e4
LT
2837 return 0;
2838}
2839
2840static int restart_array(mddev_t *mddev)
2841{
2842 struct gendisk *disk = mddev->gendisk;
2843 int err;
2844
2845 /*
2846 * Complain if it has no devices
2847 */
2848 err = -ENXIO;
2849 if (list_empty(&mddev->disks))
2850 goto out;
2851
2852 if (mddev->pers) {
2853 err = -EBUSY;
2854 if (!mddev->ro)
2855 goto out;
2856
2857 mddev->safemode = 0;
2858 mddev->ro = 0;
2859 set_disk_ro(disk, 0);
2860
2861 printk(KERN_INFO "md: %s switched to read-write mode.\n",
2862 mdname(mddev));
2863 /*
2864 * Kick recovery or resync if necessary
2865 */
2866 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2867 md_wakeup_thread(mddev->thread);
5fd6c1dc 2868 md_wakeup_thread(mddev->sync_thread);
1da177e4
LT
2869 err = 0;
2870 } else {
2871 printk(KERN_ERR "md: %s has no personality assigned.\n",
2872 mdname(mddev));
2873 err = -EINVAL;
2874 }
2875
2876out:
2877 return err;
2878}
2879
2880static int do_md_stop(mddev_t * mddev, int ro)
2881{
2882 int err = 0;
2883 struct gendisk *disk = mddev->gendisk;
2884
2885 if (mddev->pers) {
2886 if (atomic_read(&mddev->active)>2) {
2887 printk("md: %s still in use.\n",mdname(mddev));
2888 return -EBUSY;
2889 }
2890
2891 if (mddev->sync_thread) {
5fd6c1dc 2892 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1da177e4
LT
2893 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2894 md_unregister_thread(mddev->sync_thread);
2895 mddev->sync_thread = NULL;
2896 }
2897
2898 del_timer_sync(&mddev->safemode_timer);
2899
2900 invalidate_partition(disk, 0);
2901
2902 if (ro) {
2903 err = -ENXIO;
f91de92e 2904 if (mddev->ro==1)
1da177e4
LT
2905 goto out;
2906 mddev->ro = 1;
2907 } else {
6b8b3e8a 2908 bitmap_flush(mddev);
a9701a30 2909 md_super_wait(mddev);
1da177e4
LT
2910 if (mddev->ro)
2911 set_disk_ro(disk, 0);
2912 blk_queue_make_request(mddev->queue, md_fail_request);
2913 mddev->pers->stop(mddev);
411036fa
N
2914 if (mddev->pers->sync_request)
2915 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2916
1da177e4
LT
2917 module_put(mddev->pers->owner);
2918 mddev->pers = NULL;
2919 if (mddev->ro)
2920 mddev->ro = 0;
2921 }
5fd6c1dc 2922 if (!mddev->in_sync || mddev->sb_dirty) {
1da177e4
LT
2923 /* mark array as shutdown cleanly */
2924 mddev->in_sync = 1;
2925 md_update_sb(mddev);
2926 }
2927 if (ro)
2928 set_disk_ro(disk, 1);
5fd6c1dc 2929 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1da177e4 2930 }
32a7627c 2931
1da177e4
LT
2932 /*
2933 * Free resources if final stop
2934 */
2935 if (!ro) {
86e6ffdd
N
2936 mdk_rdev_t *rdev;
2937 struct list_head *tmp;
1da177e4
LT
2938 struct gendisk *disk;
2939 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
2940
978f946b
N
2941 bitmap_destroy(mddev);
2942 if (mddev->bitmap_file) {
2943 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
2944 fput(mddev->bitmap_file);
2945 mddev->bitmap_file = NULL;
2946 }
2947 mddev->bitmap_offset = 0;
2948
86e6ffdd
N
2949 ITERATE_RDEV(mddev,rdev,tmp)
2950 if (rdev->raid_disk >= 0) {
2951 char nm[20];
2952 sprintf(nm, "rd%d", rdev->raid_disk);
2953 sysfs_remove_link(&mddev->kobj, nm);
2954 }
2955
1da177e4
LT
2956 export_array(mddev);
2957
2958 mddev->array_size = 0;
2959 disk = mddev->gendisk;
2960 if (disk)
2961 set_capacity(disk, 0);
2962 mddev->changed = 1;
a8a55c38 2963 } else if (mddev->pers)
1da177e4
LT
2964 printk(KERN_INFO "md: %s switched to read-only mode.\n",
2965 mdname(mddev));
2966 err = 0;
d7603b7e 2967 md_new_event(mddev);
1da177e4
LT
2968out:
2969 return err;
2970}
2971
2972static void autorun_array(mddev_t *mddev)
2973{
2974 mdk_rdev_t *rdev;
2975 struct list_head *tmp;
2976 int err;
2977
a757e64c 2978 if (list_empty(&mddev->disks))
1da177e4 2979 return;
1da177e4
LT
2980
2981 printk(KERN_INFO "md: running: ");
2982
2983 ITERATE_RDEV(mddev,rdev,tmp) {
2984 char b[BDEVNAME_SIZE];
2985 printk("<%s>", bdevname(rdev->bdev,b));
2986 }
2987 printk("\n");
2988
2989 err = do_md_run (mddev);
2990 if (err) {
2991 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
2992 do_md_stop (mddev, 0);
2993 }
2994}
2995
2996/*
2997 * let's try to run arrays based on all disks that have arrived
2998 * until now. (those are in pending_raid_disks)
2999 *
3000 * the method: pick the first pending disk, collect all disks with
3001 * the same UUID, remove all from the pending list and put them into
3002 * the 'same_array' list. Then order this list based on superblock
3003 * update time (freshest comes first), kick out 'old' disks and
3004 * compare superblocks. If everything's fine then run it.
3005 *
3006 * If "unit" is allocated, then bump its reference count
3007 */
3008static void autorun_devices(int part)
3009{
1da177e4
LT
3010 struct list_head *tmp;
3011 mdk_rdev_t *rdev0, *rdev;
3012 mddev_t *mddev;
3013 char b[BDEVNAME_SIZE];
3014
3015 printk(KERN_INFO "md: autorun ...\n");
3016 while (!list_empty(&pending_raid_disks)) {
3017 dev_t dev;
ad01c9e3 3018 LIST_HEAD(candidates);
1da177e4
LT
3019 rdev0 = list_entry(pending_raid_disks.next,
3020 mdk_rdev_t, same_set);
3021
3022 printk(KERN_INFO "md: considering %s ...\n",
3023 bdevname(rdev0->bdev,b));
3024 INIT_LIST_HEAD(&candidates);
3025 ITERATE_RDEV_PENDING(rdev,tmp)
3026 if (super_90_load(rdev, rdev0, 0) >= 0) {
3027 printk(KERN_INFO "md: adding %s ...\n",
3028 bdevname(rdev->bdev,b));
3029 list_move(&rdev->same_set, &candidates);
3030 }
3031 /*
3032 * now we have a set of devices, with all of them having
3033 * mostly sane superblocks. It's time to allocate the
3034 * mddev.
3035 */
3036 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
3037 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3038 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3039 break;
3040 }
3041 if (part)
3042 dev = MKDEV(mdp_major,
3043 rdev0->preferred_minor << MdpMinorShift);
3044 else
3045 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3046
3047 md_probe(dev, NULL, NULL);
3048 mddev = mddev_find(dev);
3049 if (!mddev) {
3050 printk(KERN_ERR
3051 "md: cannot allocate memory for md drive.\n");
3052 break;
3053 }
3054 if (mddev_lock(mddev))
3055 printk(KERN_WARNING "md: %s locked, cannot run\n",
3056 mdname(mddev));
3057 else if (mddev->raid_disks || mddev->major_version
3058 || !list_empty(&mddev->disks)) {
3059 printk(KERN_WARNING
3060 "md: %s already running, cannot run %s\n",
3061 mdname(mddev), bdevname(rdev0->bdev,b));
3062 mddev_unlock(mddev);
3063 } else {
3064 printk(KERN_INFO "md: created %s\n", mdname(mddev));
3065 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3066 list_del_init(&rdev->same_set);
3067 if (bind_rdev_to_array(rdev, mddev))
3068 export_rdev(rdev);
3069 }
3070 autorun_array(mddev);
3071 mddev_unlock(mddev);
3072 }
3073 /* on success, candidates will be empty, on error
3074 * it won't...
3075 */
3076 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3077 export_rdev(rdev);
3078 mddev_put(mddev);
3079 }
3080 printk(KERN_INFO "md: ... autorun DONE.\n");
3081}
3082
3083/*
3084 * import RAID devices based on one partition
3085 * if possible, the array gets run as well.
3086 */
3087
3088static int autostart_array(dev_t startdev)
3089{
3090 char b[BDEVNAME_SIZE];
3091 int err = -EINVAL, i;
3092 mdp_super_t *sb = NULL;
3093 mdk_rdev_t *start_rdev = NULL, *rdev;
3094
3095 start_rdev = md_import_device(startdev, 0, 0);
3096 if (IS_ERR(start_rdev))
3097 return err;
3098
3099
3100 /* NOTE: this can only work for 0.90.0 superblocks */
3101 sb = (mdp_super_t*)page_address(start_rdev->sb_page);
3102 if (sb->major_version != 0 ||
3103 sb->minor_version != 90 ) {
3104 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
3105 export_rdev(start_rdev);
3106 return err;
3107 }
3108
b2d444d7 3109 if (test_bit(Faulty, &start_rdev->flags)) {
1da177e4
LT
3110 printk(KERN_WARNING
3111 "md: can not autostart based on faulty %s!\n",
3112 bdevname(start_rdev->bdev,b));
3113 export_rdev(start_rdev);
3114 return err;
3115 }
3116 list_add(&start_rdev->same_set, &pending_raid_disks);
3117
3118 for (i = 0; i < MD_SB_DISKS; i++) {
3119 mdp_disk_t *desc = sb->disks + i;
3120 dev_t dev = MKDEV(desc->major, desc->minor);
3121
3122 if (!dev)
3123 continue;
3124 if (dev == startdev)
3125 continue;
3126 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
3127 continue;
3128 rdev = md_import_device(dev, 0, 0);
3129 if (IS_ERR(rdev))
3130 continue;
3131
3132 list_add(&rdev->same_set, &pending_raid_disks);
3133 }
3134
3135 /*
3136 * possibly return status codes from autorun_devices() here; for now autostart always reports success
3137 */
3138 autorun_devices(0);
3139 return 0;
3140
3141}
3142
3143
3144static int get_version(void __user * arg)
3145{
3146 mdu_version_t ver;
3147
3148 ver.major = MD_MAJOR_VERSION;
3149 ver.minor = MD_MINOR_VERSION;
3150 ver.patchlevel = MD_PATCHLEVEL_VERSION;
3151
3152 if (copy_to_user(arg, &ver, sizeof(ver)))
3153 return -EFAULT;
3154
3155 return 0;
3156}
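/* Illustrative userspace counterpart, not part of the original source: the
 * RAID_VERSION ioctl from <linux/raid/md_u.h> fills in an mdu_version_t.
 * A minimal sketch, assuming /dev/md0 exists:
 *
 *   #include <stdio.h>
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/raid/md_u.h>
 *
 *   int main(void)
 *   {
 *           mdu_version_t ver;
 *           int fd = open("/dev/md0", O_RDONLY);
 *           if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
 *                   printf("md driver %d.%d.%d\n", ver.major, ver.minor, ver.patchlevel);
 *           return 0;
 *   }
 */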
3157
3158static int get_array_info(mddev_t * mddev, void __user * arg)
3159{
3160 mdu_array_info_t info;
3161 int nr,working,active,failed,spare;
3162 mdk_rdev_t *rdev;
3163 struct list_head *tmp;
3164
3165 nr=working=active=failed=spare=0;
3166 ITERATE_RDEV(mddev,rdev,tmp) {
3167 nr++;
b2d444d7 3168 if (test_bit(Faulty, &rdev->flags))
1da177e4
LT
3169 failed++;
3170 else {
3171 working++;
b2d444d7 3172 if (test_bit(In_sync, &rdev->flags))
1da177e4
LT
3173 active++;
3174 else
3175 spare++;
3176 }
3177 }
3178
3179 info.major_version = mddev->major_version;
3180 info.minor_version = mddev->minor_version;
3181 info.patch_version = MD_PATCHLEVEL_VERSION;
3182 info.ctime = mddev->ctime;
3183 info.level = mddev->level;
3184 info.size = mddev->size;
284ae7ca
N
3185 if (info.size != mddev->size) /* overflow */
3186 info.size = -1;
1da177e4
LT
3187 info.nr_disks = nr;
3188 info.raid_disks = mddev->raid_disks;
3189 info.md_minor = mddev->md_minor;
3190 info.not_persistent= !mddev->persistent;
3191
3192 info.utime = mddev->utime;
3193 info.state = 0;
3194 if (mddev->in_sync)
3195 info.state = (1<<MD_SB_CLEAN);
36fa3063
N
3196 if (mddev->bitmap && mddev->bitmap_offset)
3197 info.state = (1<<MD_SB_BITMAP_PRESENT);
1da177e4
LT
3198 info.active_disks = active;
3199 info.working_disks = working;
3200 info.failed_disks = failed;
3201 info.spare_disks = spare;
3202
3203 info.layout = mddev->layout;
3204 info.chunk_size = mddev->chunk_size;
3205
3206 if (copy_to_user(arg, &info, sizeof(info)))
3207 return -EFAULT;
3208
3209 return 0;
3210}
3211
87162a28 3212static int get_bitmap_file(mddev_t * mddev, void __user * arg)
32a7627c
N
3213{
3214 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3215 char *ptr, *buf = NULL;
3216 int err = -ENOMEM;
3217
3218 file = kmalloc(sizeof(*file), GFP_KERNEL);
3219 if (!file)
3220 goto out;
3221
3222 /* bitmap disabled, zero the first byte and copy out */
3223 if (!mddev->bitmap || !mddev->bitmap->file) {
3224 file->pathname[0] = '\0';
3225 goto copy_out;
3226 }
3227
3228 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3229 if (!buf)
3230 goto out;
3231
3232 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3233 if (!ptr)
3234 goto out;
3235
3236 strcpy(file->pathname, ptr);
3237
3238copy_out:
3239 err = 0;
3240 if (copy_to_user(arg, file, sizeof(*file)))
3241 err = -EFAULT;
3242out:
3243 kfree(buf);
3244 kfree(file);
3245 return err;
3246}
3247
1da177e4
LT
3248static int get_disk_info(mddev_t * mddev, void __user * arg)
3249{
3250 mdu_disk_info_t info;
3251 unsigned int nr;
3252 mdk_rdev_t *rdev;
3253
3254 if (copy_from_user(&info, arg, sizeof(info)))
3255 return -EFAULT;
3256
3257 nr = info.number;
3258
3259 rdev = find_rdev_nr(mddev, nr);
3260 if (rdev) {
3261 info.major = MAJOR(rdev->bdev->bd_dev);
3262 info.minor = MINOR(rdev->bdev->bd_dev);
3263 info.raid_disk = rdev->raid_disk;
3264 info.state = 0;
b2d444d7 3265 if (test_bit(Faulty, &rdev->flags))
1da177e4 3266 info.state |= (1<<MD_DISK_FAULTY);
b2d444d7 3267 else if (test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
3268 info.state |= (1<<MD_DISK_ACTIVE);
3269 info.state |= (1<<MD_DISK_SYNC);
3270 }
8ddf9efe
N
3271 if (test_bit(WriteMostly, &rdev->flags))
3272 info.state |= (1<<MD_DISK_WRITEMOSTLY);
1da177e4
LT
3273 } else {
3274 info.major = info.minor = 0;
3275 info.raid_disk = -1;
3276 info.state = (1<<MD_DISK_REMOVED);
3277 }
3278
3279 if (copy_to_user(arg, &info, sizeof(info)))
3280 return -EFAULT;
3281
3282 return 0;
3283}
3284
3285static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3286{
3287 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3288 mdk_rdev_t *rdev;
3289 dev_t dev = MKDEV(info->major,info->minor);
3290
3291 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3292 return -EOVERFLOW;
3293
3294 if (!mddev->raid_disks) {
3295 int err;
3296 /* expecting a device which has a superblock */
3297 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3298 if (IS_ERR(rdev)) {
3299 printk(KERN_WARNING
3300 "md: md_import_device returned %ld\n",
3301 PTR_ERR(rdev));
3302 return PTR_ERR(rdev);
3303 }
3304 if (!list_empty(&mddev->disks)) {
3305 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3306 mdk_rdev_t, same_set);
3307 int err = super_types[mddev->major_version]
3308 .load_super(rdev, rdev0, mddev->minor_version);
3309 if (err < 0) {
3310 printk(KERN_WARNING
3311 "md: %s has different UUID to %s\n",
3312 bdevname(rdev->bdev,b),
3313 bdevname(rdev0->bdev,b2));
3314 export_rdev(rdev);
3315 return -EINVAL;
3316 }
3317 }
3318 err = bind_rdev_to_array(rdev, mddev);
3319 if (err)
3320 export_rdev(rdev);
3321 return err;
3322 }
3323
3324 /*
3325 * add_new_disk can be used once the array is assembled
3326 * to add "hot spares". They must already have a superblock
3327 * written
3328 */
3329 if (mddev->pers) {
3330 int err;
3331 if (!mddev->pers->hot_add_disk) {
3332 printk(KERN_WARNING
3333 "%s: personality does not support diskops!\n",
3334 mdname(mddev));
3335 return -EINVAL;
3336 }
7b1e35f6
N
3337 if (mddev->persistent)
3338 rdev = md_import_device(dev, mddev->major_version,
3339 mddev->minor_version);
3340 else
3341 rdev = md_import_device(dev, -1, -1);
1da177e4
LT
3342 if (IS_ERR(rdev)) {
3343 printk(KERN_WARNING
3344 "md: md_import_device returned %ld\n",
3345 PTR_ERR(rdev));
3346 return PTR_ERR(rdev);
3347 }
41158c7e
N
3348 /* set saved_raid_disk if appropriate */
3349 if (!mddev->persistent) {
3350 if (info->state & (1<<MD_DISK_SYNC) &&
3351 info->raid_disk < mddev->raid_disks)
3352 rdev->raid_disk = info->raid_disk;
3353 else
3354 rdev->raid_disk = -1;
3355 } else
3356 super_types[mddev->major_version].
3357 validate_super(mddev, rdev);
3358 rdev->saved_raid_disk = rdev->raid_disk;
3359
b2d444d7 3360 clear_bit(In_sync, &rdev->flags); /* just to be sure */
8ddf9efe
N
3361 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3362 set_bit(WriteMostly, &rdev->flags);
3363
1da177e4
LT
3364 rdev->raid_disk = -1;
3365 err = bind_rdev_to_array(rdev, mddev);
7c7546cc
N
3366 if (!err && !mddev->pers->hot_remove_disk) {
3367 /* If there is hot_add_disk but no hot_remove_disk
3368 * then added disks for geometry changes,
3369 * and should be added immediately.
3370 */
3371 super_types[mddev->major_version].
3372 validate_super(mddev, rdev);
3373 err = mddev->pers->hot_add_disk(mddev, rdev);
3374 if (err)
3375 unbind_rdev_from_array(rdev);
3376 }
1da177e4
LT
3377 if (err)
3378 export_rdev(rdev);
c361777f
N
3379
3380 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
005eca5e 3381 md_wakeup_thread(mddev->thread);
1da177e4
LT
3382 return err;
3383 }
3384
3385 /* otherwise, add_new_disk is only allowed
3386 * for major_version==0 superblocks
3387 */
3388 if (mddev->major_version != 0) {
3389 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3390 mdname(mddev));
3391 return -EINVAL;
3392 }
3393
3394 if (!(info->state & (1<<MD_DISK_FAULTY))) {
3395 int err;
3396 rdev = md_import_device (dev, -1, 0);
3397 if (IS_ERR(rdev)) {
3398 printk(KERN_WARNING
3399 "md: error, md_import_device() returned %ld\n",
3400 PTR_ERR(rdev));
3401 return PTR_ERR(rdev);
3402 }
3403 rdev->desc_nr = info->number;
3404 if (info->raid_disk < mddev->raid_disks)
3405 rdev->raid_disk = info->raid_disk;
3406 else
3407 rdev->raid_disk = -1;
3408
b2d444d7
N
3409 rdev->flags = 0;
3410
1da177e4 3411 if (rdev->raid_disk < mddev->raid_disks)
b2d444d7
N
3412 if (info->state & (1<<MD_DISK_SYNC))
3413 set_bit(In_sync, &rdev->flags);
1da177e4 3414
8ddf9efe
N
3415 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3416 set_bit(WriteMostly, &rdev->flags);
3417
1da177e4
LT
3418 if (!mddev->persistent) {
3419 printk(KERN_INFO "md: nonpersistent superblock ...\n");
3420 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3421 } else
3422 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3423 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3424
2bf071bf
N
3425 err = bind_rdev_to_array(rdev, mddev);
3426 if (err) {
3427 export_rdev(rdev);
3428 return err;
3429 }
1da177e4
LT
3430 }
3431
3432 return 0;
3433}
3434
3435static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3436{
3437 char b[BDEVNAME_SIZE];
3438 mdk_rdev_t *rdev;
3439
3440 if (!mddev->pers)
3441 return -ENODEV;
3442
3443 rdev = find_rdev(mddev, dev);
3444 if (!rdev)
3445 return -ENXIO;
3446
3447 if (rdev->raid_disk >= 0)
3448 goto busy;
3449
3450 kick_rdev_from_array(rdev);
3451 md_update_sb(mddev);
d7603b7e 3452 md_new_event(mddev);
1da177e4
LT
3453
3454 return 0;
3455busy:
3456 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3457 bdevname(rdev->bdev,b), mdname(mddev));
3458 return -EBUSY;
3459}
3460
3461static int hot_add_disk(mddev_t * mddev, dev_t dev)
3462{
3463 char b[BDEVNAME_SIZE];
3464 int err;
3465 unsigned int size;
3466 mdk_rdev_t *rdev;
3467
3468 if (!mddev->pers)
3469 return -ENODEV;
3470
3471 if (mddev->major_version != 0) {
3472 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3473 " version-0 superblocks.\n",
3474 mdname(mddev));
3475 return -EINVAL;
3476 }
3477 if (!mddev->pers->hot_add_disk) {
3478 printk(KERN_WARNING
3479 "%s: personality does not support diskops!\n",
3480 mdname(mddev));
3481 return -EINVAL;
3482 }
3483
3484 rdev = md_import_device (dev, -1, 0);
3485 if (IS_ERR(rdev)) {
3486 printk(KERN_WARNING
3487 "md: error, md_import_device() returned %ld\n",
3488 PTR_ERR(rdev));
3489 return -EINVAL;
3490 }
3491
3492 if (mddev->persistent)
3493 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3494 else
3495 rdev->sb_offset =
3496 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3497
3498 size = calc_dev_size(rdev, mddev->chunk_size);
3499 rdev->size = size;
3500
b2d444d7 3501 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
3502 printk(KERN_WARNING
3503 "md: can not hot-add faulty %s disk to %s!\n",
3504 bdevname(rdev->bdev,b), mdname(mddev));
3505 err = -EINVAL;
3506 goto abort_export;
3507 }
b2d444d7 3508 clear_bit(In_sync, &rdev->flags);
1da177e4 3509 rdev->desc_nr = -1;
2bf071bf
N
3510 err = bind_rdev_to_array(rdev, mddev);
3511 if (err)
3512 goto abort_export;
1da177e4
LT
3513
3514 /*
3515 * The rest should ideally be atomic: disk failures can be
3516 * noticed from interrupt context ...
3517 */
3518
3519 if (rdev->desc_nr == mddev->max_disks) {
3520 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3521 mdname(mddev));
3522 err = -EBUSY;
3523 goto abort_unbind_export;
3524 }
3525
3526 rdev->raid_disk = -1;
3527
3528 md_update_sb(mddev);
3529
3530 /*
3531 * Kick recovery, maybe this spare has to be added to the
3532 * array immediately.
3533 */
3534 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3535 md_wakeup_thread(mddev->thread);
d7603b7e 3536 md_new_event(mddev);
1da177e4
LT
3537 return 0;
3538
3539abort_unbind_export:
3540 unbind_rdev_from_array(rdev);
3541
3542abort_export:
3543 export_rdev(rdev);
3544 return err;
3545}
3546
32a7627c
N
3547/* similar to deny_write_access, but accounts for our holding a reference
3548 * to the file ourselves */
3549static int deny_bitmap_write_access(struct file * file)
3550{
3551 struct inode *inode = file->f_mapping->host;
3552
3553 spin_lock(&inode->i_lock);
3554 if (atomic_read(&inode->i_writecount) > 1) {
3555 spin_unlock(&inode->i_lock);
3556 return -ETXTBSY;
3557 }
3558 atomic_set(&inode->i_writecount, -1);
3559 spin_unlock(&inode->i_lock);
3560
3561 return 0;
3562}
3563
3564static int set_bitmap_file(mddev_t *mddev, int fd)
3565{
3566 int err;
3567
36fa3063
N
3568 if (mddev->pers) {
3569 if (!mddev->pers->quiesce)
3570 return -EBUSY;
3571 if (mddev->recovery || mddev->sync_thread)
3572 return -EBUSY;
3573 /* we should be able to change the bitmap.. */
3574 }
32a7627c 3575
32a7627c 3576
36fa3063
N
3577 if (fd >= 0) {
3578 if (mddev->bitmap)
3579 return -EEXIST; /* cannot add when bitmap is present */
3580 mddev->bitmap_file = fget(fd);
32a7627c 3581
36fa3063
N
3582 if (mddev->bitmap_file == NULL) {
3583 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3584 mdname(mddev));
3585 return -EBADF;
3586 }
3587
3588 err = deny_bitmap_write_access(mddev->bitmap_file);
3589 if (err) {
3590 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3591 mdname(mddev));
3592 fput(mddev->bitmap_file);
3593 mddev->bitmap_file = NULL;
3594 return err;
3595 }
a654b9d8 3596 mddev->bitmap_offset = 0; /* file overrides offset */
36fa3063
N
3597 } else if (mddev->bitmap == NULL)
3598 return -ENOENT; /* cannot remove what isn't there */
3599 err = 0;
3600 if (mddev->pers) {
3601 mddev->pers->quiesce(mddev, 1);
3602 if (fd >= 0)
3603 err = bitmap_create(mddev);
3604 if (fd < 0 || err)
3605 bitmap_destroy(mddev);
3606 mddev->pers->quiesce(mddev, 0);
3607 } else if (fd < 0) {
3608 if (mddev->bitmap_file)
3609 fput(mddev->bitmap_file);
3610 mddev->bitmap_file = NULL;
3611 }
3612
32a7627c
N
3613 return err;
3614}
3615
1da177e4
LT
3616/*
3617 * set_array_info is used two different ways
3618 * The original usage is when creating a new array.
3619 * In this usage, raid_disks is > 0 and it together with
3620 * level, size, not_persistent,layout,chunksize determine the
3621 * shape of the array.
3622 * This will always create an array with a type-0.90.0 superblock.
3623 * The newer usage is when assembling an array.
3624 * In this case raid_disks will be 0, and the major_version field is
3625 * used to determine which style super-blocks are to be found on the devices.
3626 * The minor and patch _version numbers are also kept in case the
3627 * super_block handler wishes to interpret them.
3628 */
3629static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3630{
3631
3632 if (info->raid_disks == 0) {
3633 /* just setting version number for superblock loading */
3634 if (info->major_version < 0 ||
3635 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3636 super_types[info->major_version].name == NULL) {
3637 /* maybe try to auto-load a module? */
3638 printk(KERN_INFO
3639 "md: superblock version %d not known\n",
3640 info->major_version);
3641 return -EINVAL;
3642 }
3643 mddev->major_version = info->major_version;
3644 mddev->minor_version = info->minor_version;
3645 mddev->patch_version = info->patch_version;
3646 return 0;
3647 }
3648 mddev->major_version = MD_MAJOR_VERSION;
3649 mddev->minor_version = MD_MINOR_VERSION;
3650 mddev->patch_version = MD_PATCHLEVEL_VERSION;
3651 mddev->ctime = get_seconds();
3652
3653 mddev->level = info->level;
17115e03 3654 mddev->clevel[0] = 0;
1da177e4
LT
3655 mddev->size = info->size;
3656 mddev->raid_disks = info->raid_disks;
3657 /* don't set md_minor, it is determined by which /dev/md* was
3658 * opened
3659 */
3660 if (info->state & (1<<MD_SB_CLEAN))
3661 mddev->recovery_cp = MaxSector;
3662 else
3663 mddev->recovery_cp = 0;
3664 mddev->persistent = ! info->not_persistent;
3665
3666 mddev->layout = info->layout;
3667 mddev->chunk_size = info->chunk_size;
3668
3669 mddev->max_disks = MD_SB_DISKS;
3670
3671 mddev->sb_dirty = 1;
3672
b2a2703c
N
3673 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
3674 mddev->bitmap_offset = 0;
3675
f6705578
N
3676 mddev->reshape_position = MaxSector;
3677
1da177e4
LT
3678 /*
3679 * Generate a 128 bit UUID
3680 */
3681 get_random_bytes(mddev->uuid, 16);
3682
f6705578
N
3683 mddev->new_level = mddev->level;
3684 mddev->new_chunk = mddev->chunk_size;
3685 mddev->new_layout = mddev->layout;
3686 mddev->delta_disks = 0;
3687
1da177e4
LT
3688 return 0;
3689}
3690
a35b0d69
N
3691static int update_size(mddev_t *mddev, unsigned long size)
3692{
3693 mdk_rdev_t * rdev;
3694 int rv;
3695 struct list_head *tmp;
8ddeeae5 3696 int fit = (size == 0);
a35b0d69
N
3697
3698 if (mddev->pers->resize == NULL)
3699 return -EINVAL;
3700 /* The "size" is the amount of each device that is used.
3701 * This can only make sense for arrays with redundancy.
3702 * linear and raid0 always use whatever space is available
3703 * We can only consider changing the size if no resync
3704 * or reconstruction is happening, and if the new size
3705 * is acceptable. It must fit before the sb_offset or,
3706 * if that is <data_offset, it must fit before the
3707 * size of each device.
3708 * If size is zero, we find the largest size that fits.
3709 */
3710 if (mddev->sync_thread)
3711 return -EBUSY;
3712 ITERATE_RDEV(mddev,rdev,tmp) {
3713 sector_t avail;
a35b0d69
N
3714 if (rdev->sb_offset > rdev->data_offset)
3715 avail = (rdev->sb_offset*2) - rdev->data_offset;
3716 else
3717 avail = get_capacity(rdev->bdev->bd_disk)
3718 - rdev->data_offset;
3719 if (fit && (size == 0 || size > avail/2))
3720 size = avail/2;
3721 if (avail < ((sector_t)size << 1))
3722 return -ENOSPC;
3723 }
3724 rv = mddev->pers->resize(mddev, (sector_t)size *2);
3725 if (!rv) {
3726 struct block_device *bdev;
3727
3728 bdev = bdget_disk(mddev->gendisk, 0);
3729 if (bdev) {
1b1dcc1b 3730 mutex_lock(&bdev->bd_inode->i_mutex);
6d89332b 3731 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
1b1dcc1b 3732 mutex_unlock(&bdev->bd_inode->i_mutex);
a35b0d69
N
3733 bdput(bdev);
3734 }
3735 }
3736 return rv;
3737}
3738
da943b99
N
3739static int update_raid_disks(mddev_t *mddev, int raid_disks)
3740{
3741 int rv;
3742 /* change the number of raid disks */
63c70c4f 3743 if (mddev->pers->check_reshape == NULL)
da943b99
N
3744 return -EINVAL;
3745 if (raid_disks <= 0 ||
3746 raid_disks >= mddev->max_disks)
3747 return -EINVAL;
63c70c4f 3748 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
da943b99 3749 return -EBUSY;
63c70c4f
N
3750 mddev->delta_disks = raid_disks - mddev->raid_disks;
3751
3752 rv = mddev->pers->check_reshape(mddev);
da943b99
N
3753 return rv;
3754}
3755
3756
1da177e4
LT
3757/*
3758 * update_array_info is used to change the configuration of an
3759 * on-line array.
3760 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
3761 * fields in the info are checked against the array.
3762 * Any differences that cannot be handled will cause an error.
3763 * Normally, only one change can be managed at a time.
3764 */
3765static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
3766{
3767 int rv = 0;
3768 int cnt = 0;
36fa3063
N
3769 int state = 0;
3770
3771 /* calculate expected state, ignoring low bits */
3772 if (mddev->bitmap && mddev->bitmap_offset)
3773 state |= (1 << MD_SB_BITMAP_PRESENT);
1da177e4
LT
3774
3775 if (mddev->major_version != info->major_version ||
3776 mddev->minor_version != info->minor_version ||
3777/* mddev->patch_version != info->patch_version || */
3778 mddev->ctime != info->ctime ||
3779 mddev->level != info->level ||
3780/* mddev->layout != info->layout || */
3781 !mddev->persistent != info->not_persistent||
36fa3063
N
3782 mddev->chunk_size != info->chunk_size ||
3783 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
3784 ((state^info->state) & 0xfffffe00)
3785 )
1da177e4
LT
3786 return -EINVAL;
3787 /* Check there is only one change */
284ae7ca 3788 if (info->size >= 0 && mddev->size != info->size) cnt++;
1da177e4
LT
3789 if (mddev->raid_disks != info->raid_disks) cnt++;
3790 if (mddev->layout != info->layout) cnt++;
36fa3063 3791 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
1da177e4
LT
3792 if (cnt == 0) return 0;
3793 if (cnt > 1) return -EINVAL;
3794
3795 if (mddev->layout != info->layout) {
3796 /* Change layout
3797 * we don't need to do anything at the md level, the
3798 * personality will take care of it all.
3799 */
3800 if (mddev->pers->reconfig == NULL)
3801 return -EINVAL;
3802 else
3803 return mddev->pers->reconfig(mddev, info->layout, -1);
3804 }
284ae7ca 3805 if (info->size >= 0 && mddev->size != info->size)
a35b0d69
N
3806 rv = update_size(mddev, info->size);
3807
da943b99
N
3808 if (mddev->raid_disks != info->raid_disks)
3809 rv = update_raid_disks(mddev, info->raid_disks);
3810
36fa3063
N
3811 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
3812 if (mddev->pers->quiesce == NULL)
3813 return -EINVAL;
3814 if (mddev->recovery || mddev->sync_thread)
3815 return -EBUSY;
3816 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
3817 /* add the bitmap */
3818 if (mddev->bitmap)
3819 return -EEXIST;
3820 if (mddev->default_bitmap_offset == 0)
3821 return -EINVAL;
3822 mddev->bitmap_offset = mddev->default_bitmap_offset;
3823 mddev->pers->quiesce(mddev, 1);
3824 rv = bitmap_create(mddev);
3825 if (rv)
3826 bitmap_destroy(mddev);
3827 mddev->pers->quiesce(mddev, 0);
3828 } else {
3829 /* remove the bitmap */
3830 if (!mddev->bitmap)
3831 return -ENOENT;
3832 if (mddev->bitmap->file)
3833 return -EINVAL;
3834 mddev->pers->quiesce(mddev, 1);
3835 bitmap_destroy(mddev);
3836 mddev->pers->quiesce(mddev, 0);
3837 mddev->bitmap_offset = 0;
3838 }
3839 }
1da177e4
LT
3840 md_update_sb(mddev);
3841 return rv;
3842}
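/*
 * Illustrative userspace sketch (not part of md.c): because
 * update_array_info() accepts only one change per call, a management
 * tool re-reads the current configuration with GET_ARRAY_INFO and
 * resubmits it with a single field modified.  The helper name and
 * device path are assumptions.
 */
#if 0	/* example only, not compiled with the driver */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int md_change_raid_disks(const char *dev, int raid_disks)
{
	mdu_array_info_t info;
	int rv, fd = open(dev, O_RDWR);	/* e.g. "/dev/md0" */

	if (fd < 0)
		return -1;
	rv = ioctl(fd, GET_ARRAY_INFO, &info);
	if (rv == 0) {
		info.raid_disks = raid_disks;	/* the one change */
		rv = ioctl(fd, SET_ARRAY_INFO, &info);
	}
	close(fd);
	return rv;
}
#endif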
3843
3844static int set_disk_faulty(mddev_t *mddev, dev_t dev)
3845{
3846 mdk_rdev_t *rdev;
3847
3848 if (mddev->pers == NULL)
3849 return -ENODEV;
3850
3851 rdev = find_rdev(mddev, dev);
3852 if (!rdev)
3853 return -ENODEV;
3854
3855 md_error(mddev, rdev);
3856 return 0;
3857}
3858
a885c8c4
CH
3859static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3860{
3861 mddev_t *mddev = bdev->bd_disk->private_data;
3862
3863 geo->heads = 2;
3864 geo->sectors = 4;
3865 geo->cylinders = get_capacity(mddev->gendisk) / 8;
3866 return 0;
3867}
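/*
 * Worked example of the fake geometry above (illustrative numbers):
 * an array of 8388608 sectors (4 GiB) reports heads = 2, sectors = 4,
 * and cylinders = 8388608 / (2 * 4) = 1048576.
 */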
3868
1da177e4
LT
3869static int md_ioctl(struct inode *inode, struct file *file,
3870 unsigned int cmd, unsigned long arg)
3871{
3872 int err = 0;
3873 void __user *argp = (void __user *)arg;
1da177e4
LT
3874 mddev_t *mddev = NULL;
3875
3876 if (!capable(CAP_SYS_ADMIN))
3877 return -EACCES;
3878
3879 /*
3880 * Commands dealing with the RAID driver but not any
3881 * particular array:
3882 */
3883 switch (cmd)
3884 {
3885 case RAID_VERSION:
3886 err = get_version(argp);
3887 goto done;
3888
3889 case PRINT_RAID_DEBUG:
3890 err = 0;
3891 md_print_devices();
3892 goto done;
3893
3894#ifndef MODULE
3895 case RAID_AUTORUN:
3896 err = 0;
3897 autostart_arrays(arg);
3898 goto done;
3899#endif
3900 default:;
3901 }
3902
3903 /*
3904 * Commands creating/starting a new array:
3905 */
3906
3907 mddev = inode->i_bdev->bd_disk->private_data;
3908
3909 if (!mddev) {
3910 BUG();
3911 goto abort;
3912 }
3913
3914
3915 if (cmd == START_ARRAY) {
3916 /* START_ARRAY doesn't need to lock the array as autostart_array
3917 * does the locking, and it could even be a different array
3918 */
3919 static int cnt = 3;
3920 if (cnt > 0 ) {
3921 printk(KERN_WARNING
3922 "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
e8a00334 3923 "This will not be supported beyond July 2006\n",
1da177e4
LT
3924 current->comm, current->pid);
3925 cnt--;
3926 }
3927 err = autostart_array(new_decode_dev(arg));
3928 if (err) {
3929 printk(KERN_WARNING "md: autostart failed!\n");
3930 goto abort;
3931 }
3932 goto done;
3933 }
3934
3935 err = mddev_lock(mddev);
3936 if (err) {
3937 printk(KERN_INFO
3938 "md: ioctl lock interrupted, reason %d, cmd %d\n",
3939 err, cmd);
3940 goto abort;
3941 }
3942
3943 switch (cmd)
3944 {
3945 case SET_ARRAY_INFO:
3946 {
3947 mdu_array_info_t info;
3948 if (!arg)
3949 memset(&info, 0, sizeof(info));
3950 else if (copy_from_user(&info, argp, sizeof(info))) {
3951 err = -EFAULT;
3952 goto abort_unlock;
3953 }
3954 if (mddev->pers) {
3955 err = update_array_info(mddev, &info);
3956 if (err) {
3957 printk(KERN_WARNING "md: couldn't update"
3958 " array info. %d\n", err);
3959 goto abort_unlock;
3960 }
3961 goto done_unlock;
3962 }
3963 if (!list_empty(&mddev->disks)) {
3964 printk(KERN_WARNING
3965 "md: array %s already has disks!\n",
3966 mdname(mddev));
3967 err = -EBUSY;
3968 goto abort_unlock;
3969 }
3970 if (mddev->raid_disks) {
3971 printk(KERN_WARNING
3972 "md: array %s already initialised!\n",
3973 mdname(mddev));
3974 err = -EBUSY;
3975 goto abort_unlock;
3976 }
3977 err = set_array_info(mddev, &info);
3978 if (err) {
3979 printk(KERN_WARNING "md: couldn't set"
3980 " array info. %d\n", err);
3981 goto abort_unlock;
3982 }
3983 }
3984 goto done_unlock;
3985
3986 default:;
3987 }
3988
3989 /*
3990 * Commands querying/configuring an existing array:
3991 */
32a7627c
N
3992 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
3993 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
3994 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
3995 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
1da177e4
LT
3996 err = -ENODEV;
3997 goto abort_unlock;
3998 }
3999
4000 /*
4001 * Commands even a read-only array can execute:
4002 */
4003 switch (cmd)
4004 {
4005 case GET_ARRAY_INFO:
4006 err = get_array_info(mddev, argp);
4007 goto done_unlock;
4008
32a7627c 4009 case GET_BITMAP_FILE:
87162a28 4010 err = get_bitmap_file(mddev, argp);
32a7627c
N
4011 goto done_unlock;
4012
1da177e4
LT
4013 case GET_DISK_INFO:
4014 err = get_disk_info(mddev, argp);
4015 goto done_unlock;
4016
4017 case RESTART_ARRAY_RW:
4018 err = restart_array(mddev);
4019 goto done_unlock;
4020
4021 case STOP_ARRAY:
4022 err = do_md_stop (mddev, 0);
4023 goto done_unlock;
4024
4025 case STOP_ARRAY_RO:
4026 err = do_md_stop (mddev, 1);
4027 goto done_unlock;
4028
4029 /*
4030 * We have a problem here: there is no easy way to give a CHS
4031 * virtual geometry. We currently pretend to have a 2-head,
4032 * 4-sector geometry (with a BIG number of cylinders...). This drives
4033 * dosfs just mad... ;-)
4034 */
1da177e4
LT
4035 }
4036
4037 /*
4038 * The remaining ioctls are changing the state of the
f91de92e
N
4039 * superblock, so we do not allow them on read-only arrays.
4040 * However non-MD ioctls (e.g. get-size) will still come through
4041 * here and hit the 'default' below, so only disallow
4042 * 'md' ioctls, and switch to rw mode if started auto-readonly.
1da177e4 4043 */
f91de92e
N
4044 if (_IOC_TYPE(cmd) == MD_MAJOR &&
4045 mddev->ro && mddev->pers) {
4046 if (mddev->ro == 2) {
4047 mddev->ro = 0;
4048 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4049 md_wakeup_thread(mddev->thread);
4050
4051 } else {
4052 err = -EROFS;
4053 goto abort_unlock;
4054 }
1da177e4
LT
4055 }
4056
4057 switch (cmd)
4058 {
4059 case ADD_NEW_DISK:
4060 {
4061 mdu_disk_info_t info;
4062 if (copy_from_user(&info, argp, sizeof(info)))
4063 err = -EFAULT;
4064 else
4065 err = add_new_disk(mddev, &info);
4066 goto done_unlock;
4067 }
4068
4069 case HOT_REMOVE_DISK:
4070 err = hot_remove_disk(mddev, new_decode_dev(arg));
4071 goto done_unlock;
4072
4073 case HOT_ADD_DISK:
4074 err = hot_add_disk(mddev, new_decode_dev(arg));
4075 goto done_unlock;
4076
4077 case SET_DISK_FAULTY:
4078 err = set_disk_faulty(mddev, new_decode_dev(arg));
4079 goto done_unlock;
4080
4081 case RUN_ARRAY:
4082 err = do_md_run (mddev);
4083 goto done_unlock;
4084
32a7627c
N
4085 case SET_BITMAP_FILE:
4086 err = set_bitmap_file(mddev, (int)arg);
4087 goto done_unlock;
4088
1da177e4 4089 default:
1da177e4
LT
4090 err = -EINVAL;
4091 goto abort_unlock;
4092 }
4093
4094done_unlock:
4095abort_unlock:
4096 mddev_unlock(mddev);
4097
4098 return err;
4099done:
4100 if (err)
4101 MD_BUG();
4102abort:
4103 return err;
4104}
4105
4106static int md_open(struct inode *inode, struct file *file)
4107{
4108 /*
4109 * Succeed if we can lock the mddev, which confirms that
4110 * it isn't being stopped right now.
4111 */
4112 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4113 int err;
4114
4115 if ((err = mddev_lock(mddev)))
4116 goto out;
4117
4118 err = 0;
4119 mddev_get(mddev);
4120 mddev_unlock(mddev);
4121
4122 check_disk_change(inode->i_bdev);
4123 out:
4124 return err;
4125}
4126
4127static int md_release(struct inode *inode, struct file * file)
4128{
4129 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4130
4131 if (!mddev)
4132 BUG();
4133 mddev_put(mddev);
4134
4135 return 0;
4136}
4137
4138static int md_media_changed(struct gendisk *disk)
4139{
4140 mddev_t *mddev = disk->private_data;
4141
4142 return mddev->changed;
4143}
4144
4145static int md_revalidate(struct gendisk *disk)
4146{
4147 mddev_t *mddev = disk->private_data;
4148
4149 mddev->changed = 0;
4150 return 0;
4151}
4152static struct block_device_operations md_fops =
4153{
4154 .owner = THIS_MODULE,
4155 .open = md_open,
4156 .release = md_release,
4157 .ioctl = md_ioctl,
a885c8c4 4158 .getgeo = md_getgeo,
1da177e4
LT
4159 .media_changed = md_media_changed,
4160 .revalidate_disk= md_revalidate,
4161};
4162
75c96f85 4163static int md_thread(void * arg)
1da177e4
LT
4164{
4165 mdk_thread_t *thread = arg;
4166
1da177e4
LT
4167 /*
4168 * md_thread is a 'system-thread'; its priority should be very
4169 * high. We avoid resource deadlocks individually in each
4170 * raid personality. (RAID5 does preallocation) We also use RR and
4171 * the very same RT priority as kswapd, thus we will never get
4172 * into a priority inversion deadlock.
4173 *
4174 * we definitely have to have equal or higher priority than
4175 * bdflush, otherwise bdflush will deadlock if there are too
4176 * many dirty RAID5 blocks.
4177 */
1da177e4 4178
6985c43f 4179 allow_signal(SIGKILL);
a6fb0934 4180 while (!kthread_should_stop()) {
1da177e4 4181
93588e22
N
4182 /* We need to wait INTERRUPTIBLE so that
4183 * we don't add to the load-average.
4184 * That means we need to be sure no signals are
4185 * pending
4186 */
4187 if (signal_pending(current))
4188 flush_signals(current);
4189
4190 wait_event_interruptible_timeout
4191 (thread->wqueue,
4192 test_bit(THREAD_WAKEUP, &thread->flags)
4193 || kthread_should_stop(),
4194 thread->timeout);
3e1d1d28 4195 try_to_freeze();
1da177e4
LT
4196
4197 clear_bit(THREAD_WAKEUP, &thread->flags);
4198
787453c2 4199 thread->run(thread->mddev);
1da177e4 4200 }
a6fb0934 4201
1da177e4
LT
4202 return 0;
4203}
4204
4205void md_wakeup_thread(mdk_thread_t *thread)
4206{
4207 if (thread) {
4208 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4209 set_bit(THREAD_WAKEUP, &thread->flags);
4210 wake_up(&thread->wqueue);
4211 }
4212}
4213
4214mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4215 const char *name)
4216{
4217 mdk_thread_t *thread;
1da177e4 4218
9ffae0cf 4219 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
1da177e4
LT
4220 if (!thread)
4221 return NULL;
4222
1da177e4
LT
4223 init_waitqueue_head(&thread->wqueue);
4224
1da177e4
LT
4225 thread->run = run;
4226 thread->mddev = mddev;
32a7627c 4227 thread->timeout = MAX_SCHEDULE_TIMEOUT;
6985c43f 4228 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
a6fb0934 4229 if (IS_ERR(thread->tsk)) {
1da177e4
LT
4230 kfree(thread);
4231 return NULL;
4232 }
1da177e4
LT
4233 return thread;
4234}
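/*
 * Illustrative sketch (not part of md.c): a personality typically
 * creates its daemon thread from its run() method and then pokes it
 * with md_wakeup_thread() whenever work is queued; md_thread() above
 * supplies the sleep/wakeup loop.  The "examplepers_*" names are
 * assumptions, not a real personality.
 */
#if 0	/* example only */
static void examplepers_daemon(mddev_t *mddev)
{
	/* drain this array's queued work, then return; the next
	 * md_wakeup_thread() call brings us back here. */
}

static int examplepers_run(mddev_t *mddev)
{
	mddev->thread = md_register_thread(examplepers_daemon, mddev,
					   "%s_example");
	if (!mddev->thread)
		return -ENOMEM;
	return 0;
}
/* producers then call md_wakeup_thread(mddev->thread) after queueing work */
#endif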
4235
1da177e4
LT
4236void md_unregister_thread(mdk_thread_t *thread)
4237{
d28446fe 4238 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
a6fb0934
N
4239
4240 kthread_stop(thread->tsk);
1da177e4
LT
4241 kfree(thread);
4242}
4243
4244void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4245{
4246 if (!mddev) {
4247 MD_BUG();
4248 return;
4249 }
4250
b2d444d7 4251 if (!rdev || test_bit(Faulty, &rdev->flags))
1da177e4 4252 return;
32a7627c 4253/*
1da177e4
LT
4254 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4255 mdname(mddev),
4256 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4257 __builtin_return_address(0),__builtin_return_address(1),
4258 __builtin_return_address(2),__builtin_return_address(3));
32a7627c 4259*/
1da177e4
LT
4260 if (!mddev->pers->error_handler)
4261 return;
4262 mddev->pers->error_handler(mddev,rdev);
4263 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4264 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4265 md_wakeup_thread(mddev->thread);
c331eb04 4266 md_new_event_inintr(mddev);
1da177e4
LT
4267}
4268
4269/* seq_file implementation /proc/mdstat */
4270
4271static void status_unused(struct seq_file *seq)
4272{
4273 int i = 0;
4274 mdk_rdev_t *rdev;
4275 struct list_head *tmp;
4276
4277 seq_printf(seq, "unused devices: ");
4278
4279 ITERATE_RDEV_PENDING(rdev,tmp) {
4280 char b[BDEVNAME_SIZE];
4281 i++;
4282 seq_printf(seq, "%s ",
4283 bdevname(rdev->bdev,b));
4284 }
4285 if (!i)
4286 seq_printf(seq, "<none>");
4287
4288 seq_printf(seq, "\n");
4289}
4290
4291
4292static void status_resync(struct seq_file *seq, mddev_t * mddev)
4293{
4588b42e
N
4294 sector_t max_blocks, resync, res;
4295 unsigned long dt, db, rt;
4296 int scale;
4297 unsigned int per_milli;
1da177e4
LT
4298
4299 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4300
4301 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4302 max_blocks = mddev->resync_max_sectors >> 1;
4303 else
4304 max_blocks = mddev->size;
4305
4306 /*
4307 * Should not happen.
4308 */
4309 if (!max_blocks) {
4310 MD_BUG();
4311 return;
4312 }
4588b42e
N
4313 /* Pick 'scale' such that (resync>>scale)*1000 will fit
4314 * in a sector_t, and (max_blocks>>scale) will fit in a
4315 * u32, as those are the requirements for sector_div.
4316 * Thus 'scale' must be at least 10
4317 */
4318 scale = 10;
4319 if (sizeof(sector_t) > sizeof(unsigned long)) {
4320 while ( max_blocks/2 > (1ULL<<(scale+32)))
4321 scale++;
4322 }
4323 res = (resync>>scale)*1000;
4324 sector_div(res, (u32)((max_blocks>>scale)+1));
4325
4326 per_milli = res;
1da177e4 4327 {
4588b42e 4328 int i, x = per_milli/50, y = 20-x;
1da177e4
LT
4329 seq_printf(seq, "[");
4330 for (i = 0; i < x; i++)
4331 seq_printf(seq, "=");
4332 seq_printf(seq, ">");
4333 for (i = 0; i < y; i++)
4334 seq_printf(seq, ".");
4335 seq_printf(seq, "] ");
4336 }
4588b42e 4337 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
ccfcc3c1
N
4338 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4339 "reshape" :
1da177e4 4340 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
ccfcc3c1 4341 "resync" : "recovery")),
4588b42e
N
4342 per_milli/10, per_milli % 10,
4343 (unsigned long long) resync,
4344 (unsigned long long) max_blocks);
1da177e4
LT
4345
4346 /*
4347 * We do not want to overflow, so the order of operands and
4348 * the * 100 / 100 trick are important. We do a +1 to be
4349 * safe against division by zero. We only estimate anyway.
4350 *
4351 * dt: time from mark until now
4352 * db: blocks written from mark until now
4353 * rt: remaining time
4354 */
4355 dt = ((jiffies - mddev->resync_mark) / HZ);
4356 if (!dt) dt++;
4357 db = resync - (mddev->resync_mark_cnt/2);
4588b42e 4358 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100;
1da177e4
LT
4359
4360 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4361
4362 seq_printf(seq, " speed=%ldK/sec", db/dt);
4363}
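/*
 * The progress line built above shows up in /proc/mdstat roughly as
 * (illustrative values):
 *
 *   [=====>...............]  resync = 27.3% (143155200/524288000)
 *   finish=312.2min speed=20348K/sec
 */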
4364
4365static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4366{
4367 struct list_head *tmp;
4368 loff_t l = *pos;
4369 mddev_t *mddev;
4370
4371 if (l >= 0x10000)
4372 return NULL;
4373 if (!l--)
4374 /* header */
4375 return (void*)1;
4376
4377 spin_lock(&all_mddevs_lock);
4378 list_for_each(tmp,&all_mddevs)
4379 if (!l--) {
4380 mddev = list_entry(tmp, mddev_t, all_mddevs);
4381 mddev_get(mddev);
4382 spin_unlock(&all_mddevs_lock);
4383 return mddev;
4384 }
4385 spin_unlock(&all_mddevs_lock);
4386 if (!l--)
4387 return (void*)2;/* tail */
4388 return NULL;
4389}
4390
4391static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4392{
4393 struct list_head *tmp;
4394 mddev_t *next_mddev, *mddev = v;
4395
4396 ++*pos;
4397 if (v == (void*)2)
4398 return NULL;
4399
4400 spin_lock(&all_mddevs_lock);
4401 if (v == (void*)1)
4402 tmp = all_mddevs.next;
4403 else
4404 tmp = mddev->all_mddevs.next;
4405 if (tmp != &all_mddevs)
4406 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4407 else {
4408 next_mddev = (void*)2;
4409 *pos = 0x10000;
4410 }
4411 spin_unlock(&all_mddevs_lock);
4412
4413 if (v != (void*)1)
4414 mddev_put(mddev);
4415 return next_mddev;
4416
4417}
4418
4419static void md_seq_stop(struct seq_file *seq, void *v)
4420{
4421 mddev_t *mddev = v;
4422
4423 if (mddev && v != (void*)1 && v != (void*)2)
4424 mddev_put(mddev);
4425}
4426
d7603b7e
N
4427struct mdstat_info {
4428 int event;
4429};
4430
1da177e4
LT
4431static int md_seq_show(struct seq_file *seq, void *v)
4432{
4433 mddev_t *mddev = v;
4434 sector_t size;
4435 struct list_head *tmp2;
4436 mdk_rdev_t *rdev;
d7603b7e 4437 struct mdstat_info *mi = seq->private;
32a7627c 4438 struct bitmap *bitmap;
1da177e4
LT
4439
4440 if (v == (void*)1) {
2604b703 4441 struct mdk_personality *pers;
1da177e4
LT
4442 seq_printf(seq, "Personalities : ");
4443 spin_lock(&pers_lock);
2604b703
N
4444 list_for_each_entry(pers, &pers_list, list)
4445 seq_printf(seq, "[%s] ", pers->name);
1da177e4
LT
4446
4447 spin_unlock(&pers_lock);
4448 seq_printf(seq, "\n");
d7603b7e 4449 mi->event = atomic_read(&md_event_count);
1da177e4
LT
4450 return 0;
4451 }
4452 if (v == (void*)2) {
4453 status_unused(seq);
4454 return 0;
4455 }
4456
5dc5cf7d 4457 if (mddev_lock(mddev) < 0)
1da177e4 4458 return -EINTR;
5dc5cf7d 4459
1da177e4
LT
4460 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4461 seq_printf(seq, "%s : %sactive", mdname(mddev),
4462 mddev->pers ? "" : "in");
4463 if (mddev->pers) {
f91de92e 4464 if (mddev->ro==1)
1da177e4 4465 seq_printf(seq, " (read-only)");
f91de92e
N
4466 if (mddev->ro==2)
4467 seq_printf(seq, "(auto-read-only)");
1da177e4
LT
4468 seq_printf(seq, " %s", mddev->pers->name);
4469 }
4470
4471 size = 0;
4472 ITERATE_RDEV(mddev,rdev,tmp2) {
4473 char b[BDEVNAME_SIZE];
4474 seq_printf(seq, " %s[%d]",
4475 bdevname(rdev->bdev,b), rdev->desc_nr);
8ddf9efe
N
4476 if (test_bit(WriteMostly, &rdev->flags))
4477 seq_printf(seq, "(W)");
b2d444d7 4478 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
4479 seq_printf(seq, "(F)");
4480 continue;
b325a32e
N
4481 } else if (rdev->raid_disk < 0)
4482 seq_printf(seq, "(S)"); /* spare */
1da177e4
LT
4483 size += rdev->size;
4484 }
4485
4486 if (!list_empty(&mddev->disks)) {
4487 if (mddev->pers)
4488 seq_printf(seq, "\n %llu blocks",
4489 (unsigned long long)mddev->array_size);
4490 else
4491 seq_printf(seq, "\n %llu blocks",
4492 (unsigned long long)size);
4493 }
1cd6bf19
N
4494 if (mddev->persistent) {
4495 if (mddev->major_version != 0 ||
4496 mddev->minor_version != 90) {
4497 seq_printf(seq," super %d.%d",
4498 mddev->major_version,
4499 mddev->minor_version);
4500 }
4501 } else
4502 seq_printf(seq, " super non-persistent");
1da177e4
LT
4503
4504 if (mddev->pers) {
4505 mddev->pers->status (seq, mddev);
4506 seq_printf(seq, "\n ");
8e1b39d6
N
4507 if (mddev->pers->sync_request) {
4508 if (mddev->curr_resync > 2) {
4509 status_resync (seq, mddev);
4510 seq_printf(seq, "\n ");
4511 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4512 seq_printf(seq, "\tresync=DELAYED\n ");
4513 else if (mddev->recovery_cp < MaxSector)
4514 seq_printf(seq, "\tresync=PENDING\n ");
4515 }
32a7627c
N
4516 } else
4517 seq_printf(seq, "\n ");
4518
4519 if ((bitmap = mddev->bitmap)) {
32a7627c
N
4520 unsigned long chunk_kb;
4521 unsigned long flags;
32a7627c
N
4522 spin_lock_irqsave(&bitmap->lock, flags);
4523 chunk_kb = bitmap->chunksize >> 10;
4524 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4525 "%lu%s chunk",
4526 bitmap->pages - bitmap->missing_pages,
4527 bitmap->pages,
4528 (bitmap->pages - bitmap->missing_pages)
4529 << (PAGE_SHIFT - 10),
4530 chunk_kb ? chunk_kb : bitmap->chunksize,
4531 chunk_kb ? "KB" : "B");
78d742d8
N
4532 if (bitmap->file) {
4533 seq_printf(seq, ", file: ");
4534 seq_path(seq, bitmap->file->f_vfsmnt,
4535 bitmap->file->f_dentry," \t\n");
32a7627c 4536 }
78d742d8 4537
32a7627c
N
4538 seq_printf(seq, "\n");
4539 spin_unlock_irqrestore(&bitmap->lock, flags);
1da177e4
LT
4540 }
4541
4542 seq_printf(seq, "\n");
4543 }
4544 mddev_unlock(mddev);
4545
4546 return 0;
4547}
4548
4549static struct seq_operations md_seq_ops = {
4550 .start = md_seq_start,
4551 .next = md_seq_next,
4552 .stop = md_seq_stop,
4553 .show = md_seq_show,
4554};
4555
4556static int md_seq_open(struct inode *inode, struct file *file)
4557{
4558 int error;
d7603b7e
N
4559 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4560 if (mi == NULL)
4561 return -ENOMEM;
1da177e4
LT
4562
4563 error = seq_open(file, &md_seq_ops);
d7603b7e
N
4564 if (error)
4565 kfree(mi);
4566 else {
4567 struct seq_file *p = file->private_data;
4568 p->private = mi;
4569 mi->event = atomic_read(&md_event_count);
4570 }
1da177e4
LT
4571 return error;
4572}
4573
d7603b7e
N
4574static int md_seq_release(struct inode *inode, struct file *file)
4575{
4576 struct seq_file *m = file->private_data;
4577 struct mdstat_info *mi = m->private;
4578 m->private = NULL;
4579 kfree(mi);
4580 return seq_release(inode, file);
4581}
4582
4583static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4584{
4585 struct seq_file *m = filp->private_data;
4586 struct mdstat_info *mi = m->private;
4587 int mask;
4588
4589 poll_wait(filp, &md_event_waiters, wait);
4590
4591 /* always allow read */
4592 mask = POLLIN | POLLRDNORM;
4593
4594 if (mi->event != atomic_read(&md_event_count))
4595 mask |= POLLERR | POLLPRI;
4596 return mask;
4597}
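/*
 * Illustrative userspace sketch (not part of md.c): mdstat_poll() lets
 * a monitor sleep in poll(2) on /proc/mdstat and wake only when
 * md_new_event() bumps md_event_count; POLLPRI/POLLERR mean "state
 * changed, re-read the file".
 */
#if 0	/* example only, not compiled with the driver */
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

static void watch_mdstat(void)
{
	char buf[4096];
	struct pollfd pfd = { .events = POLLPRI };

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	while (pfd.fd >= 0) {
		lseek(pfd.fd, 0, SEEK_SET);
		read(pfd.fd, buf, sizeof(buf));	/* records current event count */
		poll(&pfd, 1, -1);		/* blocks until the next md event */
	}
}
#endif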
4598
1da177e4
LT
4599static struct file_operations md_seq_fops = {
4600 .open = md_seq_open,
4601 .read = seq_read,
4602 .llseek = seq_lseek,
d7603b7e
N
4603 .release = md_seq_release,
4604 .poll = mdstat_poll,
1da177e4
LT
4605};
4606
2604b703 4607int register_md_personality(struct mdk_personality *p)
1da177e4 4608{
1da177e4 4609 spin_lock(&pers_lock);
2604b703
N
4610 list_add_tail(&p->list, &pers_list);
4611 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
1da177e4
LT
4612 spin_unlock(&pers_lock);
4613 return 0;
4614}
4615
2604b703 4616int unregister_md_personality(struct mdk_personality *p)
1da177e4 4617{
2604b703 4618 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
1da177e4 4619 spin_lock(&pers_lock);
2604b703 4620 list_del_init(&p->list);
1da177e4
LT
4621 spin_unlock(&pers_lock);
4622 return 0;
4623}
4624
4625static int is_mddev_idle(mddev_t *mddev)
4626{
4627 mdk_rdev_t * rdev;
4628 struct list_head *tmp;
4629 int idle;
4630 unsigned long curr_events;
4631
4632 idle = 1;
4633 ITERATE_RDEV(mddev,rdev,tmp) {
4634 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
a362357b
JA
4635 curr_events = disk_stat_read(disk, sectors[0]) +
4636 disk_stat_read(disk, sectors[1]) -
1da177e4 4637 atomic_read(&disk->sync_io);
c0e48521
N
4638 /* The difference between curr_events and last_events
4639 * will be affected by any new non-sync IO (making
4640 * curr_events bigger) and any difference in the amount of
4641 * in-flight sync IO (making curr_events bigger or smaller).
4642 * The amount in-flight is currently limited to
4643 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4644 * which is at most 4096 sectors.
4645 * These numbers are fairly fragile and should be made
4646 * more robust, probably by enforcing the
4647 * 'window size' that md_do_sync sort-of uses.
4648 *
1da177e4
LT
4649 * Note: the following is an unsigned comparison.
4650 */
c0e48521 4651 if ((curr_events - rdev->last_events + 4096) > 8192) {
1da177e4
LT
4652 rdev->last_events = curr_events;
4653 idle = 0;
4654 }
4655 }
4656 return idle;
4657}
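/*
 * Worked example of the idle window above: with rdev->last_events at
 * 10000, the device still counts as idle while curr_events stays
 * within 10000 +/- 4096 sectors; a larger swing (more than one maximal
 * in-flight sync window of new IO) clears 'idle' and lets md_do_sync()
 * back off.
 */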
4658
4659void md_done_sync(mddev_t *mddev, int blocks, int ok)
4660{
4661 /* another "blocks" (512byte) blocks have been synced */
4662 atomic_sub(blocks, &mddev->recovery_active);
4663 wake_up(&mddev->recovery_wait);
4664 if (!ok) {
4665 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4666 md_wakeup_thread(mddev->thread);
4667 // stop recovery, signal do_sync ....
4668 }
4669}
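/*
 * Illustrative sketch (not part of md.c): a personality's resync
 * completion path reports progress with md_done_sync(), using the same
 * sector count it charged in ->sync_request().  The function name is
 * an assumption.
 */
#if 0	/* example only */
static void examplepers_end_sync_io(mddev_t *mddev, int sectors, int uptodate)
{
	/* uptodate == 0 sets MD_RECOVERY_ERR and aborts the resync */
	md_done_sync(mddev, sectors, uptodate);
}
#endif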
4670
4671
06d91a5f
N
4672/* md_write_start(mddev, bi)
4673 * If we need to update some array metadata (e.g. 'active' flag
3d310eb7
N
4674 * in superblock) before writing, schedule a superblock update
4675 * and wait for it to complete.
06d91a5f 4676 */
3d310eb7 4677void md_write_start(mddev_t *mddev, struct bio *bi)
1da177e4 4678{
06d91a5f 4679 if (bio_data_dir(bi) != WRITE)
3d310eb7 4680 return;
06d91a5f 4681
f91de92e
N
4682 BUG_ON(mddev->ro == 1);
4683 if (mddev->ro == 2) {
4684 /* need to switch to read/write */
4685 mddev->ro = 0;
4686 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4687 md_wakeup_thread(mddev->thread);
4688 }
06d91a5f 4689 atomic_inc(&mddev->writes_pending);
06d91a5f 4690 if (mddev->in_sync) {
a9701a30 4691 spin_lock_irq(&mddev->write_lock);
3d310eb7
N
4692 if (mddev->in_sync) {
4693 mddev->in_sync = 0;
4694 mddev->sb_dirty = 1;
4695 md_wakeup_thread(mddev->thread);
4696 }
a9701a30 4697 spin_unlock_irq(&mddev->write_lock);
06d91a5f 4698 }
3d310eb7 4699 wait_event(mddev->sb_wait, mddev->sb_dirty==0);
1da177e4
LT
4700}
4701
4702void md_write_end(mddev_t *mddev)
4703{
4704 if (atomic_dec_and_test(&mddev->writes_pending)) {
4705 if (mddev->safemode == 2)
4706 md_wakeup_thread(mddev->thread);
16f17b39 4707 else if (mddev->safemode_delay)
1da177e4
LT
4708 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
4709 }
4710}
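/*
 * Illustrative sketch (not part of md.c): how a personality's write
 * path brackets requests with md_write_start()/md_write_end().  The
 * "examplepers_*" names are assumptions.
 */
#if 0	/* example only */
static int examplepers_make_request(request_queue_t *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;

	md_write_start(mddev, bio);	/* no-op for reads; may flush the sb */
	/* ... clone/map the bio and submit it to member devices ... */
	return 0;
}

/* called from the personality's bio completion handler for writes */
static void examplepers_write_done(mddev_t *mddev)
{
	md_write_end(mddev);		/* last writer may re-arm safemode */
}
#endif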
4711
75c96f85 4712static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
1da177e4
LT
4713
4714#define SYNC_MARKS 10
4715#define SYNC_MARK_STEP (3*HZ)
29269553 4716void md_do_sync(mddev_t *mddev)
1da177e4
LT
4717{
4718 mddev_t *mddev2;
4719 unsigned int currspeed = 0,
4720 window;
57afd89f 4721 sector_t max_sectors,j, io_sectors;
1da177e4
LT
4722 unsigned long mark[SYNC_MARKS];
4723 sector_t mark_cnt[SYNC_MARKS];
4724 int last_mark,m;
4725 struct list_head *tmp;
4726 sector_t last_check;
57afd89f 4727 int skipped = 0;
5fd6c1dc
N
4728 struct list_head *rtmp;
4729 mdk_rdev_t *rdev;
1da177e4
LT
4730
4731 /* just in case the thread restarts... */
4732 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
4733 return;
5fd6c1dc
N
4734 if (mddev->ro) /* never try to sync a read-only array */
4735 return;
1da177e4
LT
4736
4737 /* we overload curr_resync somewhat here.
4738 * 0 == not engaged in resync at all
4739 * 2 == checking that there is no conflict with another sync
4740 * 1 == like 2, but have yielded to allow conflicting resync to
4741 * commence
4742 * other == active in resync - this many blocks
4743 *
4744 * Before starting a resync we must have set curr_resync to
4745 * 2, and then checked that every "conflicting" array has curr_resync
4746 * less than ours. When we find one that is the same or higher
4747 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
4748 * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
4749 * This will mean we have to start checking from the beginning again.
4750 *
4751 */
4752
4753 do {
4754 mddev->curr_resync = 2;
4755
4756 try_again:
787453c2 4757 if (kthread_should_stop()) {
6985c43f 4758 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1da177e4
LT
4759 goto skip;
4760 }
4761 ITERATE_MDDEV(mddev2,tmp) {
1da177e4
LT
4762 if (mddev2 == mddev)
4763 continue;
4764 if (mddev2->curr_resync &&
4765 match_mddev_units(mddev,mddev2)) {
4766 DEFINE_WAIT(wq);
4767 if (mddev < mddev2 && mddev->curr_resync == 2) {
4768 /* arbitrarily yield */
4769 mddev->curr_resync = 1;
4770 wake_up(&resync_wait);
4771 }
4772 if (mddev > mddev2 && mddev->curr_resync == 1)
4773 /* no need to wait here, we can wait the next
4774 * time 'round when curr_resync == 2
4775 */
4776 continue;
787453c2
N
4777 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
4778 if (!kthread_should_stop() &&
8712e553 4779 mddev2->curr_resync >= mddev->curr_resync) {
1da177e4
LT
4780 printk(KERN_INFO "md: delaying resync of %s"
4781 " until %s has finished resync (they"
4782 " share one or more physical units)\n",
4783 mdname(mddev), mdname(mddev2));
4784 mddev_put(mddev2);
4785 schedule();
4786 finish_wait(&resync_wait, &wq);
4787 goto try_again;
4788 }
4789 finish_wait(&resync_wait, &wq);
4790 }
4791 }
4792 } while (mddev->curr_resync < 2);
4793
5fd6c1dc 4794 j = 0;
9d88883e 4795 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1da177e4 4796 /* resync follows the size requested by the personality,
57afd89f 4797 * which defaults to physical size, but can be virtual size
1da177e4
LT
4798 */
4799 max_sectors = mddev->resync_max_sectors;
9d88883e 4800 mddev->resync_mismatches = 0;
5fd6c1dc
N
4801 /* we don't use the checkpoint if there's a bitmap */
4802 if (!mddev->bitmap &&
4803 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
4804 j = mddev->recovery_cp;
ccfcc3c1
N
4805 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4806 max_sectors = mddev->size << 1;
5fd6c1dc 4807 else {
1da177e4
LT
4808 /* recovery follows the physical size of devices */
4809 max_sectors = mddev->size << 1;
5fd6c1dc
N
4810 j = MaxSector;
4811 ITERATE_RDEV(mddev,rdev,rtmp)
4812 if (rdev->raid_disk >= 0 &&
4813 !test_bit(Faulty, &rdev->flags) &&
4814 !test_bit(In_sync, &rdev->flags) &&
4815 rdev->recovery_offset < j)
4816 j = rdev->recovery_offset;
4817 }
1da177e4
LT
4818
4819 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
4820 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
88202a0c 4821 " %d KB/sec/disc.\n", speed_min(mddev));
338cec32 4822 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
1da177e4 4823 "(but not more than %d KB/sec) for reconstruction.\n",
88202a0c 4824 speed_max(mddev));
1da177e4
LT
4825
4826 is_mddev_idle(mddev); /* this also initializes IO event counters */
5fd6c1dc 4827
57afd89f 4828 io_sectors = 0;
1da177e4
LT
4829 for (m = 0; m < SYNC_MARKS; m++) {
4830 mark[m] = jiffies;
57afd89f 4831 mark_cnt[m] = io_sectors;
1da177e4
LT
4832 }
4833 last_mark = 0;
4834 mddev->resync_mark = mark[last_mark];
4835 mddev->resync_mark_cnt = mark_cnt[last_mark];
4836
4837 /*
4838 * Tune reconstruction:
4839 */
4840 window = 32*(PAGE_SIZE/512);
4841 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
4842 window/2,(unsigned long long) max_sectors/2);
4843
4844 atomic_set(&mddev->recovery_active, 0);
4845 init_waitqueue_head(&mddev->recovery_wait);
4846 last_check = 0;
4847
4848 if (j>2) {
4849 printk(KERN_INFO
4850 "md: resuming recovery of %s from checkpoint.\n",
4851 mdname(mddev));
4852 mddev->curr_resync = j;
4853 }
4854
4855 while (j < max_sectors) {
57afd89f 4856 sector_t sectors;
1da177e4 4857
57afd89f
N
4858 skipped = 0;
4859 sectors = mddev->pers->sync_request(mddev, j, &skipped,
88202a0c 4860 currspeed < speed_min(mddev));
57afd89f 4861 if (sectors == 0) {
1da177e4
LT
4862 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4863 goto out;
4864 }
57afd89f
N
4865
4866 if (!skipped) { /* actual IO requested */
4867 io_sectors += sectors;
4868 atomic_add(sectors, &mddev->recovery_active);
4869 }
4870
1da177e4
LT
4871 j += sectors;
4872 if (j>1) mddev->curr_resync = j;
d7603b7e
N
4873 if (last_check == 0)
4874 /* this is the earliest that the rebuild will be
4875 * visible in /proc/mdstat
4876 */
4877 md_new_event(mddev);
57afd89f
N
4878
4879 if (last_check + window > io_sectors || j == max_sectors)
1da177e4
LT
4880 continue;
4881
57afd89f 4882 last_check = io_sectors;
1da177e4
LT
4883
4884 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
4885 test_bit(MD_RECOVERY_ERR, &mddev->recovery))
4886 break;
4887
4888 repeat:
4889 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
4890 /* step marks */
4891 int next = (last_mark+1) % SYNC_MARKS;
4892
4893 mddev->resync_mark = mark[next];
4894 mddev->resync_mark_cnt = mark_cnt[next];
4895 mark[next] = jiffies;
57afd89f 4896 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
1da177e4
LT
4897 last_mark = next;
4898 }
4899
4900
787453c2 4901 if (kthread_should_stop()) {
1da177e4
LT
4902 /*
4903 * got a signal, exit.
4904 */
4905 printk(KERN_INFO
4906 "md: md_do_sync() got signal ... exiting\n");
1da177e4
LT
4907 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4908 goto out;
4909 }
4910
4911 /*
4912 * this loop exits only if either when we are slower than
4913 * the 'hard' speed limit, or the system was IO-idle for
4914 * a jiffy.
4915 * the system might be non-idle CPU-wise, but we only care
4916 * about not overloading the IO subsystem. (things like an
4917 * e2fsck being done on the RAID array should execute fast)
4918 */
4919 mddev->queue->unplug_fn(mddev->queue);
4920 cond_resched();
4921
57afd89f
N
4922 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
4923 /((jiffies-mddev->resync_mark)/HZ +1) +1;
1da177e4 4924
88202a0c
N
4925 if (currspeed > speed_min(mddev)) {
4926 if ((currspeed > speed_max(mddev)) ||
1da177e4 4927 !is_mddev_idle(mddev)) {
c0e48521 4928 msleep(500);
1da177e4
LT
4929 goto repeat;
4930 }
4931 }
4932 }
4933 printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
4934 /*
4935 * this also signals 'finished resyncing' to md_stop
4936 */
4937 out:
4938 mddev->queue->unplug_fn(mddev->queue);
4939
4940 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
4941
4942 /* tell personality that we are finished */
57afd89f 4943 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
1da177e4
LT
4944
4945 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
ccfcc3c1
N
4946 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
4947 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5fd6c1dc
N
4948 mddev->curr_resync > 2) {
4949 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4950 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4951 if (mddev->curr_resync >= mddev->recovery_cp) {
4952 printk(KERN_INFO
4953 "md: checkpointing recovery of %s.\n",
4954 mdname(mddev));
4955 mddev->recovery_cp = mddev->curr_resync;
4956 }
4957 } else
4958 mddev->recovery_cp = MaxSector;
4959 } else {
4960 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4961 mddev->curr_resync = MaxSector;
4962 ITERATE_RDEV(mddev,rdev,rtmp)
4963 if (rdev->raid_disk >= 0 &&
4964 !test_bit(Faulty, &rdev->flags) &&
4965 !test_bit(In_sync, &rdev->flags) &&
4966 rdev->recovery_offset < mddev->curr_resync)
4967 rdev->recovery_offset = mddev->curr_resync;
4968 mddev->sb_dirty = 1;
4969 }
1da177e4
LT
4970 }
4971
1da177e4
LT
4972 skip:
4973 mddev->curr_resync = 0;
4974 wake_up(&resync_wait);
4975 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
4976 md_wakeup_thread(mddev->thread);
4977}
29269553 4978EXPORT_SYMBOL_GPL(md_do_sync);
1da177e4
LT
4979
4980
4981/*
4982 * This routine is regularly called by all per-raid-array threads to
4983 * deal with generic issues like resync and super-block update.
4984 * Raid personalities that don't have a thread (linear/raid0) do not
4985 * need this as they never do any recovery or update the superblock.
4986 *
4987 * It does not do any resync itself, but rather "forks" off other threads
4988 * to do that as needed.
4989 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
4990 * "->recovery" and create a thread at ->sync_thread.
4991 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
4992 * and wakes up this thread, which will reap the sync thread and finish up.
4993 * This thread also removes any faulty devices (with nr_pending == 0).
4994 *
4995 * The overall approach is:
4996 * 1/ if the superblock needs updating, update it.
4997 * 2/ If a recovery thread is running, don't do anything else.
4998 * 3/ If recovery has finished, clean up, possibly marking spares active.
4999 * 4/ If there are any faulty devices, remove them.
5000 * 5/ If the array is degraded, try to add spare devices.
5001 * 6/ If the array has spares or is not in-sync, start a resync thread.
5002 */
5003void md_check_recovery(mddev_t *mddev)
5004{
5005 mdk_rdev_t *rdev;
5006 struct list_head *rtmp;
5007
5008
5f40402d
N
5009 if (mddev->bitmap)
5010 bitmap_daemon_work(mddev->bitmap);
1da177e4
LT
5011
5012 if (mddev->ro)
5013 return;
fca4d848
N
5014
5015 if (signal_pending(current)) {
5016 if (mddev->pers->sync_request) {
5017 printk(KERN_INFO "md: %s in immediate safe mode\n",
5018 mdname(mddev));
5019 mddev->safemode = 2;
5020 }
5021 flush_signals(current);
5022 }
5023
1da177e4
LT
5024 if ( ! (
5025 mddev->sb_dirty ||
5026 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
fca4d848
N
5027 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5028 (mddev->safemode == 1) ||
5029 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5030 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
1da177e4
LT
5031 ))
5032 return;
fca4d848 5033
df5b89b3 5034 if (mddev_trylock(mddev)) {
1da177e4 5035 int spares =0;
fca4d848 5036
a9701a30 5037 spin_lock_irq(&mddev->write_lock);
fca4d848
N
5038 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5039 !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5040 mddev->in_sync = 1;
5041 mddev->sb_dirty = 1;
5042 }
5043 if (mddev->safemode == 1)
5044 mddev->safemode = 0;
a9701a30 5045 spin_unlock_irq(&mddev->write_lock);
fca4d848 5046
1da177e4
LT
5047 if (mddev->sb_dirty)
5048 md_update_sb(mddev);
06d91a5f 5049
06d91a5f 5050
1da177e4
LT
5051 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5052 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5053 /* resync/recovery still happening */
5054 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5055 goto unlock;
5056 }
5057 if (mddev->sync_thread) {
5058 /* resync has finished, collect result */
5059 md_unregister_thread(mddev->sync_thread);
5060 mddev->sync_thread = NULL;
5061 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5062 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5063 /* success...*/
5064 /* activate any spares */
5065 mddev->pers->spare_active(mddev);
5066 }
5067 md_update_sb(mddev);
41158c7e
N
5068
5069 /* if the array is no longer degraded, then any saved_raid_disk
5070 * information must be scrapped
5071 */
5072 if (!mddev->degraded)
5073 ITERATE_RDEV(mddev,rdev,rtmp)
5074 rdev->saved_raid_disk = -1;
5075
1da177e4
LT
5076 mddev->recovery = 0;
5077 /* flag recovery needed just to double check */
5078 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
d7603b7e 5079 md_new_event(mddev);
1da177e4
LT
5080 goto unlock;
5081 }
24dd469d
N
5082 /* Clear some bits that don't mean anything, but
5083 * might be left set
5084 */
5085 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5086 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5087 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5088 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
1da177e4 5089
5fd6c1dc
N
5090 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5091 goto unlock;
1da177e4
LT
5092 /* no recovery is running.
5093 * remove any failed drives, then
5094 * add spares if possible.
5095 * Spares are also removed and re-added, to allow
5096 * the personality to fail the re-add.
5097 */
5098 ITERATE_RDEV(mddev,rdev,rtmp)
5099 if (rdev->raid_disk >= 0 &&
b2d444d7 5100 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
1da177e4 5101 atomic_read(&rdev->nr_pending)==0) {
86e6ffdd
N
5102 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
5103 char nm[20];
5104 sprintf(nm,"rd%d", rdev->raid_disk);
5105 sysfs_remove_link(&mddev->kobj, nm);
1da177e4 5106 rdev->raid_disk = -1;
86e6ffdd 5107 }
1da177e4
LT
5108 }
5109
5110 if (mddev->degraded) {
5111 ITERATE_RDEV(mddev,rdev,rtmp)
5112 if (rdev->raid_disk < 0
b2d444d7 5113 && !test_bit(Faulty, &rdev->flags)) {
5fd6c1dc 5114 rdev->recovery_offset = 0;
86e6ffdd
N
5115 if (mddev->pers->hot_add_disk(mddev,rdev)) {
5116 char nm[20];
5117 sprintf(nm, "rd%d", rdev->raid_disk);
5118 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
1da177e4 5119 spares++;
d7603b7e 5120 md_new_event(mddev);
86e6ffdd 5121 } else
1da177e4
LT
5122 break;
5123 }
5124 }
5125
24dd469d
N
5126 if (spares) {
5127 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5128 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5129 } else if (mddev->recovery_cp < MaxSector) {
5130 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5131 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5132 /* nothing to be done ... */
1da177e4 5133 goto unlock;
24dd469d 5134
1da177e4
LT
5135 if (mddev->pers->sync_request) {
5136 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
a654b9d8
N
5137 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5138 /* We are adding a device or devices to an array
5139 * which has the bitmap stored on all devices.
5140 * So make sure all bitmap pages get written
5141 */
5142 bitmap_write_all(mddev->bitmap);
5143 }
1da177e4
LT
5144 mddev->sync_thread = md_register_thread(md_do_sync,
5145 mddev,
5146 "%s_resync");
5147 if (!mddev->sync_thread) {
5148 printk(KERN_ERR "%s: could not start resync"
5149 " thread...\n",
5150 mdname(mddev));
5151 /* leave the spares where they are, it shouldn't hurt */
5152 mddev->recovery = 0;
d7603b7e 5153 } else
1da177e4 5154 md_wakeup_thread(mddev->sync_thread);
d7603b7e 5155 md_new_event(mddev);
1da177e4
LT
5156 }
5157 unlock:
5158 mddev_unlock(mddev);
5159 }
5160}
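/*
 * Illustrative sketch (not part of md.c): every personality daemon is
 * expected to call md_check_recovery() on each wakeup, so superblock
 * updates and resync threads are all driven from this one place.  The
 * function name below is an assumption.
 */
#if 0	/* example only */
static void examplepers_daemon(mddev_t *mddev)
{
	md_check_recovery(mddev);
	/* ... then handle the personality's own queued work ... */
}
#endif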
5161
75c96f85
AB
5162static int md_notify_reboot(struct notifier_block *this,
5163 unsigned long code, void *x)
1da177e4
LT
5164{
5165 struct list_head *tmp;
5166 mddev_t *mddev;
5167
5168 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5169
5170 printk(KERN_INFO "md: stopping all md devices.\n");
5171
5172 ITERATE_MDDEV(mddev,tmp)
c71d4887 5173 if (mddev_trylock(mddev)) {
1da177e4 5174 do_md_stop (mddev, 1);
c71d4887
NB
5175 mddev_unlock(mddev);
5176 }
1da177e4
LT
5177 /*
5178 * certain more exotic SCSI devices are known to be
5179 * volatile wrt too early system reboots. While the
5180 * right place to handle this issue is the given
5181 * driver, we do want to have a safe RAID driver ...
5182 */
5183 mdelay(1000*1);
5184 }
5185 return NOTIFY_DONE;
5186}
5187
75c96f85 5188static struct notifier_block md_notifier = {
1da177e4
LT
5189 .notifier_call = md_notify_reboot,
5190 .next = NULL,
5191 .priority = INT_MAX, /* before any real devices */
5192};
5193
5194static void md_geninit(void)
5195{
5196 struct proc_dir_entry *p;
5197
5198 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5199
5200 p = create_proc_entry("mdstat", S_IRUGO, NULL);
5201 if (p)
5202 p->proc_fops = &md_seq_fops;
5203}
5204
75c96f85 5205static int __init md_init(void)
1da177e4
LT
5206{
5207 int minor;
5208
5209 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
5210 " MD_SB_DISKS=%d\n",
5211 MD_MAJOR_VERSION, MD_MINOR_VERSION,
5212 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
bd926c63 5213 printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
32a7627c 5214 BITMAP_MINOR);
1da177e4
LT
5215
5216 if (register_blkdev(MAJOR_NR, "md"))
5217 return -1;
5218 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5219 unregister_blkdev(MAJOR_NR, "md");
5220 return -1;
5221 }
5222 devfs_mk_dir("md");
5223 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
5224 md_probe, NULL, NULL);
5225 blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
5226 md_probe, NULL, NULL);
5227
5228 for (minor=0; minor < MAX_MD_DEVS; ++minor)
5229 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
5230 S_IFBLK|S_IRUSR|S_IWUSR,
5231 "md/%d", minor);
5232
5233 for (minor=0; minor < MAX_MD_DEVS; ++minor)
5234 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
5235 S_IFBLK|S_IRUSR|S_IWUSR,
5236 "md/mdp%d", minor);
5237
5238
5239 register_reboot_notifier(&md_notifier);
5240 raid_table_header = register_sysctl_table(raid_root_table, 1);
5241
5242 md_geninit();
5243 return (0);
5244}
5245
5246
5247#ifndef MODULE
5248
5249/*
5250 * Searches all registered partitions for autorun RAID arrays
5251 * at boot time.
5252 */
5253static dev_t detected_devices[128];
5254static int dev_cnt;
5255
5256void md_autodetect_dev(dev_t dev)
5257{
5258 if (dev_cnt >= 0 && dev_cnt < 127)
5259 detected_devices[dev_cnt++] = dev;
5260}
5261
5262
5263static void autostart_arrays(int part)
5264{
5265 mdk_rdev_t *rdev;
5266 int i;
5267
5268 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5269
5270 for (i = 0; i < dev_cnt; i++) {
5271 dev_t dev = detected_devices[i];
5272
5273 rdev = md_import_device(dev,0, 0);
5274 if (IS_ERR(rdev))
5275 continue;
5276
b2d444d7 5277 if (test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
5278 MD_BUG();
5279 continue;
5280 }
5281 list_add(&rdev->same_set, &pending_raid_disks);
5282 }
5283 dev_cnt = 0;
5284
5285 autorun_devices(part);
5286}
5287
5288#endif
5289
5290static __exit void md_exit(void)
5291{
5292 mddev_t *mddev;
5293 struct list_head *tmp;
5294 int i;
5295 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
5296 blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
5297 for (i=0; i < MAX_MD_DEVS; i++)
5298 devfs_remove("md/%d", i);
5299 for (i=0; i < MAX_MD_DEVS; i++)
5300 devfs_remove("md/d%d", i);
5301
5302 devfs_remove("md");
5303
5304 unregister_blkdev(MAJOR_NR,"md");
5305 unregister_blkdev(mdp_major, "mdp");
5306 unregister_reboot_notifier(&md_notifier);
5307 unregister_sysctl_table(raid_table_header);
5308 remove_proc_entry("mdstat", NULL);
5309 ITERATE_MDDEV(mddev,tmp) {
5310 struct gendisk *disk = mddev->gendisk;
5311 if (!disk)
5312 continue;
5313 export_array(mddev);
5314 del_gendisk(disk);
5315 put_disk(disk);
5316 mddev->gendisk = NULL;
5317 mddev_put(mddev);
5318 }
5319}
5320
5321module_init(md_init)
5322module_exit(md_exit)
5323
f91de92e
N
5324static int get_ro(char *buffer, struct kernel_param *kp)
5325{
5326 return sprintf(buffer, "%d", start_readonly);
5327}
5328static int set_ro(const char *val, struct kernel_param *kp)
5329{
5330 char *e;
5331 int num = simple_strtoul(val, &e, 10);
5332 if (*val && (*e == '\0' || *e == '\n')) {
5333 start_readonly = num;
4dbcdc75 5334 return 0;
f91de92e
N
5335 }
5336 return -EINVAL;
5337}
5338
5339module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
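/*
 * Usage note (illustrative; paths assume the usual md-mod module
 * name): start_ro can be set at load time with "modprobe md-mod
 * start_ro=1", or toggled at runtime through
 * /sys/module/md_mod/parameters/start_ro (mode 0600, root only).
 */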
6ff8d8ec
N
5340module_param(start_dirty_degraded, int, 0644);
5341
f91de92e 5342
1da177e4
LT
5343EXPORT_SYMBOL(register_md_personality);
5344EXPORT_SYMBOL(unregister_md_personality);
5345EXPORT_SYMBOL(md_error);
5346EXPORT_SYMBOL(md_done_sync);
5347EXPORT_SYMBOL(md_write_start);
5348EXPORT_SYMBOL(md_write_end);
1da177e4
LT
5349EXPORT_SYMBOL(md_register_thread);
5350EXPORT_SYMBOL(md_unregister_thread);
5351EXPORT_SYMBOL(md_wakeup_thread);
1da177e4
LT
5352EXPORT_SYMBOL(md_check_recovery);
5353MODULE_LICENSE("GPL");
aa1595e9 5354MODULE_ALIAS("md");
72008652 5355MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);