bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - drivers/md/md.c
md: add 'recovery_start' per-device sysfs attribute
[net-next-2.6.git] / drivers / md / md.c
index 859edbf8c9b0a72679f86ff035c23e5e2949cb6c..e1f3c1715cca2ae459f0a70d7229128685ee2d70 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/random.h>
 #include <linux/reboot.h>
 #include <linux/file.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/raid/md_p.h>
 #include <linux/raid/md_u.h>
@@ -67,6 +68,12 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
+/*
+ * Default number of read corrections we'll attempt on an rdev
+ * before ejecting it from the array. We divide the read error
+ * count by 2 for every hour elapsed between read errors.
+ */
+#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
 /*
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
  * is 1000 KB/sec, so the extra system load does not show up that much.
@@ -1501,12 +1508,10 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 
        if (rdev->raid_disk >= 0 &&
            !test_bit(In_sync, &rdev->flags)) {
-               if (rdev->recovery_offset > 0) {
-                       sb->feature_map |=
-                               cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
-                       sb->recovery_offset =
-                               cpu_to_le64(rdev->recovery_offset);
-               }
+               sb->feature_map |=
+                       cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
+               sb->recovery_offset =
+                       cpu_to_le64(rdev->recovery_offset);
        }
 
        if (mddev->reshape_position != MaxSector) {
@@ -1540,7 +1545,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                else if (test_bit(In_sync, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
-               else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
+               else if (rdev2->raid_disk >= 0)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
@@ -2546,12 +2551,49 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 static struct rdev_sysfs_entry rdev_size =
 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
 
+
+/*
+ * sysfs 'recovery_start' read handler for a member device (rdev).
+ * Prints the sector at which recovery of this device would resume,
+ * or "none" when there is nothing to recover: the device is already
+ * In_sync, or recovery_offset is MaxSector.
+ */
+static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
+{
+       unsigned long long recovery_start = rdev->recovery_offset;
+
+       if (test_bit(In_sync, &rdev->flags) ||
+           recovery_start == MaxSector)
+               return sprintf(page, "none\n");
+
+       return sprintf(page, "%llu\n", recovery_start);
+}
+
+/*
+ * sysfs 'recovery_start' write handler for a member device (rdev).
+ * Accepts either "none" (device is fully recovered) or a sector
+ * number at which recovery should restart; anything else is -EINVAL.
+ *
+ * Refuses the update (-EBUSY) while the device is an active member of
+ * a running array, since recovery_offset would then be owned by the
+ * sync machinery.  Writing "none" also marks the device In_sync;
+ * any other value clears In_sync so recovery can proceed from there.
+ */
+static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+       unsigned long long recovery_start;
+
+       if (cmd_match(buf, "none"))
+               recovery_start = MaxSector;
+       else if (strict_strtoull(buf, 10, &recovery_start))
+               return -EINVAL;
+
+       /* Cannot retarget recovery on a device in active use. */
+       if (rdev->mddev->pers &&
+           rdev->raid_disk >= 0)
+               return -EBUSY;
+
+       rdev->recovery_offset = recovery_start;
+       if (recovery_start == MaxSector)
+               set_bit(In_sync, &rdev->flags);
+       else
+               clear_bit(In_sync, &rdev->flags);
+       return len;
+}
+
+static struct rdev_sysfs_entry rdev_recovery_start =
+__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
+
 static struct attribute *rdev_default_attrs[] = {
        &rdev_state.attr,
        &rdev_errors.attr,
        &rdev_slot.attr,
        &rdev_offset.attr,
        &rdev_size.attr,
+       &rdev_recovery_start.attr,
        NULL,
 };
 static ssize_t
@@ -2653,6 +2695,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
        rdev->flags = 0;
        rdev->data_offset = 0;
        rdev->sb_events = 0;
+       rdev->last_read_error.tv_sec  = 0;
+       rdev->last_read_error.tv_nsec = 0;
        atomic_set(&rdev->nr_pending, 0);
        atomic_set(&rdev->read_errors, 0);
        atomic_set(&rdev->corrected_errors, 0);
@@ -3094,7 +3138,9 @@ resync_start_store(mddev_t *mddev, const char *buf, size_t len)
 
        if (mddev->pers)
                return -EBUSY;
-       if (!*buf || (*e && *e != '\n'))
+       if (cmd_match(buf, "none"))
+               n = MaxSector;
+       else if (!*buf || (*e && *e != '\n'))
                return -EINVAL;
 
        mddev->recovery_cp = n;
@@ -3289,6 +3335,29 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
 static struct md_sysfs_entry md_array_state =
 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
 
+/*
+ * sysfs 'max_read_errors' read handler: number of corrected read
+ * errors an rdev may accumulate before it is ejected from the array
+ * (count is decayed elsewhere; see MD_DEFAULT_MAX_CORRECTED_READ_ERRORS).
+ */
+static ssize_t
+max_corrected_read_errors_show(mddev_t *mddev, char *page) {
+       return sprintf(page, "%d\n",
+                      atomic_read(&mddev->max_corr_read_errors));
+}
+
+/*
+ * sysfs 'max_read_errors' write handler: set the corrected-read-error
+ * threshold for the array.  Input must be a decimal number optionally
+ * followed by a newline; anything else is -EINVAL.
+ * NOTE(review): the unsigned long value is stored into an atomic_t,
+ * so values beyond INT_MAX would be truncated — presumably acceptable
+ * for this tunable, but worth confirming.
+ */
+static ssize_t
+max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       char *e;
+       unsigned long n = simple_strtoul(buf, &e, 10);
+
+       if (*buf && (*e == 0 || *e == '\n')) {
+               atomic_set(&mddev->max_corr_read_errors, n);
+               return len;
+       }
+       return -EINVAL;
+}
+
+static struct md_sysfs_entry max_corr_read_errors =
+__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
+       max_corrected_read_errors_store);
+
 static ssize_t
 null_show(mddev_t *mddev, char *page)
 {
@@ -3914,6 +3983,7 @@ static struct attribute *md_default_attrs[] = {
        &md_array_state.attr,
        &md_reshape_position.attr,
        &md_array_size.attr,
+       &max_corr_read_errors.attr,
        NULL,
 };
 
@@ -4333,6 +4403,8 @@ static int do_md_run(mddev_t * mddev)
                mddev->ro = 0;
 
        atomic_set(&mddev->writes_pending,0);
+       atomic_set(&mddev->max_corr_read_errors,
+                  MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
        mddev->safemode = 0;
        mddev->safemode_timer.function = md_safemode_timeout;
        mddev->safemode_timer.data = (unsigned long) mddev;
@@ -5657,6 +5729,25 @@ done:
 abort:
        return err;
 }
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit compat ioctl entry point for md block devices.
+ * The md ioctls listed below pass a plain integer in 'arg', so the
+ * value is forwarded unchanged; all other commands pass a user-space
+ * pointer, which must be converted with compat_ptr() before being
+ * handed to the native md_ioctl() handler.
+ */
+static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
+                   unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case HOT_REMOVE_DISK:
+       case HOT_ADD_DISK:
+       case SET_DISK_FAULTY:
+       case SET_BITMAP_FILE:
+               /* These take in integer arg, do not convert */
+               break;
+       default:
+               arg = (unsigned long)compat_ptr(arg);
+               break;
+       }
+
+       return md_ioctl(bdev, mode, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
 
 static int md_open(struct block_device *bdev, fmode_t mode)
 {
@@ -5722,6 +5813,9 @@ static const struct block_device_operations md_fops =
        .open           = md_open,
        .release        = md_release,
        .ioctl          = md_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = md_compat_ioctl,
+#endif
        .getgeo         = md_getgeo,
        .media_changed  = md_media_changed,
        .revalidate_disk= md_revalidate,
@@ -6471,12 +6565,14 @@ void md_do_sync(mddev_t *mddev)
                /* recovery follows the physical size of devices */
                max_sectors = mddev->dev_sectors;
                j = MaxSector;
-               list_for_each_entry(rdev, &mddev->disks, same_set)
+               rcu_read_lock();
+               list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(In_sync, &rdev->flags) &&
                            rdev->recovery_offset < j)
                                j = rdev->recovery_offset;
+               rcu_read_unlock();
        }
 
        printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
@@ -6646,12 +6742,14 @@ void md_do_sync(mddev_t *mddev)
                } else {
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
-                       list_for_each_entry(rdev, &mddev->disks, same_set)
+                       rcu_read_lock();
+                       list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
                                if (rdev->raid_disk >= 0 &&
                                    !test_bit(Faulty, &rdev->flags) &&
                                    !test_bit(In_sync, &rdev->flags) &&
                                    rdev->recovery_offset < mddev->curr_resync)
                                        rdev->recovery_offset = mddev->curr_resync;
+                       rcu_read_unlock();
                }
        }
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -6729,6 +6827,7 @@ static int remove_and_add_spares(mddev_t *mddev)
                                                       nm, mdname(mddev));
                                        spares++;
                                        md_new_event(mddev);
+                                       set_bit(MD_CHANGE_DEVS, &mddev->flags);
                                } else
                                        break;
                        }
@@ -7134,5 +7233,6 @@ EXPORT_SYMBOL(md_unregister_thread);
 EXPORT_SYMBOL(md_wakeup_thread);
 EXPORT_SYMBOL(md_check_recovery);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD RAID framework");
 MODULE_ALIAS("md");
 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);