pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'master' into for-next
author Neil Brown <neilb@notabene.brown>
Tue, 8 Jul 2008 00:11:50 +0000 (10:11 +1000)
committer Neil Brown <neilb@notabene.brown>
Tue, 8 Jul 2008 00:11:50 +0000 (10:11 +1000)
Documentation/md.txt
drivers/md/bitmap.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
include/linux/raid/bitmap.h
include/linux/raid/md_k.h
include/linux/raid/raid5.h

diff --git a/Documentation/md.txt b/Documentation/md.txt
index a8b430627473aa243995ab6f6e173b9cf1ff819e..e06cc59437e4a0b58422ed06450186054b9c38ab 100644
@@ -236,6 +236,11 @@ All md devices contain:
      writing the word for the desired state, however some states
      cannot be explicitly set, and some transitions are not allowed.
 
+     Select/poll works on this file.  All changes except between
+       active_idle and active (which can be frequent and are not
+       very interesting) are notified.  active->active_idle is
+       reported if the metadata is externally managed.
+
      clear
          No devices, no size, no level
          Writing is equivalent to STOP_ARRAY ioctl
@@ -292,6 +297,10 @@ Each directory contains:
              writemostly - device will only be subject to read
                         requests if there are no other options.
                         This applies only to raid1 arrays.
+             blocked  - device has failed, metadata is "external",
+                        and the failure hasn't been acknowledged yet.
+                        Writes that would write to this device if
+                        it were not faulty are blocked.
              spare    - device is working, but not a full member.
                         This includes spares that are in the process
                         of being recovered to
@@ -301,6 +310,12 @@ Each directory contains:
        Writing "remove" removes the device from the array.
        Writing "writemostly" sets the writemostly flag.
        Writing "-writemostly" clears the writemostly flag.
+       Writing "blocked" sets the "blocked" flag.
+       Writing "-blocked" clear the "blocked" flag and allows writes
+               to complete.
+
+       This file responds to select/poll. Any change to 'faulty'
+       or 'blocked' causes an event.
 
       errors
        An approximate count of read errors that have been detected on
@@ -381,6 +396,19 @@ also have
        'check' and 'repair' will start the appropriate process
            providing the current state is 'idle'.
 
+      This file responds to select/poll.  Any important change in the value
+      triggers a poll event.  Sometimes the value will briefly be
+      "recover" if a recovery seems to be needed, but cannot be
+      achieved. In that case, the transition to "recover" isn't
+      notified, but the transition away is.
+
+   degraded
+      This contains a count of the number of devices by which the
+      array is degraded.  So an optimal array will show '0'.  A
+      single failed/missing drive will show '1', etc.
+      This file responds to select/poll; any increase or decrease
+      in the count of missing devices will trigger an event.
+
    mismatch_count
       When performing 'check' and 'repair', and possibly when
       performing 'resync', md will count the number of errors that are
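
The select/poll support described in these documentation hunks follows the
standard sysfs notification pattern: open the attribute, read its current
contents, poll for POLLPRI, then seek back to offset 0 and re-read once the
poll fires.  A minimal userspace sketch, assuming an array at /sys/block/md0
(any md array's sysfs directory behaves the same way):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/block/md0/md/array_state", O_RDONLY);

	if (fd < 0)
		return 1;
	/* consume the current value so poll() waits for the next change */
	read(fd, buf, sizeof(buf) - 1);

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		if (poll(&pfd, 1, -1) < 0)
			break;
		/* sysfs_notify() fired: rewind and re-read the new value */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("array_state: %s", buf);
		}
	}
	close(fd);
	return 0;
}

The same loop works for sync_action, degraded, and the per-device state
files that gain sysfs_notify() calls later in this merge.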
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index b26927ce889cead2717db3f4d3896bbd9bacc4e2..dedba16d42f723cb75f87396659e31899d010fc8 100644
@@ -454,8 +454,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
        spin_unlock_irqrestore(&bitmap->lock, flags);
        sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
        sb->events = cpu_to_le64(bitmap->mddev->events);
-       if (!bitmap->mddev->degraded)
-               sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+       if (bitmap->mddev->events < bitmap->events_cleared) {
+               /* rocking back to read-only */
+               bitmap->events_cleared = bitmap->mddev->events;
+               sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
+       }
        kunmap_atomic(sb, KM_USER0);
        write_page(bitmap, bitmap->sb_page, 1);
 }
@@ -1085,9 +1088,19 @@ void bitmap_daemon_work(struct bitmap *bitmap)
                        } else
                                spin_unlock_irqrestore(&bitmap->lock, flags);
                        lastpage = page;
-/*
-                       printk("bitmap clean at page %lu\n", j);
-*/
+
+                       /* We are possibly going to clear some bits, so make
+                        * sure that events_cleared is up-to-date.
+                        */
+                       if (bitmap->need_sync) {
+                               bitmap_super_t *sb;
+                               bitmap->need_sync = 0;
+                               sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+                               sb->events_cleared =
+                                       cpu_to_le64(bitmap->events_cleared);
+                               kunmap_atomic(sb, KM_USER0);
+                               write_page(bitmap, bitmap->sb_page, 1);
+                       }
                        spin_lock_irqsave(&bitmap->lock, flags);
                        clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
                }
@@ -1257,6 +1270,12 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        return;
                }
 
+               if (success &&
+                   bitmap->events_cleared < bitmap->mddev->events) {
+                       bitmap->events_cleared = bitmap->mddev->events;
+                       bitmap->need_sync = 1;
+               }
+
                if (!success && ! (*bmc & NEEDED_MASK))
                        *bmc |= NEEDED_MASK;
 
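
Taken together, the bitmap.c hunks reorder when events_cleared reaches disk:
bitmap_endwrite() raises the in-memory value on any successful write and
flags need_sync, and bitmap_daemon_work() flushes the superblock before it
clears any clean bits.  A condensed sketch of that ordering (names mirror
the driver; this is an illustration of the invariant, not the driver code
itself):

/* on successful write completion (bitmap_endwrite) */
if (success && bitmap->events_cleared < bitmap->mddev->events) {
	bitmap->events_cleared = bitmap->mddev->events;
	bitmap->need_sync = 1;              /* on-disk copy is now stale */
}

/* in bitmap_daemon_work(), before clearing any clean bits */
if (bitmap->need_sync) {
	bitmap->need_sync = 0;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	write_page(bitmap, bitmap->sb_page, 1);   /* superblock first... */
}
clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); /* ...then the bits */

With this ordering a crash can never leave bits cleared on disk while the
recorded events_cleared still claims an older event count.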
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 10748240cb2fde2bd8942dbb19e300cc6c32961c..ec921f58fbb8224cec0772494a75926fbc660be7 100644
@@ -126,7 +126,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
                int j = rdev->raid_disk;
                dev_info_t *disk = conf->disks + j;
 
-               if (j < 0 || j > raid_disks || disk->rdev) {
+               if (j < 0 || j >= raid_disks || disk->rdev) {
                        printk("linear: disk numbering problem. Aborting!\n");
                        goto out;
                }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2580ac1b9b0ff06f34d37f12ea829682017e5ef1..df1230af02cd5cb3db9f8bdc1771841d707b4011 100644
@@ -169,7 +169,6 @@ void md_new_event(mddev_t *mddev)
 {
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
-       sysfs_notify(&mddev->kobj, NULL, "sync_action");
 }
 EXPORT_SYMBOL_GPL(md_new_event);
 
@@ -278,6 +277,7 @@ static mddev_t * mddev_find(dev_t unit)
        init_waitqueue_head(&new->sb_wait);
        init_waitqueue_head(&new->recovery_wait);
        new->reshape_position = MaxSector;
+       new->resync_min = 0;
        new->resync_max = MaxSector;
        new->level = LEVEL_NONE;
 
@@ -658,11 +658,14 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
  */
 
 struct super_type  {
-       char            *name;
-       struct module   *owner;
-       int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
-       int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
-       void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+       char                *name;
+       struct module       *owner;
+       int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
+                                         int minor_version);
+       int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+       void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
+       unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
+                                               unsigned long long size);
 };
 
 /*
@@ -1003,6 +1006,27 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->sb_csum = calc_sb_csum(sb);
 }
 
+/*
+ * rdev_size_change for 0.90.0
+ */
+static unsigned long long
+super_90_rdev_size_change(mdk_rdev_t *rdev, unsigned long long size)
+{
+       if (size && size < rdev->mddev->size)
+               return 0; /* component must fit device */
+       size *= 2; /* convert to sectors */
+       if (rdev->mddev->bitmap_offset)
+               return 0; /* can't move bitmap */
+       rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
+       if (!size || size > rdev->sb_offset*2)
+               size = rdev->sb_offset*2;
+       md_super_write(rdev->mddev, rdev, rdev->sb_offset << 1, rdev->sb_size,
+                      rdev->sb_page);
+       md_super_wait(rdev->mddev);
+       return size/2; /* kB for sysfs */
+}
+
+
 /*
  * version 1 superblock
  */
@@ -1328,21 +1352,59 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        sb->sb_csum = calc_sb_1_csum(sb);
 }
 
+static unsigned long long
+super_1_rdev_size_change(mdk_rdev_t *rdev, unsigned long long size)
+{
+       struct mdp_superblock_1 *sb;
+       unsigned long long max_size;
+       if (size && size < rdev->mddev->size)
+               return 0; /* component must fit device */
+       size *= 2; /* convert to sectors */
+       if (rdev->sb_offset < rdev->data_offset/2) {
+               /* minor versions 1 and 2; superblock before data */
+               max_size = (rdev->bdev->bd_inode->i_size >> 9);
+               max_size -= rdev->data_offset;
+               if (!size || size > max_size)
+                       size = max_size;
+       } else if (rdev->mddev->bitmap_offset) {
+               /* minor version 0 with bitmap we can't move */
+               return 0;
+       } else {
+               /* minor version 0; superblock after data */
+               sector_t sb_offset;
+               sb_offset = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+               sb_offset &= ~(sector_t)(4*2 - 1);
+               max_size = rdev->size*2 + sb_offset - rdev->sb_offset*2;
+               if (!size || size > max_size)
+                       size = max_size;
+               rdev->sb_offset = sb_offset/2;
+       }
+       sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
+       sb->data_size = cpu_to_le64(size);
+       sb->super_offset = rdev->sb_offset*2;
+       sb->sb_csum = calc_sb_1_csum(sb);
+       md_super_write(rdev->mddev, rdev, rdev->sb_offset << 1, rdev->sb_size,
+                      rdev->sb_page);
+       md_super_wait(rdev->mddev);
+       return size/2; /* kB for sysfs */
+}
 
 static struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
-               .load_super     = super_90_load,
-               .validate_super = super_90_validate,
-               .sync_super     = super_90_sync,
+               .load_super         = super_90_load,
+               .validate_super     = super_90_validate,
+               .sync_super         = super_90_sync,
+               .rdev_size_change   = super_90_rdev_size_change,
        },
        [1] = {
                .name   = "md-1",
                .owner  = THIS_MODULE,
-               .load_super     = super_1_load,
-               .validate_super = super_1_validate,
-               .sync_super     = super_1_sync,
+               .load_super         = super_1_load,
+               .validate_super     = super_1_validate,
+               .sync_super         = super_1_sync,
+               .rdev_size_change   = super_1_rdev_size_change,
        },
 };
 
@@ -1886,6 +1948,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 
                err = 0;
        }
+       if (!err)
+               sysfs_notify(&rdev->kobj, NULL, "state");
        return err ? err : len;
 }
 static struct rdev_sysfs_entry rdev_state =
@@ -1931,7 +1995,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                slot = -1;
        else if (e==buf || (*e && *e!= '\n'))
                return -EINVAL;
-       if (rdev->mddev->pers) {
+       if (rdev->mddev->pers && slot == -1) {
                /* Setting 'slot' on an active array requires also
                 * updating the 'rd%d' link, and communicating
                 * with the personality with ->hot_*_disk.
@@ -1939,8 +2003,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                 * failed/spare devices.  This normally happens automatically,
                 * but not when the metadata is externally managed.
                 */
-               if (slot != -1)
-                       return -EBUSY;
                if (rdev->raid_disk == -1)
                        return -EEXIST;
                /* personality does all needed checks */
@@ -1954,6 +2016,43 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                sysfs_remove_link(&rdev->mddev->kobj, nm);
                set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
                md_wakeup_thread(rdev->mddev->thread);
+       } else if (rdev->mddev->pers) {
+               mdk_rdev_t *rdev2;
+               struct list_head *tmp;
+               /* Activating a spare .. or possibly reactivating
+                * if we ever get bitmaps working here.
+                */
+
+               if (rdev->raid_disk != -1)
+                       return -EBUSY;
+
+               if (rdev->mddev->pers->hot_add_disk == NULL)
+                       return -EINVAL;
+
+               rdev_for_each(rdev2, tmp, rdev->mddev)
+                       if (rdev2->raid_disk == slot)
+                               return -EEXIST;
+
+               rdev->raid_disk = slot;
+               if (test_bit(In_sync, &rdev->flags))
+                       rdev->saved_raid_disk = slot;
+               else
+                       rdev->saved_raid_disk = -1;
+               err = rdev->mddev->pers->
+                       hot_add_disk(rdev->mddev, rdev);
+               if (err) {
+                       rdev->raid_disk = -1;
+                       return err;
+               } else
+                       sysfs_notify(&rdev->kobj, NULL, "state");
+               sprintf(nm, "rd%d", rdev->raid_disk);
+               if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
+                       printk(KERN_WARNING
+                              "md: cannot register "
+                              "%s for %s\n",
+                              nm, mdname(rdev->mddev));
+
+               /* don't wakeup anyone, leave that to userspace. */
        } else {
                if (slot >= rdev->mddev->raid_disks)
                        return -ENOSPC;
@@ -1962,6 +2061,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                clear_bit(Faulty, &rdev->flags);
                clear_bit(WriteMostly, &rdev->flags);
                set_bit(In_sync, &rdev->flags);
+               sysfs_notify(&rdev->kobj, NULL, "state");
        }
        return len;
 }
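
With slot_store() extended this way, userspace that manages external
metadata can activate a spare directly through sysfs rather than via an
ioctl: writing the target slot number to the device's 'slot' attribute runs
the personality's hot_add_disk() with the checks shown above.  A hedged
sketch (the dev-sdc directory name is an assumption for illustration):

#include <fcntl.h>
#include <unistd.h>

/* Activate a spare into slot 2 of md0.  Errors from slot_store()
 * (-EBUSY, -EEXIST, -EINVAL, ...) surface through write(2). */
int activate_spare(void)
{
	int fd = open("/sys/block/md0/md/dev-sdc/slot", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "2", 1) != 1) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

Note the "don't wakeup anyone" comment: md deliberately leaves starting the
recovery to the metadata manager, which can follow up by writing "recover"
to sync_action (handled by the action_store() change below).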
@@ -1983,7 +2083,7 @@ offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
        unsigned long long offset = simple_strtoull(buf, &e, 10);
        if (e==buf || (*e && *e != '\n'))
                return -EINVAL;
-       if (rdev->mddev->pers)
+       if (rdev->mddev->pers && rdev->raid_disk >= 0)
                return -EBUSY;
        if (rdev->size && rdev->mddev->external)
                /* Must set offset before size, so overlap checks
@@ -2022,8 +2122,20 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 
        if (e==buf || (*e && *e != '\n'))
                return -EINVAL;
-       if (my_mddev->pers)
-               return -EBUSY;
+       if (my_mddev->pers && rdev->raid_disk >= 0) {
+               if (rdev->mddev->persistent) {
+                       size = super_types[rdev->mddev->major_version].
+                               rdev_size_change(rdev, size);
+                       if (!size)
+                               return -EBUSY;
+               } else if (!size) {
+                       size = (rdev->bdev->bd_inode->i_size >> 10);
+                       size -= rdev->data_offset/2;
+               }
+               if (size < rdev->mddev->size)
+                       return -EINVAL; /* component must fit device */
+       }
+
        rdev->size = size;
        if (size > oldsize && rdev->mddev->external) {
                /* need to check that all other rdevs with the same ->bdev
@@ -2681,8 +2793,10 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
        }
        if (err)
                return err;
-       else
+       else {
+               sysfs_notify(&mddev->kobj, NULL, "array_state");
                return len;
+       }
 }
 static struct md_sysfs_entry md_array_state =
 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
@@ -2899,7 +3013,7 @@ action_show(mddev_t *mddev, char *page)
                                type = "check";
                        else
                                type = "repair";
-               } else
+               } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
                        type = "recover";
        }
        return sprintf(page, "%s\n", type);
@@ -2921,15 +3035,19 @@ action_store(mddev_t *mddev, const char *page, size_t len)
        } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
-       else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
+       else if (cmd_match(page, "resync"))
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-       else if (cmd_match(page, "reshape")) {
+       else if (cmd_match(page, "recover")) {
+               set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       } else if (cmd_match(page, "reshape")) {
                int err;
                if (mddev->pers->start_reshape == NULL)
                        return -EINVAL;
                err = mddev->pers->start_reshape(mddev);
                if (err)
                        return err;
+               sysfs_notify(&mddev->kobj, NULL, "degraded");
        } else {
                if (cmd_match(page, "check"))
                        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -2940,6 +3058,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
        }
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
+       sysfs_notify(&mddev->kobj, NULL, "sync_action");
        return len;
 }
 
@@ -3074,6 +3193,36 @@ sync_completed_show(mddev_t *mddev, char *page)
 
 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
 
+static ssize_t
+min_sync_show(mddev_t *mddev, char *page)
+{
+       return sprintf(page, "%llu\n",
+                      (unsigned long long)mddev->resync_min);
+}
+static ssize_t
+min_sync_store(mddev_t *mddev, const char *buf, size_t len)
+{
+       unsigned long long min;
+       if (strict_strtoull(buf, 10, &min))
+               return -EINVAL;
+       if (min > mddev->resync_max)
+               return -EINVAL;
+       if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+               return -EBUSY;
+
+       /* Must be a multiple of chunk_size */
+       if (mddev->chunk_size) {
+               if (min & (sector_t)((mddev->chunk_size>>9)-1))
+                       return -EINVAL;
+       }
+       mddev->resync_min = min;
+
+       return len;
+}
+
+static struct md_sysfs_entry md_min_sync =
+__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
+
 static ssize_t
 max_sync_show(mddev_t *mddev, char *page)
 {
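
Paired with the existing sync_max, the new sync_min attribute lets
userspace bound a requested sync to a sector range, for example to re-check
only a suspect region.  A sketch with assumed paths and illustrative sector
numbers (both values are 512-byte sectors, and sync_min must be a multiple
of the chunk size, as the check above enforces):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n == (ssize_t)strlen(val) ? 0 : -1;
}

/* Re-check only sectors 2048..409600 of md0. */
int check_range(void)
{
	if (write_attr("/sys/block/md0/md/sync_min", "2048") ||
	    write_attr("/sys/block/md0/md/sync_max", "409600"))
		return -1;
	/* "check" starts a MD_RECOVERY_REQUESTED pass, which md_do_sync()
	 * now begins at resync_min rather than sector 0 */
	return write_attr("/sys/block/md0/md/sync_action", "check");
}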
@@ -3089,9 +3238,10 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
        if (strncmp(buf, "max", 3) == 0)
                mddev->resync_max = MaxSector;
        else {
-               char *ep;
-               unsigned long long max = simple_strtoull(buf, &ep, 10);
-               if (ep == buf || (*ep != 0 && *ep != '\n'))
+               unsigned long long max;
+               if (strict_strtoull(buf, 10, &max))
+                       return -EINVAL;
+               if (max < mddev->resync_min)
                        return -EINVAL;
                if (max < mddev->resync_max &&
                    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -3222,6 +3372,7 @@ static struct attribute *md_redundancy_attrs[] = {
        &md_sync_speed.attr,
        &md_sync_force_parallel.attr,
        &md_sync_completed.attr,
+       &md_min_sync.attr,
        &md_max_sync.attr,
        &md_suspend_lo.attr,
        &md_suspend_hi.attr,
@@ -3326,9 +3477,9 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
        disk->queue = mddev->queue;
        add_disk(disk);
        mddev->gendisk = disk;
-       mutex_unlock(&disks_mutex);
        error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
                                     "%s", "md");
+       mutex_unlock(&disks_mutex);
        if (error)
                printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
                       disk->disk_name);
@@ -3341,7 +3492,11 @@ static void md_safemode_timeout(unsigned long data)
 {
        mddev_t *mddev = (mddev_t *) data;
 
-       mddev->safemode = 1;
+       if (!atomic_read(&mddev->writes_pending)) {
+               mddev->safemode = 1;
+               if (mddev->external)
+                       sysfs_notify(&mddev->kobj, NULL, "array_state");
+       }
        md_wakeup_thread(mddev->thread);
 }
 
@@ -3448,6 +3603,7 @@ static int do_md_run(mddev_t * mddev)
                                return -EINVAL;
                        }
                }
+               sysfs_notify(&rdev->kobj, NULL, "state");
        }
 
        md_probe(mddev->unit, NULL, NULL);
@@ -3608,6 +3764,9 @@ static int do_md_run(mddev_t * mddev)
 
        mddev->changed = 1;
        md_new_event(mddev);
+       sysfs_notify(&mddev->kobj, NULL, "array_state");
+       sysfs_notify(&mddev->kobj, NULL, "sync_action");
+       sysfs_notify(&mddev->kobj, NULL, "degraded");
        kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
        return 0;
 }
@@ -3642,6 +3801,8 @@ static int restart_array(mddev_t *mddev)
                md_wakeup_thread(mddev->thread);
                md_wakeup_thread(mddev->sync_thread);
                err = 0;
+               sysfs_notify(&mddev->kobj, NULL, "array_state");
+
        } else
                err = -EINVAL;
 
@@ -3777,6 +3938,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
                mddev->size = 0;
                mddev->raid_disks = 0;
                mddev->recovery_cp = 0;
+               mddev->resync_min = 0;
                mddev->resync_max = MaxSector;
                mddev->reshape_position = MaxSector;
                mddev->external = 0;
@@ -3811,6 +3973,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
                        mdname(mddev));
        err = 0;
        md_new_event(mddev);
+       sysfs_notify(&mddev->kobj, NULL, "array_state");
 out:
        return err;
 }
@@ -4172,8 +4335,12 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
                }
                if (err)
                        export_rdev(rdev);
+               else
+                       sysfs_notify(&rdev->kobj, NULL, "state");
 
                md_update_sb(mddev, 1);
+               if (mddev->degraded)
+                       set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                return err;
@@ -4232,9 +4399,6 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
        char b[BDEVNAME_SIZE];
        mdk_rdev_t *rdev;
 
-       if (!mddev->pers)
-               return -ENODEV;
-
        rdev = find_rdev(mddev, dev);
        if (!rdev)
                return -ENXIO;
@@ -4811,8 +4975,9 @@ static int md_ioctl(struct inode *inode, struct file *file,
            mddev->ro && mddev->pers) {
                if (mddev->ro == 2) {
                        mddev->ro = 0;
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-               md_wakeup_thread(mddev->thread);
+                       sysfs_notify(&mddev->kobj, NULL, "array_state");
+                       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       md_wakeup_thread(mddev->thread);
 
                } else {
                        err = -EROFS;
@@ -5029,6 +5194,9 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
        if (!mddev->pers->error_handler)
                return;
        mddev->pers->error_handler(mddev,rdev);
+       if (mddev->degraded)
+               set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+       set_bit(StateChanged, &rdev->flags);
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
@@ -5451,6 +5619,7 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
  */
 void md_write_start(mddev_t *mddev, struct bio *bi)
 {
+       int did_change = 0;
        if (bio_data_dir(bi) != WRITE)
                return;
 
@@ -5461,6 +5630,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                md_wakeup_thread(mddev->sync_thread);
+               did_change = 1;
        }
        atomic_inc(&mddev->writes_pending);
        if (mddev->safemode == 1)
@@ -5471,10 +5641,12 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                        mddev->in_sync = 0;
                        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        md_wakeup_thread(mddev->thread);
+                       did_change = 1;
                }
                spin_unlock_irq(&mddev->write_lock);
-               sysfs_notify(&mddev->kobj, NULL, "array_state");
        }
+       if (did_change)
+               sysfs_notify(&mddev->kobj, NULL, "array_state");
        wait_event(mddev->sb_wait,
                   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
                   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
@@ -5502,6 +5674,8 @@ void md_allow_write(mddev_t *mddev)
                return;
        if (mddev->ro)
                return;
+       if (!mddev->pers->sync_request)
+               return;
 
        spin_lock_irq(&mddev->write_lock);
        if (mddev->in_sync) {
@@ -5625,9 +5799,11 @@ void md_do_sync(mddev_t *mddev)
                max_sectors = mddev->resync_max_sectors;
                mddev->resync_mismatches = 0;
                /* we don't use the checkpoint if there's a bitmap */
-               if (!mddev->bitmap &&
-                   !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+               if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+                       j = mddev->resync_min;
+               else if (!mddev->bitmap)
                        j = mddev->recovery_cp;
+
        } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                max_sectors = mddev->size << 1;
        else {
@@ -5796,6 +5972,7 @@ void md_do_sync(mddev_t *mddev)
 
  skip:
        mddev->curr_resync = 0;
+       mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
        wake_up(&resync_wait);
@@ -5845,7 +6022,8 @@ static int remove_and_add_spares(mddev_t *mddev)
                        if (rdev->raid_disk < 0
                            && !test_bit(Faulty, &rdev->flags)) {
                                rdev->recovery_offset = 0;
-                               if (mddev->pers->hot_add_disk(mddev,rdev)) {
+                               if (mddev->pers->
+                                   hot_add_disk(mddev, rdev) == 0) {
                                        char nm[20];
                                        sprintf(nm, "rd%d", rdev->raid_disk);
                                        if (sysfs_create_link(&mddev->kobj,
@@ -5920,23 +6098,31 @@ void md_check_recovery(mddev_t *mddev)
                int spares = 0;
 
                if (!mddev->external) {
+                       int did_change = 0;
                        spin_lock_irq(&mddev->write_lock);
                        if (mddev->safemode &&
                            !atomic_read(&mddev->writes_pending) &&
                            !mddev->in_sync &&
                            mddev->recovery_cp == MaxSector) {
                                mddev->in_sync = 1;
+                               did_change = 1;
                                if (mddev->persistent)
                                        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        }
                        if (mddev->safemode == 1)
                                mddev->safemode = 0;
                        spin_unlock_irq(&mddev->write_lock);
+                       if (did_change)
+                               sysfs_notify(&mddev->kobj, NULL, "array_state");
                }
 
                if (mddev->flags)
                        md_update_sb(mddev, 0);
 
+               rdev_for_each(rdev, rtmp, mddev)
+                       if (test_and_clear_bit(StateChanged, &rdev->flags))
+                               sysfs_notify(&rdev->kobj, NULL, "state");
+
 
                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
                    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
@@ -5951,7 +6137,9 @@ void md_check_recovery(mddev_t *mddev)
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                                /* success...*/
                                /* activate any spares */
-                               mddev->pers->spare_active(mddev);
+                               if (mddev->pers->spare_active(mddev))
+                                       sysfs_notify(&mddev->kobj, NULL,
+                                                    "degraded");
                        }
                        md_update_sb(mddev, 1);
 
@@ -5965,13 +6153,18 @@ void md_check_recovery(mddev_t *mddev)
                        mddev->recovery = 0;
                        /* flag recovery needed just to double check */
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       sysfs_notify(&mddev->kobj, NULL, "sync_action");
                        md_new_event(mddev);
                        goto unlock;
                }
+               /* Set RUNNING before clearing NEEDED to avoid
+                * any transients in the value of "sync_action".
+                */
+               set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                /* Clear some bits that don't mean anything, but
                 * might be left set
                 */
-               clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
                clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 
@@ -5989,17 +6182,19 @@ void md_check_recovery(mddev_t *mddev)
                                /* Cannot proceed */
                                goto unlock;
                        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+                       clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if ((spares = remove_and_add_spares(mddev))) {
                        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+                       set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (mddev->recovery_cp < MaxSector) {
                        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+                       clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
                        /* nothing to be done ... */
                        goto unlock;
 
                if (mddev->pers->sync_request) {
-                       set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                        if (spares && mddev->bitmap && ! mddev->bitmap->file) {
                                /* We are adding a device or devices to an array
                                 * which has the bitmap stored on all devices.
@@ -6018,9 +6213,16 @@ void md_check_recovery(mddev_t *mddev)
                                mddev->recovery = 0;
                        } else
                                md_wakeup_thread(mddev->sync_thread);
+                       sysfs_notify(&mddev->kobj, NULL, "sync_action");
                        md_new_event(mddev);
                }
        unlock:
+               if (!mddev->sync_thread) {
+                       clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+                       if (test_and_clear_bit(MD_RECOVERY_RECOVER,
+                                              &mddev->recovery))
+                               sysfs_notify(&mddev->kobj, NULL, "sync_action");
+               }
                mddev_unlock(mddev);
        }
 }
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index e968116e0de9699d2bef0f114ae79f638506bf1c..541cbe3414bd29dc2e0260b322b3a45fa3d04c55 100644
@@ -281,13 +281,18 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        multipath_conf_t *conf = mddev->private;
        struct request_queue *q;
-       int found = 0;
+       int err = -EEXIST;
        int path;
        struct multipath_info *p;
+       int first = 0;
+       int last = mddev->raid_disks - 1;
+
+       if (rdev->raid_disk >= 0)
+               first = last = rdev->raid_disk;
 
        print_multipath_conf(conf);
 
-       for (path=0; path<mddev->raid_disks; path++) 
+       for (path = first; path <= last; path++)
                if ((p=conf->multipaths+path)->rdev == NULL) {
                        q = rdev->bdev->bd_disk->queue;
                        blk_queue_stack_limits(mddev->queue, q);
@@ -307,11 +312,13 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                        rdev->raid_disk = path;
                        set_bit(In_sync, &rdev->flags);
                        rcu_assign_pointer(p->rdev, rdev);
-                       found = 1;
+                       err = 0;
+                       break;
                }
 
        print_multipath_conf(conf);
-       return found;
+
+       return err;
 }
 
 static int multipath_remove_disk(mddev_t *mddev, int number)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c610b947218afb73f49982d59dd178a7fdbd0959..f05d5983efb64db1654edef6412eccf1b08ef1f1 100644
@@ -1100,11 +1100,16 @@ static int raid1_spare_active(mddev_t *mddev)
 static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        conf_t *conf = mddev->private;
-       int found = 0;
+       int err = -EEXIST;
        int mirror = 0;
        mirror_info_t *p;
+       int first = 0;
+       int last = mddev->raid_disks - 1;
 
-       for (mirror=0; mirror < mddev->raid_disks; mirror++)
+       if (rdev->raid_disk >= 0)
+               first = last = rdev->raid_disk;
+
+       for (mirror = first; mirror <= last; mirror++)
                if ( !(p=conf->mirrors+mirror)->rdev) {
 
                        blk_queue_stack_limits(mddev->queue,
@@ -1119,7 +1124,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 
                        p->head_position = 0;
                        rdev->raid_disk = mirror;
-                       found = 1;
+                       err = 0;
                        /* As all devices are equivalent, we don't need a full recovery
                         * if this was recently any drive of the array
                         */
@@ -1130,7 +1135,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                }
 
        print_conf(conf);
-       return found;
+       return err;
 }
 
 static int raid1_remove_disk(mddev_t *mddev, int number)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a71277b640ab036bea60206e29900c034ddd50a3..df08a9fa3a1fc8c694f42a1a626736709f8b808d 100644
@@ -1113,24 +1113,30 @@ static int raid10_spare_active(mddev_t *mddev)
 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        conf_t *conf = mddev->private;
-       int found = 0;
+       int err = -EEXIST;
        int mirror;
        mirror_info_t *p;
+       int first = 0;
+       int last = mddev->raid_disks - 1;
 
        if (mddev->recovery_cp < MaxSector)
                /* only hot-add to in-sync arrays, as recovery is
                 * very different from resync
                 */
-               return 0;
+               return -EBUSY;
        if (!enough(conf))
-               return 0;
+               return -EINVAL;
+
+       if (rdev->raid_disk >= 0)
+               first = last = rdev->raid_disk;
 
        if (rdev->saved_raid_disk >= 0 &&
+           rdev->saved_raid_disk >= first &&
            conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
                mirror = rdev->saved_raid_disk;
        else
-               mirror = 0;
-       for ( ; mirror < mddev->raid_disks; mirror++)
+               mirror = first;
+       for ( ; mirror <= last ; mirror++)
                if ( !(p=conf->mirrors+mirror)->rdev) {
 
                        blk_queue_stack_limits(mddev->queue,
@@ -1145,7 +1151,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 
                        p->head_position = 0;
                        rdev->raid_disk = mirror;
-                       found = 1;
+                       err = 0;
                        if (rdev->saved_raid_disk != mirror)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
@@ -1153,7 +1159,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                }
 
        print_conf(conf);
-       return found;
+       return err;
 }
 
 static int raid10_remove_disk(mddev_t *mddev, int number)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 54c8ee28fcc4940517262389b16d4de1030709f3..442622067caef55823100e73ea33e9eb11dcf8ac 100644
@@ -115,15 +115,20 @@ static void return_io(struct bio *return_bi)
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
-               bi->bi_end_io(bi,
-                             test_bit(BIO_UPTODATE, &bi->bi_flags)
-                               ? 0 : -EIO);
+               bio_endio(bi, 0);
                bi = return_bi;
        }
 }
 
 static void print_raid5_conf (raid5_conf_t *conf);
 
+static int stripe_operations_active(struct stripe_head *sh)
+{
+       return sh->check_state || sh->reconstruct_state ||
+              test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
+              test_bit(STRIPE_COMPUTE_RUN, &sh->state);
+}
+
 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 {
        if (atomic_dec_and_test(&sh->count)) {
@@ -143,7 +148,7 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                        }
                        md_wakeup_thread(conf->mddev->thread);
                } else {
-                       BUG_ON(sh->ops.pending);
+                       BUG_ON(stripe_operations_active(sh));
                        if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
                                atomic_dec(&conf->preread_active_stripes);
                                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
@@ -245,7 +250,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
 
        BUG_ON(atomic_read(&sh->count) != 0);
        BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
-       BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete);
+       BUG_ON(stripe_operations_active(sh));
 
        CHECK_DEVLOCK();
        pr_debug("init_stripe called, stripe %llu\n",
@@ -346,62 +351,18 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
        return sh;
 }
 
-/* test_and_ack_op() ensures that we only dequeue an operation once */
-#define test_and_ack_op(op, pend) \
-do {                                                   \
-       if (test_bit(op, &sh->ops.pending) &&           \
-               !test_bit(op, &sh->ops.complete)) {     \
-               if (test_and_set_bit(op, &sh->ops.ack)) \
-                       clear_bit(op, &pend);           \
-               else                                    \
-                       ack++;                          \
-       } else                                          \
-               clear_bit(op, &pend);                   \
-} while (0)
-
-/* find new work to run, do not resubmit work that is already
- * in flight
- */
-static unsigned long get_stripe_work(struct stripe_head *sh)
-{
-       unsigned long pending;
-       int ack = 0;
-
-       pending = sh->ops.pending;
-
-       test_and_ack_op(STRIPE_OP_BIOFILL, pending);
-       test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending);
-       test_and_ack_op(STRIPE_OP_PREXOR, pending);
-       test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
-       test_and_ack_op(STRIPE_OP_POSTXOR, pending);
-       test_and_ack_op(STRIPE_OP_CHECK, pending);
-       if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
-               ack++;
-
-       sh->ops.count -= ack;
-       if (unlikely(sh->ops.count < 0)) {
-               printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx "
-                       "ops.complete: %#lx\n", pending, sh->ops.pending,
-                       sh->ops.ack, sh->ops.complete);
-               BUG();
-       }
-
-       return pending;
-}
-
 static void
 raid5_end_read_request(struct bio *bi, int error);
 static void
 raid5_end_write_request(struct bio *bi, int error);
 
-static void ops_run_io(struct stripe_head *sh)
+static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 {
        raid5_conf_t *conf = sh->raid_conf;
        int i, disks = sh->disks;
 
        might_sleep();
 
-       set_bit(STRIPE_IO_STARTED, &sh->state);
        for (i = disks; i--; ) {
                int rw;
                struct bio *bi;
@@ -430,11 +391,11 @@ static void ops_run_io(struct stripe_head *sh)
                rcu_read_unlock();
 
                if (rdev) {
-                       if (test_bit(STRIPE_SYNCING, &sh->state) ||
-                               test_bit(STRIPE_EXPAND_SOURCE, &sh->state) ||
-                               test_bit(STRIPE_EXPAND_READY, &sh->state))
+                       if (s->syncing || s->expanding || s->expanded)
                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
+                       set_bit(STRIPE_IO_STARTED, &sh->state);
+
                        bi->bi_bdev = rdev->bdev;
                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
                                __func__, (unsigned long long)sh->sector,
@@ -528,38 +489,34 @@ static void ops_complete_biofill(void *stripe_head_ref)
                (unsigned long long)sh->sector);
 
        /* clear completed biofills */
+       spin_lock_irq(&conf->device_lock);
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
 
                /* acknowledge completion of a biofill operation */
                /* and check if we need to reply to a read request,
                 * new R5_Wantfill requests are held off until
-                * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+                * !STRIPE_BIOFILL_RUN
                 */
                if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi, *rbi2;
 
-                       /* The access to dev->read is outside of the
-                        * spin_lock_irq(&conf->device_lock), but is protected
-                        * by the STRIPE_OP_BIOFILL pending bit
-                        */
                        BUG_ON(!dev->read);
                        rbi = dev->read;
                        dev->read = NULL;
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
-                               spin_lock_irq(&conf->device_lock);
                                if (--rbi->bi_phys_segments == 0) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
-                               spin_unlock_irq(&conf->device_lock);
                                rbi = rbi2;
                        }
                }
        }
-       set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
+       spin_unlock_irq(&conf->device_lock);
+       clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 
        return_io(return_bi);
 
@@ -610,13 +567,14 @@ static void ops_complete_compute5(void *stripe_head_ref)
        set_bit(R5_UPTODATE, &tgt->flags);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        clear_bit(R5_Wantcompute, &tgt->flags);
-       set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+       clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
+       if (sh->check_state == check_state_compute_run)
+               sh->check_state = check_state_compute_result;
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
 }
 
-static struct dma_async_tx_descriptor *
-ops_run_compute5(struct stripe_head *sh, unsigned long pending)
+static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
 {
        /* kernel stack size limits the total number of disks */
        int disks = sh->disks;
@@ -646,10 +604,6 @@ ops_run_compute5(struct stripe_head *sh, unsigned long pending)
                        ASYNC_TX_XOR_ZERO_DST, NULL,
                        ops_complete_compute5, sh);
 
-       /* ack now if postxor is not set to be run */
-       if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending))
-               async_tx_ack(tx);
-
        return tx;
 }
 
@@ -659,8 +613,6 @@ static void ops_complete_prexor(void *stripe_head_ref)
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
-
-       set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
 }
 
 static struct dma_async_tx_descriptor *
@@ -680,7 +632,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                /* Only process blocks that are known to be uptodate */
-               if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
+               if (test_bit(R5_Wantdrain, &dev->flags))
                        xor_srcs[count++] = dev->page;
        }
 
@@ -692,16 +644,10 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 }
 
 static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
-                unsigned long pending)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
        int disks = sh->disks;
-       int pd_idx = sh->pd_idx, i;
-
-       /* check if prexor is active which means only process blocks
-        * that are part of a read-modify-write (Wantprexor)
-        */
-       int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
+       int i;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
@@ -709,20 +655,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                struct bio *chosen;
-               int towrite;
 
-               towrite = 0;
-               if (prexor) { /* rmw */
-                       if (dev->towrite &&
-                           test_bit(R5_Wantprexor, &dev->flags))
-                               towrite = 1;
-               } else { /* rcw */
-                       if (i != pd_idx && dev->towrite &&
-                               test_bit(R5_LOCKED, &dev->flags))
-                               towrite = 1;
-               }
-
-               if (towrite) {
+               if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
                        struct bio *wbi;
 
                        spin_lock(&sh->lock);
@@ -745,18 +679,6 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 }
 
 static void ops_complete_postxor(void *stripe_head_ref)
-{
-       struct stripe_head *sh = stripe_head_ref;
-
-       pr_debug("%s: stripe %llu\n", __func__,
-               (unsigned long long)sh->sector);
-
-       set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
-       set_bit(STRIPE_HANDLE, &sh->state);
-       release_stripe(sh);
-}
-
-static void ops_complete_write(void *stripe_head_ref)
 {
        struct stripe_head *sh = stripe_head_ref;
        int disks = sh->disks, i, pd_idx = sh->pd_idx;
@@ -770,16 +692,21 @@ static void ops_complete_write(void *stripe_head_ref)
                        set_bit(R5_UPTODATE, &dev->flags);
        }
 
-       set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
-       set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+       if (sh->reconstruct_state == reconstruct_state_drain_run)
+               sh->reconstruct_state = reconstruct_state_drain_result;
+       else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
+               sh->reconstruct_state = reconstruct_state_prexor_drain_result;
+       else {
+               BUG_ON(sh->reconstruct_state != reconstruct_state_run);
+               sh->reconstruct_state = reconstruct_state_result;
+       }
 
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
 }
 
 static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
-               unsigned long pending)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
        /* kernel stack size limits the total number of disks */
        int disks = sh->disks;
@@ -787,9 +714,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
 
        int count = 0, pd_idx = sh->pd_idx, i;
        struct page *xor_dest;
-       int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
+       int prexor = 0;
        unsigned long flags;
-       dma_async_tx_callback callback;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
@@ -797,7 +723,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
        /* check if prexor is active which means only process blocks
         * that are part of a read-modify-write (written)
         */
-       if (prexor) {
+       if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+               prexor = 1;
                xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
@@ -813,10 +740,6 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
                }
        }
 
-       /* check whether this postxor is part of a write */
-       callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
-               ops_complete_write : ops_complete_postxor;
-
        /* 1/ if we prexor'd then the dest is reused as a source
         * 2/ if we did not prexor then we are redoing the parity
         * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
@@ -830,25 +753,20 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
        if (unlikely(count == 1)) {
                flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-                       flags, tx, callback, sh);
+                       flags, tx, ops_complete_postxor, sh);
        } else
                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-                       flags, tx, callback, sh);
+                       flags, tx, ops_complete_postxor, sh);
 }
 
 static void ops_complete_check(void *stripe_head_ref)
 {
        struct stripe_head *sh = stripe_head_ref;
-       int pd_idx = sh->pd_idx;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
-       if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
-               sh->ops.zero_sum_result == 0)
-               set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-
-       set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
+       sh->check_state = check_state_check_result;
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
 }
@@ -875,46 +793,42 @@ static void ops_run_check(struct stripe_head *sh)
        tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
                &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
 
-       if (tx)
-               set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
-       else
-               clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
-
        atomic_inc(&sh->count);
        tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
                ops_complete_check, sh);
 }
 
-static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
+static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
 {
        int overlap_clear = 0, i, disks = sh->disks;
        struct dma_async_tx_descriptor *tx = NULL;
 
-       if (test_bit(STRIPE_OP_BIOFILL, &pending)) {
+       if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
                ops_run_biofill(sh);
                overlap_clear++;
        }
 
-       if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending))
-               tx = ops_run_compute5(sh, pending);
+       if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
+               tx = ops_run_compute5(sh);
+               /* terminate the chain if postxor is not set to be run */
+               if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
+                       async_tx_ack(tx);
+       }
 
-       if (test_bit(STRIPE_OP_PREXOR, &pending))
+       if (test_bit(STRIPE_OP_PREXOR, &ops_request))
                tx = ops_run_prexor(sh, tx);
 
-       if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
-               tx = ops_run_biodrain(sh, tx, pending);
+       if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
+               tx = ops_run_biodrain(sh, tx);
                overlap_clear++;
        }
 
-       if (test_bit(STRIPE_OP_POSTXOR, &pending))
-               ops_run_postxor(sh, tx, pending);
+       if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
+               ops_run_postxor(sh, tx);
 
-       if (test_bit(STRIPE_OP_CHECK, &pending))
+       if (test_bit(STRIPE_OP_CHECK, &ops_request))
                ops_run_check(sh);
 
-       if (test_bit(STRIPE_OP_IO, &pending))
-               ops_run_io(sh);
-
        if (overlap_clear)
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
@@ -1703,11 +1617,11 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
        }
 }
 
-static int
-handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
+static void
+schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
+                        int rcw, int expand)
 {
        int i, pd_idx = sh->pd_idx, disks = sh->disks;
-       int locked = 0;
 
        if (rcw) {
                /* if we are not expanding this is a proper write request, and
@@ -1715,53 +1629,48 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
                 * stripe cache
                 */
                if (!expand) {
-                       set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
-                       sh->ops.count++;
-               }
+                       sh->reconstruct_state = reconstruct_state_drain_run;
+                       set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+               } else
+                       sh->reconstruct_state = reconstruct_state_run;
 
-               set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
-               sh->ops.count++;
+               set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
 
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
 
                        if (dev->towrite) {
                                set_bit(R5_LOCKED, &dev->flags);
+                               set_bit(R5_Wantdrain, &dev->flags);
                                if (!expand)
                                        clear_bit(R5_UPTODATE, &dev->flags);
-                               locked++;
+                               s->locked++;
                        }
                }
-               if (locked + 1 == disks)
+               if (s->locked + 1 == disks)
                        if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
                                atomic_inc(&sh->raid_conf->pending_full_writes);
        } else {
                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
-               set_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
-               set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
-               set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
-
-               sh->ops.count += 3;
+               sh->reconstruct_state = reconstruct_state_prexor_drain_run;
+               set_bit(STRIPE_OP_PREXOR, &s->ops_request);
+               set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+               set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
 
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (i == pd_idx)
                                continue;
 
-                       /* For a read-modify write there may be blocks that are
-                        * locked for reading while others are ready to be
-                        * written so we distinguish these blocks by the
-                        * R5_Wantprexor bit
-                        */
                        if (dev->towrite &&
                            (test_bit(R5_UPTODATE, &dev->flags) ||
-                           test_bit(R5_Wantcompute, &dev->flags))) {
-                               set_bit(R5_Wantprexor, &dev->flags);
+                            test_bit(R5_Wantcompute, &dev->flags))) {
+                               set_bit(R5_Wantdrain, &dev->flags);
                                set_bit(R5_LOCKED, &dev->flags);
                                clear_bit(R5_UPTODATE, &dev->flags);
-                               locked++;
+                               s->locked++;
                        }
                }
        }
@@ -1771,13 +1680,11 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
         */
        set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
        clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-       locked++;
+       s->locked++;
 
-       pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
+       pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
                __func__, (unsigned long long)sh->sector,
-               locked, sh->ops.pending);
-
-       return locked;
+               s->locked, s->ops_request);
 }
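
The else branch above schedules the full prexor -> biodrain -> postxor chain, i.e. the classic read-modify-write parity update. Its arithmetic, self-contained, with single bytes standing in for stripe blocks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t d_old = 0x5a, d_new = 0x3c;	/* block being rewritten */
	uint8_t other = 0x77;			/* untouched data block */
	uint8_t p_old = d_old ^ other;		/* current parity */

	uint8_t p_pre = p_old ^ d_old;		/* prexor: back out old data */
	uint8_t p_new = p_pre ^ d_new;		/* postxor: fold in new data */

	/* must equal a from-scratch reconstruct-write */
	printf("%s\n", p_new == (d_new ^ other) ? "match" : "mismatch");
	return 0;
}
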
 
 /*
@@ -1876,7 +1783,7 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
 }
 
 static void
-handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
+handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
                                struct stripe_head_state *s, int disks,
                                struct bio **return_bi)
 {
@@ -1967,48 +1874,38 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
                        md_wakeup_thread(conf->mddev->thread);
 }
 
-/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
- * to process
+/* fetch_block5 - checks the given member device to see if its data needs
+ * to be read or computed to satisfy a request.
+ *
+ * Returns 1 when no more member devices need to be checked, otherwise returns
+ * 0 to tell the loop in handle_stripe_fill5 to continue
  */
-static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
-                       struct stripe_head_state *s, int disk_idx, int disks)
+static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
+                       int disk_idx, int disks)
 {
        struct r5dev *dev = &sh->dev[disk_idx];
        struct r5dev *failed_dev = &sh->dev[s->failed_num];
 
-       /* don't schedule compute operations or reads on the parity block while
-        * a check is in flight
-        */
-       if ((disk_idx == sh->pd_idx) &&
-            test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
-               return ~0;
-
        /* is the data in this block needed, and can we get it? */
        if (!test_bit(R5_LOCKED, &dev->flags) &&
-           !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
-           (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
-            s->syncing || s->expanding || (s->failed &&
-            (failed_dev->toread || (failed_dev->towrite &&
-            !test_bit(R5_OVERWRITE, &failed_dev->flags)
-            ))))) {
-               /* 1/ We would like to get this block, possibly by computing it,
-                * but we might not be able to.
-                *
-                * 2/ Since parity check operations potentially make the parity
-                * block !uptodate it will need to be refreshed before any
-                * compute operations on data disks are scheduled.
-                *
-                * 3/ We hold off parity block re-reads until check operations
-                * have quiesced.
+           !test_bit(R5_UPTODATE, &dev->flags) &&
+           (dev->toread ||
+            (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+            s->syncing || s->expanding ||
+            (s->failed &&
+             (failed_dev->toread ||
+              (failed_dev->towrite &&
+               !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
+               /* We would like to get this block, possibly by computing it,
+                * otherwise read it if the backing disk is insync
                 */
                if ((s->uptodate == disks - 1) &&
-                   (s->failed && disk_idx == s->failed_num) &&
-                   !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
-                       set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+                   (s->failed && disk_idx == s->failed_num)) {
+                       set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+                       set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
                        set_bit(R5_Wantcompute, &dev->flags);
                        sh->ops.target = disk_idx;
                        s->req_compute = 1;
-                       sh->ops.count++;
                        /* Careful: from this point on 'uptodate' is in the eye
                         * of raid5_run_ops which services 'compute' operations
                         * before writes. R5_Wantcompute flags a block that will
@@ -2016,58 +1913,40 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
                         * subsequent operation.
                         */
                        s->uptodate++;
-                       return 0; /* uptodate + compute == disks */
-               } else if ((s->uptodate < disks - 1) &&
-                       test_bit(R5_Insync, &dev->flags)) {
-                       /* Note: we hold off compute operations while checks are
-                        * in flight, but we still prefer 'compute' over 'read'
-                        * hence we only read if (uptodate < * disks-1)
-                        */
+                       return 1; /* uptodate + compute == disks */
+               } else if (test_bit(R5_Insync, &dev->flags)) {
                        set_bit(R5_LOCKED, &dev->flags);
                        set_bit(R5_Wantread, &dev->flags);
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
                        s->locked++;
                        pr_debug("Reading block %d (sync=%d)\n", disk_idx,
                                s->syncing);
                }
        }
 
-       return ~0;
+       return 0;
 }
 
-static void handle_issuing_new_read_requests5(struct stripe_head *sh,
+/**
+ * handle_stripe_fill5 - read or compute data to satisfy pending requests.
+ */
+static void handle_stripe_fill5(struct stripe_head *sh,
                        struct stripe_head_state *s, int disks)
 {
        int i;
 
-       /* Clear completed compute operations.  Parity recovery
-        * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled
-        * later on in this routine
-        */
-       if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
-               !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
-               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
-               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
-               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
-       }
-
        /* look for blocks to read/compute, skip this if a compute
         * is already in flight, or if the stripe contents are in the
         * midst of changing due to a write
         */
-       if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
-               !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) &&
-               !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+       if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
+           !sh->reconstruct_state)
                for (i = disks; i--; )
-                       if (__handle_issuing_new_read_requests5(
-                               sh, s, i, disks) == 0)
+                       if (fetch_block5(sh, s, i, disks))
                                break;
-       }
        set_bit(STRIPE_HANDLE, &sh->state);
 }
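
The (s->uptodate == disks - 1) test in fetch_block5 is what legitimizes the compute path: when every other block in the stripe is valid, the missing one is simply the XOR of the rest. A standalone demonstration of that property:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NDISKS 4
#define BLK 8

/* Reconstruct one missing RAID5 block as the XOR of the others; only
 * valid when the remaining NDISKS - 1 blocks are all up to date. */
static void compute_block(uint8_t blk[NDISKS][BLK], int missing)
{
	memset(blk[missing], 0, BLK);
	for (int d = 0; d < NDISKS; d++) {
		if (d == missing)
			continue;
		for (int i = 0; i < BLK; i++)
			blk[missing][i] ^= blk[d][i];
	}
}

int main(void)
{
	uint8_t blk[NDISKS][BLK] = { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
	uint8_t saved[BLK];

	/* disk 3 holds parity: XOR of disks 0..2 */
	compute_block(blk, 3);

	/* now pretend disk 1 failed and rebuild it */
	memcpy(saved, blk[1], BLK);
	compute_block(blk, 1);
	printf("%s\n", memcmp(saved, blk[1], BLK) ? "FAIL" : "rebuilt ok");
	return 0;
}
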
 
-static void handle_issuing_new_read_requests6(struct stripe_head *sh,
+static void handle_stripe_fill6(struct stripe_head *sh,
                        struct stripe_head_state *s, struct r6_state *r6s,
                        int disks)
 {
@@ -2126,12 +2005,12 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
 }
 
 
-/* handle_completed_write_requests
+/* handle_stripe_clean_event
  * any written block on an uptodate or failed drive can be returned.
  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
  * never LOCKED, so we don't need to test 'failed' directly.
  */
-static void handle_completed_write_requests(raid5_conf_t *conf,
+static void handle_stripe_clean_event(raid5_conf_t *conf,
        struct stripe_head *sh, int disks, struct bio **return_bi)
 {
        int i;
@@ -2176,7 +2055,7 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
                        md_wakeup_thread(conf->mddev->thread);
 }
 
-static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
+static void handle_stripe_dirtying5(raid5_conf_t *conf,
                struct stripe_head *sh, struct stripe_head_state *s, int disks)
 {
        int rmw = 0, rcw = 0, i;
@@ -2220,9 +2099,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                                                "%d for r-m-w\n", i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
-                                       if (!test_and_set_bit(
-                                               STRIPE_OP_IO, &sh->ops.pending))
-                                               sh->ops.count++;
                                        s->locked++;
                                } else {
                                        set_bit(STRIPE_DELAYED, &sh->state);
@@ -2246,9 +2122,6 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                                                "%d for Reconstruct\n", i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
-                                       if (!test_and_set_bit(
-                                               STRIPE_OP_IO, &sh->ops.pending))
-                                               sh->ops.count++;
                                        s->locked++;
                                } else {
                                        set_bit(STRIPE_DELAYED, &sh->state);
@@ -2266,14 +2139,13 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
         * simultaneously.  If this is not the case then new writes need to be
         * held off until the compute completes.
         */
-       if ((s->req_compute ||
-           !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
-               (s->locked == 0 && (rcw == 0 || rmw == 0) &&
-               !test_bit(STRIPE_BIT_DELAY, &sh->state)))
-               s->locked += handle_write_operations5(sh, rcw == 0, 0);
+       if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
+           (s->locked == 0 && (rcw == 0 || rmw == 0) &&
+           !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+               schedule_reconstruction5(sh, s, rcw == 0, 0);
 }
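
For orientation, the rmw/rcw tallies computed earlier in this function express the classic small-write trade-off: read-modify-write must read the target blocks plus parity, reconstruct-write must read whatever it will not overwrite. A toy comparison, assuming nothing is already in the stripe cache:

#include <stdio.h>

int main(void)
{
	int disks = 5, to_write = 2;		/* partial-stripe write */

	int rmw = to_write + 1;			/* old data + old parity */
	int rcw = disks - 1 - to_write;		/* remaining data blocks */

	printf("rmw reads %d, rcw reads %d -> %s\n", rmw, rcw,
	       rmw < rcw ? "read-modify-write" : "reconstruct-write");
	return 0;
}
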
 
-static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
+static void handle_stripe_dirtying6(raid5_conf_t *conf,
                struct stripe_head *sh, struct stripe_head_state *s,
                struct r6_state *r6s, int disks)
 {
@@ -2376,92 +2248,86 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
                                struct stripe_head_state *s, int disks)
 {
-       int canceled_check = 0;
+       struct r5dev *dev = NULL;
 
        set_bit(STRIPE_HANDLE, &sh->state);
 
-       /* complete a check operation */
-       if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
-               clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
-               clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
+       switch (sh->check_state) {
+       case check_state_idle:
+               /* start a new check operation if there are no failures */
                if (s->failed == 0) {
-                       if (sh->ops.zero_sum_result == 0)
-                               /* parity is correct (on disc,
-                                * not in buffer any more)
-                                */
-                               set_bit(STRIPE_INSYNC, &sh->state);
-                       else {
-                               conf->mddev->resync_mismatches +=
-                                       STRIPE_SECTORS;
-                               if (test_bit(
-                                    MD_RECOVERY_CHECK, &conf->mddev->recovery))
-                                       /* don't try to repair!! */
-                                       set_bit(STRIPE_INSYNC, &sh->state);
-                               else {
-                                       set_bit(STRIPE_OP_COMPUTE_BLK,
-                                               &sh->ops.pending);
-                                       set_bit(STRIPE_OP_MOD_REPAIR_PD,
-                                               &sh->ops.pending);
-                                       set_bit(R5_Wantcompute,
-                                               &sh->dev[sh->pd_idx].flags);
-                                       sh->ops.target = sh->pd_idx;
-                                       sh->ops.count++;
-                                       s->uptodate++;
-                               }
-                       }
-               } else
-                       canceled_check = 1; /* STRIPE_INSYNC is not set */
-       }
-
-       /* start a new check operation if there are no failures, the stripe is
-        * not insync, and a repair is not in flight
-        */
-       if (s->failed == 0 &&
-           !test_bit(STRIPE_INSYNC, &sh->state) &&
-           !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
-               if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
                        BUG_ON(s->uptodate != disks);
+                       sh->check_state = check_state_run;
+                       set_bit(STRIPE_OP_CHECK, &s->ops_request);
                        clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
-                       sh->ops.count++;
                        s->uptodate--;
+                       break;
                }
-       }
-
-       /* check if we can clear a parity disk reconstruct */
-       if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
-           test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
-
-               clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
-               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
-               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
-               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
-       }
-
+               dev = &sh->dev[s->failed_num];
+               /* fall through */
+       case check_state_compute_result:
+               sh->check_state = check_state_idle;
+               if (!dev)
+                       dev = &sh->dev[sh->pd_idx];
+
+               /* check that a write has not made the stripe insync */
+               if (test_bit(STRIPE_INSYNC, &sh->state))
+                       break;
 
-       /* Wait for check parity and compute block operations to complete
-        * before write-back.  If a failure occurred while the check operation
-        * was in flight we need to cycle this stripe through handle_stripe
-        * since the parity block may not be uptodate
-        */
-       if (!canceled_check && !test_bit(STRIPE_INSYNC, &sh->state) &&
-           !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
-           !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
-               struct r5dev *dev;
                /* either failed parity check, or recovery is happening */
-               if (s->failed == 0)
-                       s->failed_num = sh->pd_idx;
-               dev = &sh->dev[s->failed_num];
                BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
                BUG_ON(s->uptodate != disks);
 
                set_bit(R5_LOCKED, &dev->flags);
+               s->locked++;
                set_bit(R5_Wantwrite, &dev->flags);
-               if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                       sh->ops.count++;
 
                clear_bit(STRIPE_DEGRADED, &sh->state);
-               s->locked++;
                set_bit(STRIPE_INSYNC, &sh->state);
+               break;
+       case check_state_run:
+               break; /* we will be called again upon completion */
+       case check_state_check_result:
+               sh->check_state = check_state_idle;
+
+               /* if a failure occurred during the check operation, leave
+                * STRIPE_INSYNC not set and let the stripe be handled again
+                */
+               if (s->failed)
+                       break;
+
+               /* handle a successful check operation, if parity is correct
+                * we are done.  Otherwise update the mismatch count and repair
+                * parity if !MD_RECOVERY_CHECK
+                */
+               if (sh->ops.zero_sum_result == 0)
+                       /* parity is correct (on disc,
+                        * not in buffer any more)
+                        */
+                       set_bit(STRIPE_INSYNC, &sh->state);
+               else {
+                       conf->mddev->resync_mismatches += STRIPE_SECTORS;
+                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                               /* don't try to repair!! */
+                               set_bit(STRIPE_INSYNC, &sh->state);
+                       else {
+                               sh->check_state = check_state_compute_run;
+                               set_bit(STRIPE_COMPUTE_RUN, &sh->state);
+                               set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
+                               set_bit(R5_Wantcompute,
+                                       &sh->dev[sh->pd_idx].flags);
+                               sh->ops.target = sh->pd_idx;
+                               s->uptodate++;
+                       }
+               }
+               break;
+       case check_state_compute_run:
+               break;
+       default:
+               printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+                      __func__, sh->check_state,
+                      (unsigned long long) sh->sector);
+               BUG();
        }
 }
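
handle_parity_checks5 now reads as an explicit state machine. A simplified userspace model of the transitions it drives (in the driver, the _run to _result edges are taken by async completion callbacks, and the idle case also covers the failed-device repair path):

#include <stdio.h>

enum check_states {
	check_state_idle, check_state_run, check_state_check_result,
	check_state_compute_run, check_state_compute_result,
};

/* 'parity_ok' stands in for ops.zero_sum_result == 0 */
static enum check_states step(enum check_states st, int parity_ok)
{
	switch (st) {
	case check_state_idle:
		return check_state_run;		  /* schedule the check */
	case check_state_run:
		return check_state_check_result;  /* async op completed */
	case check_state_check_result:
		return parity_ok ? check_state_idle
				 : check_state_compute_run; /* repair */
	case check_state_compute_run:
		return check_state_compute_result;
	case check_state_compute_result:
		return check_state_idle;  /* write back repaired parity */
	}
	return check_state_idle;
}

int main(void)
{
	enum check_states st = check_state_idle;

	for (int i = 0; i < 6; i++) {
		printf("state %d\n", st);
		st = step(st, /* parity_ok = */ 0);
	}
	return 0;
}
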
 
@@ -2646,15 +2512,14 @@ static void handle_stripe5(struct stripe_head *sh)
        struct bio *return_bi = NULL;
        struct stripe_head_state s;
        struct r5dev *dev;
-       unsigned long pending = 0;
        mdk_rdev_t *blocked_rdev = NULL;
        int prexor;
 
        memset(&s, 0, sizeof(s));
-       pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
-               "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state,
-               atomic_read(&sh->count), sh->pd_idx,
-               sh->ops.pending, sh->ops.ack, sh->ops.complete);
+       pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
+                "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
+                atomic_read(&sh->count), sh->pd_idx, sh->check_state,
+                sh->reconstruct_state);
 
        spin_lock(&sh->lock);
        clear_bit(STRIPE_HANDLE, &sh->state);
@@ -2663,15 +2528,8 @@ static void handle_stripe5(struct stripe_head *sh)
        s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
        s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
        s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
-       /* Now to look around and see what can be done */
-
-       /* clean-up completed biofill operations */
-       if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) {
-               clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
-               clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
-               clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete);
-       }
 
+       /* Now to look around and see what can be done */
        rcu_read_lock();
        for (i=disks; i--; ) {
                mdk_rdev_t *rdev;
@@ -2685,10 +2543,10 @@ static void handle_stripe5(struct stripe_head *sh)
                /* maybe we can request a biofill operation
                 *
                 * new wantfill requests are only permitted while
-                * STRIPE_OP_BIOFILL is clear
+                * ops_complete_biofill is guaranteed to be inactive
                 */
                if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
-                       !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+                   !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
                        set_bit(R5_Wantfill, &dev->flags);
 
                /* now count some things */
@@ -2732,8 +2590,10 @@ static void handle_stripe5(struct stripe_head *sh)
                goto unlock;
        }
 
-       if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
-               sh->ops.count++;
+       if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
+               set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
+               set_bit(STRIPE_BIOFILL_RUN, &sh->state);
+       }
 
        pr_debug("locked=%d uptodate=%d to_read=%d"
                " to_write=%d failed=%d failed_num=%d\n",
@@ -2743,8 +2603,7 @@ static void handle_stripe5(struct stripe_head *sh)
         * need to be failed
         */
        if (s.failed > 1 && s.to_read+s.to_write+s.written)
-               handle_requests_to_failed_array(conf, sh, &s, disks,
-                                               &return_bi);
+               handle_failed_stripe(conf, sh, &s, disks, &return_bi);
        if (s.failed > 1 && s.syncing) {
                md_done_sync(conf->mddev, STRIPE_SECTORS,0);
                clear_bit(STRIPE_SYNCING, &sh->state);
@@ -2760,48 +2619,25 @@ static void handle_stripe5(struct stripe_head *sh)
               !test_bit(R5_LOCKED, &dev->flags) &&
               test_bit(R5_UPTODATE, &dev->flags)) ||
               (s.failed == 1 && s.failed_num == sh->pd_idx)))
-               handle_completed_write_requests(conf, sh, disks, &return_bi);
+               handle_stripe_clean_event(conf, sh, disks, &return_bi);
 
        /* Now we might consider reading some blocks, either to check/generate
         * parity, or to satisfy requests
         * or to load a block that is being partially written.
         */
        if (s.to_read || s.non_overwrite ||
-           (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding ||
-           test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
-               handle_issuing_new_read_requests5(sh, &s, disks);
+           (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+               handle_stripe_fill5(sh, &s, disks);
 
        /* Now we check to see if any write operations have recently
         * completed
         */
-
-       /* leave prexor set until postxor is done, allows us to distinguish
-        * a rmw from a rcw during biodrain
-        */
        prexor = 0;
-       if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) &&
-               test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
-
+       if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
                prexor = 1;
-               clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
-               clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack);
-               clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
-
-               for (i = disks; i--; )
-                       clear_bit(R5_Wantprexor, &sh->dev[i].flags);
-       }
-
-       /* if only POSTXOR is set then this is an 'expand' postxor */
-       if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) &&
-               test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
-
-               clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
-               clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack);
-               clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
-
-               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
-               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
-               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+       if (sh->reconstruct_state == reconstruct_state_drain_result ||
+           sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
+               sh->reconstruct_state = reconstruct_state_idle;
 
                /* All the 'written' buffers and the parity block are ready to
                 * be written back to disk
@@ -2813,9 +2649,6 @@ static void handle_stripe5(struct stripe_head *sh)
                                (i == sh->pd_idx || dev->written)) {
                                pr_debug("Writing block %d\n", i);
                                set_bit(R5_Wantwrite, &dev->flags);
-                               if (!test_and_set_bit(
-                                   STRIPE_OP_IO, &sh->ops.pending))
-                                       sh->ops.count++;
                                if (prexor)
                                        continue;
                                if (!test_bit(R5_Insync, &dev->flags) ||
@@ -2837,20 +2670,18 @@ static void handle_stripe5(struct stripe_head *sh)
         * 2/ A 'check' operation is in flight, as it may clobber the parity
         *    block.
         */
-       if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) &&
-                         !test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
-               handle_issuing_new_write_requests5(conf, sh, &s, disks);
+       if (s.to_write && !sh->reconstruct_state && !sh->check_state)
+               handle_stripe_dirtying5(conf, sh, &s, disks);
 
        /* maybe we need to check and possibly fix the parity for this stripe
         * Any reads will already have been scheduled, so we just see if enough
         * data is available.  The parity check is held off while parity
         * dependent operations are in flight.
         */
-       if ((s.syncing && s.locked == 0 &&
-            !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
-            !test_bit(STRIPE_INSYNC, &sh->state)) ||
-             test_bit(STRIPE_OP_CHECK, &sh->ops.pending) ||
-             test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending))
+       if (sh->check_state ||
+           (s.syncing && s.locked == 0 &&
+            !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
+            !test_bit(STRIPE_INSYNC, &sh->state)))
                handle_parity_checks5(conf, sh, &s, disks);
 
        if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
@@ -2869,52 +2700,35 @@ static void handle_stripe5(struct stripe_head *sh)
                dev = &sh->dev[s.failed_num];
                if (!test_bit(R5_ReWrite, &dev->flags)) {
                        set_bit(R5_Wantwrite, &dev->flags);
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
                        set_bit(R5_ReWrite, &dev->flags);
                        set_bit(R5_LOCKED, &dev->flags);
                        s.locked++;
                } else {
                        /* let's read it back */
                        set_bit(R5_Wantread, &dev->flags);
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
                        set_bit(R5_LOCKED, &dev->flags);
                        s.locked++;
                }
        }
 
-       /* Finish postxor operations initiated by the expansion
-        * process
-        */
-       if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
-               !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
-
+       /* Finish reconstruct operations initiated by the expansion process */
+       if (sh->reconstruct_state == reconstruct_state_result) {
+               sh->reconstruct_state = reconstruct_state_idle;
                clear_bit(STRIPE_EXPANDING, &sh->state);
-
-               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
-               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
-               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
-
                for (i = conf->raid_disks; i--; ) {
                        set_bit(R5_Wantwrite, &sh->dev[i].flags);
-                       set_bit(R5_LOCKED, &dev->flags);
+                       set_bit(R5_LOCKED, &sh->dev[i].flags);
                        s.locked++;
-                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
-                               sh->ops.count++;
                }
        }
 
        if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
-               !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+           !sh->reconstruct_state) {
                /* Need to write out all blocks after computing parity */
                sh->disks = conf->raid_disks;
                sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
                        conf->raid_disks);
-               s.locked += handle_write_operations5(sh, 1, 1);
-       } else if (s.expanded &&
-                  s.locked == 0 &&
-               !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+               schedule_reconstruction5(sh, &s, 1, 1);
+       } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
                clear_bit(STRIPE_EXPAND_READY, &sh->state);
                atomic_dec(&conf->reshape_stripes);
                wake_up(&conf->wait_for_overlap);
@@ -2922,12 +2736,9 @@ static void handle_stripe5(struct stripe_head *sh)
        }
 
        if (s.expanding && s.locked == 0 &&
-           !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+           !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
                handle_stripe_expansion(conf, sh, NULL);
 
-       if (sh->ops.count)
-               pending = get_stripe_work(sh);
-
  unlock:
        spin_unlock(&sh->lock);
 
@@ -2935,11 +2746,12 @@ static void handle_stripe5(struct stripe_head *sh)
        if (unlikely(blocked_rdev))
                md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
 
-       if (pending)
-               raid5_run_ops(sh, pending);
+       if (s.ops_request)
+               raid5_run_ops(sh, s.ops_request);
 
-       return_io(return_bi);
+       ops_run_io(sh, &s);
 
+       return_io(return_bi);
 }
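
Note the ordering this leaves at the tail of handle_stripe5: all decisions happen under sh->lock, while raid5_run_ops and ops_run_io execute strictly after the unlock, since both may block or recurse into the block layer. The gather-then-execute shape, modeled with pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Record decisions in local state while holding the lock, and do the
 * potentially blocking work only after dropping it. */
static void handle(void)
{
	unsigned long ops_request = 0;

	pthread_mutex_lock(&lock);
	ops_request |= 1UL << 3;	/* e.g. "drain" decided under lock */
	pthread_mutex_unlock(&lock);

	if (ops_request)
		printf("running ops %#lx outside the lock\n", ops_request);
}

int main(void)
{
	handle();
	return 0;
}
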
 
 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
@@ -3047,8 +2859,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
         * might need to be failed
         */
        if (s.failed > 2 && s.to_read+s.to_write+s.written)
-               handle_requests_to_failed_array(conf, sh, &s, disks,
-                                               &return_bi);
+               handle_failed_stripe(conf, sh, &s, disks, &return_bi);
        if (s.failed > 2 && s.syncing) {
                md_done_sync(conf->mddev, STRIPE_SECTORS,0);
                clear_bit(STRIPE_SYNCING, &sh->state);
@@ -3073,7 +2884,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
             ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
                             && !test_bit(R5_LOCKED, &qdev->flags)
                             && test_bit(R5_UPTODATE, &qdev->flags)))))
-               handle_completed_write_requests(conf, sh, disks, &return_bi);
+               handle_stripe_clean_event(conf, sh, disks, &return_bi);
 
        /* Now we might consider reading some blocks, either to check/generate
         * parity, or to satisfy requests
@@ -3081,11 +2892,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
         */
        if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
            (s.syncing && (s.uptodate < disks)) || s.expanding)
-               handle_issuing_new_read_requests6(sh, &s, &r6s, disks);
+               handle_stripe_fill6(sh, &s, &r6s, disks);
 
        /* now to consider writing and what else, if anything should be read */
        if (s.to_write)
-               handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks);
+               handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
 
        /* maybe we need to check and possibly fix the parity for this stripe
         * Any reads will already have been scheduled, so we just see if enough
@@ -3141,7 +2952,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
        }
 
        if (s.expanding && s.locked == 0 &&
-           !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+           !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
                handle_stripe_expansion(conf, sh, &r6s);
 
  unlock:
@@ -3151,68 +2962,9 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
        if (unlikely(blocked_rdev))
                md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
 
-       return_io(return_bi);
-
-       for (i=disks; i-- ;) {
-               int rw;
-               struct bio *bi;
-               mdk_rdev_t *rdev;
-               if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
-                       rw = WRITE;
-               else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
-                       rw = READ;
-               else
-                       continue;
-
-               set_bit(STRIPE_IO_STARTED, &sh->state);
-
-               bi = &sh->dev[i].req;
-
-               bi->bi_rw = rw;
-               if (rw == WRITE)
-                       bi->bi_end_io = raid5_end_write_request;
-               else
-                       bi->bi_end_io = raid5_end_read_request;
-
-               rcu_read_lock();
-               rdev = rcu_dereference(conf->disks[i].rdev);
-               if (rdev && test_bit(Faulty, &rdev->flags))
-                       rdev = NULL;
-               if (rdev)
-                       atomic_inc(&rdev->nr_pending);
-               rcu_read_unlock();
-
-               if (rdev) {
-                       if (s.syncing || s.expanding || s.expanded)
-                               md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+       ops_run_io(sh, &s);
 
-                       bi->bi_bdev = rdev->bdev;
-                       pr_debug("for %llu schedule op %ld on disc %d\n",
-                               (unsigned long long)sh->sector, bi->bi_rw, i);
-                       atomic_inc(&sh->count);
-                       bi->bi_sector = sh->sector + rdev->data_offset;
-                       bi->bi_flags = 1 << BIO_UPTODATE;
-                       bi->bi_vcnt = 1;
-                       bi->bi_max_vecs = 1;
-                       bi->bi_idx = 0;
-                       bi->bi_io_vec = &sh->dev[i].vec;
-                       bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
-                       bi->bi_io_vec[0].bv_offset = 0;
-                       bi->bi_size = STRIPE_SIZE;
-                       bi->bi_next = NULL;
-                       if (rw == WRITE &&
-                           test_bit(R5_ReWrite, &sh->dev[i].flags))
-                               atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
-                       generic_make_request(bi);
-               } else {
-                       if (rw == WRITE)
-                               set_bit(STRIPE_DEGRADED, &sh->state);
-                       pr_debug("skip op %ld on disc %d for sector %llu\n",
-                               bi->bi_rw, i, (unsigned long long)sh->sector);
-                       clear_bit(R5_LOCKED, &sh->dev[i].flags);
-                       set_bit(STRIPE_HANDLE, &sh->state);
-               }
-       }
+       return_io(return_bi);
 }
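
The large removed block is not lost; it moved wholesale into ops_run_io so RAID5 and RAID6 share one submission path. The device-pinning idiom it relies on, as a toy model (the kernel version uses rcu_read_lock plus an atomic nr_pending count):

#include <stdio.h>

struct rdev_model {
	int faulty;
	int nr_pending;
};

/* Pin the device before issuing I/O; skip failed devices entirely. */
static struct rdev_model *get_rdev(struct rdev_model *rdev)
{
	if (rdev->faulty)
		return NULL;
	rdev->nr_pending++;	/* atomic_inc(&rdev->nr_pending) in-kernel */
	return rdev;
}

int main(void)
{
	struct rdev_model dev = { .faulty = 0, .nr_pending = 0 };

	if (get_rdev(&dev))
		printf("submit bio; nr_pending=%d\n", dev.nr_pending);
	return 0;
}
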
 
 static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
@@ -3700,9 +3452,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
                if ( rw == WRITE )
                        md_write_end(mddev);
 
-               bi->bi_end_io(bi,
-                             test_bit(BIO_UPTODATE, &bi->bi_flags)
-                               ? 0 : -EIO);
+               bio_endio(bi, 0);
        }
        return 0;
 }
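
The open-coded completion was redundant: in this kernel generation bio_endio() itself converts a cleared BIO_UPTODATE flag into -EIO when the caller passes 0, so behaviour is unchanged. Roughly, as a simplified paraphrase of the era's fs/bio.c (not a verbatim copy):

void bio_endio(struct bio *bio, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;	/* a failed bio still reports -EIO */

	if (bio->bi_end_io)
		bio->bi_end_io(bio, error);
}
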
@@ -4005,12 +3755,8 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
        spin_lock_irq(&conf->device_lock);
        remaining = --raid_bio->bi_phys_segments;
        spin_unlock_irq(&conf->device_lock);
-       if (remaining == 0) {
-
-               raid_bio->bi_end_io(raid_bio,
-                             test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
-                               ? 0 : -EIO);
-       }
+       if (remaining == 0)
+               bio_endio(raid_bio, 0);
        if (atomic_dec_and_test(&conf->active_aligned_reads))
                wake_up(&conf->wait_for_stripe);
        return handled;
@@ -4612,35 +4358,41 @@ abort:
 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        raid5_conf_t *conf = mddev->private;
-       int found = 0;
+       int err = -EEXIST;
        int disk;
        struct disk_info *p;
+       int first = 0;
+       int last = conf->raid_disks - 1;
 
        if (mddev->degraded > conf->max_degraded)
                /* no point adding a device */
-               return 0;
+               return -EINVAL;
+
+       if (rdev->raid_disk >= 0)
+               first = last = rdev->raid_disk;
 
        /*
         * find the disk ... but prefer rdev->saved_raid_disk
         * if possible.
         */
        if (rdev->saved_raid_disk >= 0 &&
+           rdev->saved_raid_disk >= first &&
            conf->disks[rdev->saved_raid_disk].rdev == NULL)
                disk = rdev->saved_raid_disk;
        else
-               disk = 0;
-       for ( ; disk < conf->raid_disks; disk++)
+               disk = first;
+       for ( ; disk <= last ; disk++)
                if ((p=conf->disks + disk)->rdev == NULL) {
                        clear_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = disk;
-                       found = 1;
+                       err = 0;
                        if (rdev->saved_raid_disk != disk)
                                conf->fullsync = 1;
                        rcu_assign_pointer(p->rdev, rdev);
                        break;
                }
        print_raid5_conf(conf);
-       return found;
+       return err;
 }
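
Two contract changes land together here: the return value becomes 0/-errno instead of a found flag, and a preassigned rdev->raid_disk pins the search to one slot via the first/last bounds. A userspace model of the slot search, illustrative rather than the kernel function:

#include <errno.h>
#include <stdio.h>

#define MAX_DISKS 8

/* A preassigned slot collapses the [first, last] search range to one
 * entry; return 0 on success or a negative errno. */
static int add_disk(int slots[], int want_slot, int *out)
{
	int first = 0, last = MAX_DISKS - 1;

	if (want_slot >= 0)
		first = last = want_slot;

	for (int d = first; d <= last; d++)
		if (slots[d] == 0) {
			slots[d] = 1;
			*out = d;
			return 0;
		}
	return -EEXIST;
}

int main(void)
{
	int slots[MAX_DISKS] = { 1, 1, 0 };	/* slots 0,1 occupied */
	int got = -1;
	int err = add_disk(slots, -1, &got);

	printf("any free slot: err=%d slot=%d\n", err, got);
	err = add_disk(slots, 2, &got);		/* slot 2 now taken */
	printf("pinned slot 2: err=%d\n", err);
	return 0;
}
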
 
 static int raid5_resize(mddev_t *mddev, sector_t sectors)
@@ -4741,7 +4493,7 @@ static int raid5_start_reshape(mddev_t *mddev)
        rdev_for_each(rdev, rtmp, mddev)
                if (rdev->raid_disk < 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
-                       if (raid5_add_disk(mddev, rdev)) {
+                       if (raid5_add_disk(mddev, rdev) == 0) {
                                char nm[20];
                                set_bit(In_sync, &rdev->flags);
                                added_devices++;
index 78bfdea24a8ecd3f2465a031a7956fd8ba43f63a..e98900671ca91d7a4f76ae91ac7393f6613bd5ee 100644 (file)
@@ -221,6 +221,7 @@ struct bitmap {
        unsigned long syncchunk;
 
        __u64   events_cleared;
+       int need_sync;
 
        /* bitmap spinlock */
        spinlock_t lock;
index 3dea9f545c8f337c516b1713a39e06f59ec767d3..df30c439587592eb380ca9cf5c207a27dddfc111 100644 (file)
@@ -87,6 +87,9 @@ struct mdk_rdev_s
 #define Blocked                8               /* An error occurred on an externally
                                         * managed array, don't allow writes
                                         * until it is cleared */
+#define StateChanged   9               /* Faulty or Blocked has changed during
+                                        * an interrupt; the md thread must
+                                        * notify user-space of the change */
        wait_queue_head_t blocked_wait;
 
        int desc_nr;                    /* descriptor index in the superblock */
@@ -188,6 +191,7 @@ struct mddev_s
         * NEEDED:   we might need to start a resync/recover
         * RUNNING:  a thread is running, or about to be started
         * SYNC:     actually doing a resync, not a recovery
+        * RECOVER:  doing recovery, or need to try it.
         * INTR:     resync needs to be aborted for some reason
         * DONE:     thread is done and is waiting to be reaped
         * REQUEST:  user-space has requested a sync (used with SYNC)
@@ -198,6 +202,7 @@ struct mddev_s
         */
 #define        MD_RECOVERY_RUNNING     0
 #define        MD_RECOVERY_SYNC        1
+#define        MD_RECOVERY_RECOVER     2
 #define        MD_RECOVERY_INTR        3
 #define        MD_RECOVERY_DONE        4
 #define        MD_RECOVERY_NEEDED      5
@@ -227,6 +232,8 @@ struct mddev_s
        atomic_t                        recovery_active; /* blocks scheduled, but not written */
        wait_queue_head_t               recovery_wait;
        sector_t                        recovery_cp;
+       sector_t                        resync_min;     /* user requested sync
+                                                        * starts here */
        sector_t                        resync_max;     /* resync should pause
                                                         * when it gets here */
 
index f0827d31ae6fab718158499c10aaa50979527b17..3b2672792457ff08b991acb78e619f3daa3dab6e 100644 (file)
  *    the compute block completes.
  */
 
+/*
+ * Operations state - intermediate states that are visible outside of sh->lock
+ * In general _idle indicates nothing is running, _run indicates a data
+ * processing operation is active, and _result means the data processing result
+ * is stable and can be acted upon.  Simple operations like biofill and
+ * compute, which only have an _idle and a _run state, are tracked instead
+ * with sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
+ */
+/**
+ * enum check_states - handles syncing / repairing a stripe
+ * @check_state_idle - check operations are quiesced
+ * @check_state_run - check operation is running
+ * @check_state_check_result - set outside lock when check result is valid
+ * @check_state_compute_run - check failed and we are repairing
+ * @check_state_compute_result - set outside lock when compute result is valid
+ */
+enum check_states {
+       check_state_idle = 0,
+       check_state_run, /* parity check */
+       check_state_check_result,
+       check_state_compute_run, /* parity repair */
+       check_state_compute_result,
+};
+
+/**
+ * enum reconstruct_states - handles writing or expanding a stripe
+ */
+enum reconstruct_states {
+       reconstruct_state_idle = 0,
+       reconstruct_state_prexor_drain_run,     /* prexor-write */
+       reconstruct_state_drain_run,            /* write */
+       reconstruct_state_run,                  /* expand */
+       reconstruct_state_prexor_drain_result,
+       reconstruct_state_drain_result,
+       reconstruct_state_result,
+};
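
The _run states map directly onto the STRIPE_OP_* request bits that schedule_reconstruction5 sets and raid5_run_ops consumes. A small self-contained model of that mapping (masks mirror the flag values defined later in this file):

#include <stdio.h>

enum reconstruct_states {
	reconstruct_state_idle,
	reconstruct_state_prexor_drain_run,
	reconstruct_state_drain_run,
	reconstruct_state_run,
};

#define OP_PREXOR	(1UL << 2)
#define OP_BIODRAIN	(1UL << 3)
#define OP_POSTXOR	(1UL << 4)

/* Which operations each _run state asks raid5_run_ops to execute */
static unsigned long ops_for(enum reconstruct_states st)
{
	switch (st) {
	case reconstruct_state_prexor_drain_run:	/* rmw write */
		return OP_PREXOR | OP_BIODRAIN | OP_POSTXOR;
	case reconstruct_state_drain_run:		/* rcw write */
		return OP_BIODRAIN | OP_POSTXOR;
	case reconstruct_state_run:			/* expand */
		return OP_POSTXOR;
	default:
		return 0;
	}
}

int main(void)
{
	printf("rmw ops mask: %#lx\n",
	       ops_for(reconstruct_state_prexor_drain_run));
	printf("rcw ops mask: %#lx\n",
	       ops_for(reconstruct_state_drain_run));
	return 0;
}
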
+
 struct stripe_head {
        struct hlist_node       hash;
        struct list_head        lru;                    /* inactive_list or handle_list */
@@ -169,19 +206,13 @@ struct stripe_head {
        spinlock_t              lock;
        int                     bm_seq; /* sequence number for bitmap flushes */
        int                     disks;                  /* disks in stripe */
+       enum check_states       check_state;
+       enum reconstruct_states reconstruct_state;
        /* stripe_operations
-        * @pending - pending ops flags (set for request->issue->complete)
-        * @ack - submitted ops flags (set for issue->complete)
-        * @complete - completed ops flags (set for complete)
         * @target - STRIPE_OP_COMPUTE_BLK target
-        * @count - raid5_runs_ops is set to run when this is non-zero
         */
        struct stripe_operations {
-               unsigned long      pending;
-               unsigned long      ack;
-               unsigned long      complete;
                int                target;
-               int                count;
                u32                zero_sum_result;
        } ops;
        struct r5dev {
@@ -202,6 +233,7 @@ struct stripe_head_state {
        int locked, uptodate, to_read, to_write, failed, written;
        int to_fill, compute, req_compute, non_overwrite;
        int failed_num;
+       unsigned long ops_request;
 };
 
 /* r6_state - extra state data only relevant to r6 */
@@ -228,9 +260,7 @@ struct r6_state {
 #define        R5_Wantfill     12 /* dev->toread contains a bio that needs
                                    * filling
                                    */
-#define        R5_Wantprexor   13 /* distinguish blocks ready for rmw from
-                                   * other "towrites"
-                                   */
+#define R5_Wantdrain   13 /* dev->towrite needs to be drained */
 /*
  * Write method
  */
@@ -254,8 +284,10 @@ struct r6_state {
 #define        STRIPE_EXPAND_READY     11
 #define        STRIPE_IO_STARTED       12 /* do not count towards 'bypass_count' */
 #define        STRIPE_FULL_WRITE       13 /* all blocks are set to be overwritten */
+#define        STRIPE_BIOFILL_RUN      14
+#define        STRIPE_COMPUTE_RUN      15
 /*
- * Operations flags (in issue order)
+ * Operation request flags
  */
 #define STRIPE_OP_BIOFILL      0
 #define STRIPE_OP_COMPUTE_BLK  1
@@ -263,14 +295,6 @@ struct r6_state {
 #define STRIPE_OP_BIODRAIN     3
 #define STRIPE_OP_POSTXOR      4
 #define STRIPE_OP_CHECK        5
-#define STRIPE_OP_IO           6
-
-/* modifiers to the base operations
- * STRIPE_OP_MOD_REPAIR_PD - compute the parity block and write it back
- * STRIPE_OP_MOD_DMA_CHECK - parity is not corrupted by the check
- */
-#define STRIPE_OP_MOD_REPAIR_PD 7
-#define STRIPE_OP_MOD_DMA_CHECK 8
 
 /*
  * Plugging: