pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - drivers/md/raid5.c
Merge branches 'omap1-upstream' and 'omap2-upstream' into devel
[linux-2.6-omap-h63xx.git] / drivers / md / raid5.c
index 38232fa111a4e04fe1f8281b45e9d8892b4d4f33..caaca9e178bc2bdd15e960596c569d79aa86212a 100644 (file)
@@ -52,6 +52,7 @@
 #include "raid6.h"
 
 #include <linux/raid/bitmap.h>
+#include <linux/async_tx.h>
 
 /*
  * Stripe cache
@@ -80,7 +81,6 @@
 /*
  * The following can be used to debug the driver
  */
-#define RAID5_DEBUG    0
 #define RAID5_PARANOIA 1
 #if RAID5_PARANOIA && defined(CONFIG_SMP)
 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
@@ -88,8 +88,7 @@
 # define CHECK_DEVLOCK()
 #endif
 
-#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
-#if RAID5_DEBUG
+#ifdef DEBUG
 #define inline
 #define __inline__
 #endif
@@ -109,12 +108,11 @@ static void return_io(struct bio *return_bi)
 {
        struct bio *bi = return_bi;
        while (bi) {
-               int bytes = bi->bi_size;
 
                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
-               bi->bi_end_io(bi, bytes,
+               bi->bi_end_io(bi,
                              test_bit(BIO_UPTODATE, &bi->bi_flags)
                                ? 0 : -EIO);
                bi = return_bi;
@@ -142,6 +140,7 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
                        }
                        md_wakeup_thread(conf->mddev->thread);
                } else {
+                       BUG_ON(sh->ops.pending);
                        if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
                                atomic_dec(&conf->preread_active_stripes);
                                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
@@ -169,7 +168,8 @@ static void release_stripe(struct stripe_head *sh)
 
 static inline void remove_hash(struct stripe_head *sh)
 {
-       PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
+       pr_debug("remove_hash(), stripe %llu\n",
+               (unsigned long long)sh->sector);
 
        hlist_del_init(&sh->hash);
 }
@@ -178,7 +178,8 @@ static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
        struct hlist_head *hp = stripe_hash(conf, sh->sector);
 
-       PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
+       pr_debug("insert_hash(), stripe %llu\n",
+               (unsigned long long)sh->sector);
 
        CHECK_DEVLOCK();
        hlist_add_head(&sh->hash, hp);
@@ -241,9 +242,10 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
 
        BUG_ON(atomic_read(&sh->count) != 0);
        BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
-       
+       BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete);
+
        CHECK_DEVLOCK();
-       PRINTK("init_stripe called, stripe %llu\n", 
+       pr_debug("init_stripe called, stripe %llu\n",
                (unsigned long long)sh->sector);
 
        remove_hash(sh);
@@ -257,11 +259,11 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
 
-               if (dev->toread || dev->towrite || dev->written ||
+               if (dev->toread || dev->read || dev->towrite || dev->written ||
                    test_bit(R5_LOCKED, &dev->flags)) {
-                       printk("sector=%llx i=%d %p %p %p %d\n",
+                       printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
                               (unsigned long long)sh->sector, i, dev->toread,
-                              dev->towrite, dev->written,
+                              dev->read, dev->towrite, dev->written,
                               test_bit(R5_LOCKED, &dev->flags));
                        BUG();
                }
@@ -277,23 +279,23 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
        struct hlist_node *hn;
 
        CHECK_DEVLOCK();
-       PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
+       pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
        hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
                if (sh->sector == sector && sh->disks == disks)
                        return sh;
-       PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
+       pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
        return NULL;
 }
 
 static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
 
 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
                                             int pd_idx, int noblock)
 {
        struct stripe_head *sh;
 
-       PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
+       pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 
        spin_lock_irq(&conf->device_lock);
 
@@ -341,6 +343,576 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
        return sh;
 }
 
+/* test_and_ack_op() ensures that we only dequeue an operation once.
+ * An op is runnable when its 'pending' bit is set and its 'complete'
+ * bit is not.  Setting the 'ack' bit claims the op; if 'ack' was
+ * already set the op is in flight, so it is dropped from the local
+ * 'pend' work mask instead.  Each newly claimed op bumps the caller's
+ * 'ack' counter (used to adjust sh->ops.count).
+ */
+#define test_and_ack_op(op, pend) \
+do {                                                   \
+       if (test_bit(op, &sh->ops.pending) &&           \
+               !test_bit(op, &sh->ops.complete)) {     \
+               if (test_and_set_bit(op, &sh->ops.ack)) \
+                       clear_bit(op, &pend);           \
+               else                                    \
+                       ack++;                          \
+       } else                                          \
+               clear_bit(op, &pend);                   \
+} while (0)
+
+/* find new work to run, do not resubmit work that is already
+ * in flight
+ *
+ * Returns the snapshot of sh->ops.pending with already-in-flight ops
+ * masked out, so raid5_run_ops() only starts each op once.
+ * STRIPE_OP_IO has no ack/complete tracking and is simply cleared
+ * from pending when claimed.  ops.count is reduced by the number of
+ * ops claimed here; it must never go negative.
+ */
+static unsigned long get_stripe_work(struct stripe_head *sh)
+{
+       unsigned long pending;
+       int ack = 0;
+
+       pending = sh->ops.pending;
+
+       test_and_ack_op(STRIPE_OP_BIOFILL, pending);
+       test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending);
+       test_and_ack_op(STRIPE_OP_PREXOR, pending);
+       test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
+       test_and_ack_op(STRIPE_OP_POSTXOR, pending);
+       test_and_ack_op(STRIPE_OP_CHECK, pending);
+       if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
+               ack++;
+
+       sh->ops.count -= ack;
+       BUG_ON(sh->ops.count < 0);
+
+       return pending;
+}
+
+static void
+raid5_end_read_request(struct bio *bi, int error);
+static void
+raid5_end_write_request(struct bio *bi, int error);
+
+/* ops_run_io - submit the per-device block I/O requested via the
+ * R5_Wantread/R5_Wantwrite flags.  Each r5dev's embedded bio is
+ * re-initialized and sent with generic_make_request().  When the
+ * backing rdev is absent or Faulty the request is skipped: the device
+ * is unlocked, the stripe is flagged for re-handling, and a skipped
+ * write marks the stripe degraded.
+ */
+static void ops_run_io(struct stripe_head *sh)
+{
+       raid5_conf_t *conf = sh->raid_conf;
+       int i, disks = sh->disks;
+
+       might_sleep();
+
+       for (i = disks; i--; ) {
+               int rw;
+               struct bio *bi;
+               mdk_rdev_t *rdev;
+               if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
+                       rw = WRITE;
+               else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
+                       rw = READ;
+               else
+                       continue;
+
+               bi = &sh->dev[i].req;
+
+               bi->bi_rw = rw;
+               if (rw == WRITE)
+                       bi->bi_end_io = raid5_end_write_request;
+               else
+                       bi->bi_end_io = raid5_end_read_request;
+
+               /* rdev pointers are RCU-protected; pin the device with
+                * nr_pending before dropping the read lock
+                */
+               rcu_read_lock();
+               rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = NULL;
+               if (rdev)
+                       atomic_inc(&rdev->nr_pending);
+               rcu_read_unlock();
+
+               if (rdev) {
+                       if (test_bit(STRIPE_SYNCING, &sh->state) ||
+                               test_bit(STRIPE_EXPAND_SOURCE, &sh->state) ||
+                               test_bit(STRIPE_EXPAND_READY, &sh->state))
+                               md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+
+                       bi->bi_bdev = rdev->bdev;
+                       pr_debug("%s: for %llu schedule op %ld on disc %d\n",
+                               __FUNCTION__, (unsigned long long)sh->sector,
+                               bi->bi_rw, i);
+                       atomic_inc(&sh->count);
+                       bi->bi_sector = sh->sector + rdev->data_offset;
+                       bi->bi_flags = 1 << BIO_UPTODATE;
+                       bi->bi_vcnt = 1;
+                       bi->bi_max_vecs = 1;
+                       bi->bi_idx = 0;
+                       bi->bi_io_vec = &sh->dev[i].vec;
+                       bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+                       bi->bi_io_vec[0].bv_offset = 0;
+                       bi->bi_size = STRIPE_SIZE;
+                       bi->bi_next = NULL;
+                       if (rw == WRITE &&
+                           test_bit(R5_ReWrite, &sh->dev[i].flags))
+                               atomic_add(STRIPE_SECTORS,
+                                       &rdev->corrected_errors);
+                       generic_make_request(bi);
+               } else {
+                       if (rw == WRITE)
+                               set_bit(STRIPE_DEGRADED, &sh->state);
+                       pr_debug("skip op %ld on disc %d for sector %llu\n",
+                               bi->bi_rw, i, (unsigned long long)sh->sector);
+                       clear_bit(R5_LOCKED, &sh->dev[i].flags);
+                       set_bit(STRIPE_HANDLE, &sh->state);
+               }
+       }
+}
+
+/* async_copy_data - copy between a bio's segments and a stripe cache
+ * page via the async_tx memcpy API.  @frombio selects direction:
+ * non-zero copies bio -> page (write drain), zero copies page -> bio
+ * (read fill).  Only the part of the bio that overlaps the
+ * STRIPE_SIZE window starting at @sector is copied.  Each copy is
+ * chained onto @tx; the last descriptor is returned so the caller can
+ * keep extending the dependency chain.
+ */
+static struct dma_async_tx_descriptor *
+async_copy_data(int frombio, struct bio *bio, struct page *page,
+       sector_t sector, struct dma_async_tx_descriptor *tx)
+{
+       struct bio_vec *bvl;
+       struct page *bio_page;
+       int i;
+       int page_offset;
+
+       /* byte offset of the bio start relative to the stripe page
+        * (negative when the bio begins before @sector)
+        */
+       if (bio->bi_sector >= sector)
+               page_offset = (signed)(bio->bi_sector - sector) * 512;
+       else
+               page_offset = (signed)(sector - bio->bi_sector) * -512;
+       bio_for_each_segment(bvl, bio, i) {
+               int len = bio_iovec_idx(bio, i)->bv_len;
+               int clen;
+               int b_offset = 0;
+
+               /* skip the leading part of the segment that falls
+                * before the stripe page
+                */
+               if (page_offset < 0) {
+                       b_offset = -page_offset;
+                       page_offset += b_offset;
+                       len -= b_offset;
+               }
+
+               /* clip the copy length to the end of the stripe page */
+               if (len > 0 && page_offset + len > STRIPE_SIZE)
+                       clen = STRIPE_SIZE - page_offset;
+               else
+                       clen = len;
+
+               if (clen > 0) {
+                       b_offset += bio_iovec_idx(bio, i)->bv_offset;
+                       bio_page = bio_iovec_idx(bio, i)->bv_page;
+                       if (frombio)
+                               tx = async_memcpy(page, bio_page, page_offset,
+                                       b_offset, clen,
+                                       ASYNC_TX_DEP_ACK,
+                                       tx, NULL, NULL);
+                       else
+                               tx = async_memcpy(bio_page, page, b_offset,
+                                       page_offset, clen,
+                                       ASYNC_TX_DEP_ACK,
+                                       tx, NULL, NULL);
+               }
+               if (clen < len) /* hit end of page */
+                       break;
+               page_offset +=  len;
+       }
+
+       return tx;
+}
+
+/* ops_complete_biofill - async_tx callback run when all biofill (read)
+ * copies for the stripe have finished.  Completes parent read bios
+ * whose segment counts drop to zero, clears the BIOFILL ack/pending
+ * bits, and re-queues the stripe for handling.
+ */
+static void ops_complete_biofill(void *stripe_head_ref)
+{
+       struct stripe_head *sh = stripe_head_ref;
+       struct bio *return_bi = NULL;
+       raid5_conf_t *conf = sh->raid_conf;
+       int i;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       /* clear completed biofills */
+       for (i = sh->disks; i--; ) {
+               struct r5dev *dev = &sh->dev[i];
+
+               /* acknowledge completion of a biofill operation */
+               /* and check if we need to reply to a read request,
+                * new R5_Wantfill requests are held off until
+                * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+                */
+               if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
+                       struct bio *rbi, *rbi2;
+
+                       /* The access to dev->read is outside of the
+                        * spin_lock_irq(&conf->device_lock), but is protected
+                        * by the STRIPE_OP_BIOFILL pending bit
+                        */
+                       BUG_ON(!dev->read);
+                       rbi = dev->read;
+                       dev->read = NULL;
+                       while (rbi && rbi->bi_sector <
+                               dev->sector + STRIPE_SECTORS) {
+                               rbi2 = r5_next_bio(rbi, dev->sector);
+                               /* bi_phys_segments is used as a reference
+                                * count of outstanding stripe fills; the
+                                * bio completes when it reaches zero
+                                */
+                               spin_lock_irq(&conf->device_lock);
+                               if (--rbi->bi_phys_segments == 0) {
+                                       rbi->bi_next = return_bi;
+                                       return_bi = rbi;
+                               }
+                               spin_unlock_irq(&conf->device_lock);
+                               rbi = rbi2;
+                       }
+               }
+       }
+       clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
+       clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
+
+       return_io(return_bi);
+
+       set_bit(STRIPE_HANDLE, &sh->state);
+       release_stripe(sh);
+}
+
+/* ops_run_biofill - satisfy queued read requests from the stripe
+ * cache.  For each device flagged R5_Wantfill, move its toread list to
+ * dev->read and queue async copies from the stripe page into the read
+ * bios; ops_complete_biofill runs once the whole chain finishes.
+ */
+static void ops_run_biofill(struct stripe_head *sh)
+{
+       struct dma_async_tx_descriptor *tx = NULL;
+       raid5_conf_t *conf = sh->raid_conf;
+       int i;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       for (i = sh->disks; i--; ) {
+               struct r5dev *dev = &sh->dev[i];
+               if (test_bit(R5_Wantfill, &dev->flags)) {
+                       struct bio *rbi;
+                       /* device_lock serializes against new toread
+                        * additions while the list is transferred
+                        */
+                       spin_lock_irq(&conf->device_lock);
+                       dev->read = rbi = dev->toread;
+                       dev->toread = NULL;
+                       spin_unlock_irq(&conf->device_lock);
+                       while (rbi && rbi->bi_sector <
+                               dev->sector + STRIPE_SECTORS) {
+                               tx = async_copy_data(0, rbi, dev->page,
+                                       dev->sector, tx);
+                               rbi = r5_next_bio(rbi, dev->sector);
+                       }
+               }
+       }
+
+       atomic_inc(&sh->count);
+       async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
+               ops_complete_biofill, sh);
+}
+
+/* ops_complete_compute5 - async_tx callback after a missing block has
+ * been reconstructed: mark the target device up to date, record the
+ * compute op as complete, and re-queue the stripe.
+ */
+static void ops_complete_compute5(void *stripe_head_ref)
+{
+       struct stripe_head *sh = stripe_head_ref;
+       int target = sh->ops.target;
+       struct r5dev *tgt = &sh->dev[target];
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       set_bit(R5_UPTODATE, &tgt->flags);
+       BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+       clear_bit(R5_Wantcompute, &tgt->flags);
+       set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+       set_bit(STRIPE_HANDLE, &sh->state);
+       release_stripe(sh);
+}
+
+/* ops_run_compute5 - reconstruct the block at sh->ops.target by
+ * xor-ing together the pages of every other device (raid5 single
+ * failure recovery).  With only one source the xor degenerates to a
+ * memcpy.  Returns the descriptor so a dependent postxor can chain on
+ * it; otherwise the descriptor is acked immediately.
+ */
+static struct dma_async_tx_descriptor *
+ops_run_compute5(struct stripe_head *sh, unsigned long pending)
+{
+       /* kernel stack size limits the total number of disks */
+       int disks = sh->disks;
+       struct page *xor_srcs[disks];
+       int target = sh->ops.target;
+       struct r5dev *tgt = &sh->dev[target];
+       struct page *xor_dest = tgt->page;
+       int count = 0;
+       struct dma_async_tx_descriptor *tx;
+       int i;
+
+       pr_debug("%s: stripe %llu block: %d\n",
+               __FUNCTION__, (unsigned long long)sh->sector, target);
+       BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+
+       for (i = disks; i--; )
+               if (i != target)
+                       xor_srcs[count++] = sh->dev[i].page;
+
+       atomic_inc(&sh->count);
+
+       if (unlikely(count == 1))
+               tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
+                       0, NULL, ops_complete_compute5, sh);
+       else
+               tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+                       ASYNC_TX_XOR_ZERO_DST, NULL,
+                       ops_complete_compute5, sh);
+
+       /* ack now if postxor is not set to be run */
+       if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending))
+               async_tx_ack(tx);
+
+       return tx;
+}
+
+/* ops_complete_prexor - async_tx callback marking the prexor
+ * (old-data subtraction) phase complete.  No stripe re-queue here:
+ * the dependent biodrain/postxor chain continues the write.
+ */
+static void ops_complete_prexor(void *stripe_head_ref)
+{
+       struct stripe_head *sh = stripe_head_ref;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
+}
+
+/* ops_run_prexor - first half of a read-modify-write: xor the old
+ * contents of the to-be-written blocks (R5_Wantprexor) back out of the
+ * parity page, so the new data can later be xor-ed in by postxor.
+ * Chained onto @tx; returns the new tail descriptor.
+ */
+static struct dma_async_tx_descriptor *
+ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+{
+       /* kernel stack size limits the total number of disks */
+       int disks = sh->disks;
+       struct page *xor_srcs[disks];
+       int count = 0, pd_idx = sh->pd_idx, i;
+
+       /* existing parity data subtracted */
+       struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       for (i = disks; i--; ) {
+               struct r5dev *dev = &sh->dev[i];
+               /* Only process blocks that are known to be uptodate */
+               if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
+                       xor_srcs[count++] = dev->page;
+       }
+
+       tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+               ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
+               ops_complete_prexor, sh);
+
+       return tx;
+}
+
+/* ops_run_biodrain - copy queued write bios (dev->towrite) into the
+ * stripe cache pages and move them to dev->written.  In the
+ * read-modify-write case (prexor pending) only Wantprexor blocks are
+ * drained; in the reconstruct-write case all locked non-parity blocks
+ * with pending writes are.  Copies chain onto @tx; tail is returned.
+ */
+static struct dma_async_tx_descriptor *
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+{
+       int disks = sh->disks;
+       int pd_idx = sh->pd_idx, i;
+
+       /* check if prexor is active which means only process blocks
+        * that are part of a read-modify-write (Wantprexor)
+        */
+       int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       for (i = disks; i--; ) {
+               struct r5dev *dev = &sh->dev[i];
+               struct bio *chosen;
+               int towrite;
+
+               towrite = 0;
+               if (prexor) { /* rmw */
+                       if (dev->towrite &&
+                           test_bit(R5_Wantprexor, &dev->flags))
+                               towrite = 1;
+               } else { /* rcw */
+                       if (i != pd_idx && dev->towrite &&
+                               test_bit(R5_LOCKED, &dev->flags))
+                               towrite = 1;
+               }
+
+               if (towrite) {
+                       struct bio *wbi;
+
+                       /* sh->lock guards the towrite -> written
+                        * hand-off against concurrent bio additions
+                        */
+                       spin_lock(&sh->lock);
+                       chosen = dev->towrite;
+                       dev->towrite = NULL;
+                       BUG_ON(dev->written);
+                       wbi = dev->written = chosen;
+                       spin_unlock(&sh->lock);
+
+                       while (wbi && wbi->bi_sector <
+                               dev->sector + STRIPE_SECTORS) {
+                               tx = async_copy_data(1, wbi, dev->page,
+                                       dev->sector, tx);
+                               wbi = r5_next_bio(wbi, dev->sector);
+                       }
+               }
+       }
+
+       return tx;
+}
+
+/* ops_complete_postxor - async_tx callback for a parity-only postxor
+ * (no biodrain in flight): mark the op complete and re-queue the
+ * stripe for handling.
+ */
+static void ops_complete_postxor(void *stripe_head_ref)
+{
+       struct stripe_head *sh = stripe_head_ref;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+       set_bit(STRIPE_HANDLE, &sh->state);
+       release_stripe(sh);
+}
+
+/* ops_complete_write - async_tx callback when a postxor that was part
+ * of a write (biodrain + parity update) finishes: mark the drained
+ * data blocks and the parity block up to date, record both ops
+ * complete, and re-queue the stripe.
+ */
+static void ops_complete_write(void *stripe_head_ref)
+{
+       struct stripe_head *sh = stripe_head_ref;
+       int disks = sh->disks, i, pd_idx = sh->pd_idx;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       for (i = disks; i--; ) {
+               struct r5dev *dev = &sh->dev[i];
+               if (dev->written || i == pd_idx)
+                       set_bit(R5_UPTODATE, &dev->flags);
+       }
+
+       set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
+       set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+
+       set_bit(STRIPE_HANDLE, &sh->state);
+       release_stripe(sh);
+}
+
+/* ops_run_postxor - (re)compute the parity block.  After a prexor
+ * (rmw) the parity page is both source and destination and only the
+ * freshly written blocks are xor-ed in; otherwise (rcw) parity is
+ * rebuilt from all data blocks.  The completion callback is
+ * ops_complete_write when a biodrain is part of this chain, else
+ * ops_complete_postxor.
+ */
+static void
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+{
+       /* kernel stack size limits the total number of disks */
+       int disks = sh->disks;
+       struct page *xor_srcs[disks];
+
+       int count = 0, pd_idx = sh->pd_idx, i;
+       struct page *xor_dest;
+       int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+       unsigned long flags;
+       dma_async_tx_callback callback;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       /* check if prexor is active which means only process blocks
+        * that are part of a read-modify-write (written)
+        */
+       if (prexor) {
+               xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+               for (i = disks; i--; ) {
+                       struct r5dev *dev = &sh->dev[i];
+                       if (dev->written)
+                               xor_srcs[count++] = dev->page;
+               }
+       } else {
+               xor_dest = sh->dev[pd_idx].page;
+               for (i = disks; i--; ) {
+                       struct r5dev *dev = &sh->dev[i];
+                       if (i != pd_idx)
+                               xor_srcs[count++] = dev->page;
+               }
+       }
+
+       /* check whether this postxor is part of a write */
+       callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ?
+               ops_complete_write : ops_complete_postxor;
+
+       /* 1/ if we prexor'd then the dest is reused as a source
+        * 2/ if we did not prexor then we are redoing the parity
+        * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
+        * for the synchronous xor case
+        */
+       flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
+               (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
+
+       atomic_inc(&sh->count);
+
+       if (unlikely(count == 1)) {
+               /* single source: plain copy, xor dest flags do not apply */
+               flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
+               tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
+                       flags, tx, callback, sh);
+       } else
+               tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+                       flags, tx, callback, sh);
+}
+
+/* ops_complete_check - async_tx callback after a parity check.  If
+ * the check ran as a DMA zero-sum (MOD_DMA_CHECK set) and the result
+ * was zero, the parity block is known good and is marked up to date.
+ */
+static void ops_complete_check(void *stripe_head_ref)
+{
+       struct stripe_head *sh = stripe_head_ref;
+       int pd_idx = sh->pd_idx;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
+               sh->ops.zero_sum_result == 0)
+               set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+
+       set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
+       set_bit(STRIPE_HANDLE, &sh->state);
+       release_stripe(sh);
+}
+
+/* ops_run_check - verify parity by running a zero-sum xor over the
+ * parity page and all data pages.  A non-NULL descriptor means the
+ * check was offloaded to DMA, recorded via STRIPE_OP_MOD_DMA_CHECK so
+ * ops_complete_check knows how to interpret zero_sum_result.
+ */
+static void ops_run_check(struct stripe_head *sh)
+{
+       /* kernel stack size limits the total number of disks */
+       int disks = sh->disks;
+       struct page *xor_srcs[disks];
+       struct dma_async_tx_descriptor *tx;
+
+       int count = 0, pd_idx = sh->pd_idx, i;
+       struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+
+       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+               (unsigned long long)sh->sector);
+
+       for (i = disks; i--; ) {
+               struct r5dev *dev = &sh->dev[i];
+               if (i != pd_idx)
+                       xor_srcs[count++] = dev->page;
+       }
+
+       tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+               &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
+
+       if (tx)
+               set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
+       else
+               clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
+
+       atomic_inc(&sh->count);
+       tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
+               ops_complete_check, sh);
+}
+
+/* raid5_run_ops - dispatch the stripe operations claimed in @pending
+ * (a mask produced by get_stripe_work()).  Ops that feed each other
+ * are chained through the shared 'tx' descriptor:
+ * compute -> prexor -> biodrain -> postxor.  After bio-consuming ops
+ * (biofill/biodrain) have been started, waiters blocked on
+ * R5_Overlap are woken.
+ */
+static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
+{
+       int overlap_clear = 0, i, disks = sh->disks;
+       struct dma_async_tx_descriptor *tx = NULL;
+
+       if (test_bit(STRIPE_OP_BIOFILL, &pending)) {
+               ops_run_biofill(sh);
+               overlap_clear++;
+       }
+
+       if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending))
+               tx = ops_run_compute5(sh, pending);
+
+       if (test_bit(STRIPE_OP_PREXOR, &pending))
+               tx = ops_run_prexor(sh, tx);
+
+       if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
+               tx = ops_run_biodrain(sh, tx);
+               overlap_clear++;
+       }
+
+       if (test_bit(STRIPE_OP_POSTXOR, &pending))
+               ops_run_postxor(sh, tx);
+
+       if (test_bit(STRIPE_OP_CHECK, &pending))
+               ops_run_check(sh);
+
+       if (test_bit(STRIPE_OP_IO, &pending))
+               ops_run_io(sh);
+
+       if (overlap_clear)
+               for (i = disks; i--; ) {
+                       struct r5dev *dev = &sh->dev[i];
+                       if (test_and_clear_bit(R5_Overlap, &dev->flags))
+                               wake_up(&sh->raid_conf->wait_for_overlap);
+               }
+}
+
 static int grow_one_stripe(raid5_conf_t *conf)
 {
        struct stripe_head *sh;
@@ -375,7 +947,7 @@ static int grow_stripes(raid5_conf_t *conf, int num)
        conf->active_name = 0;
        sc = kmem_cache_create(conf->cache_name[conf->active_name],
                               sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
-                              0, 0, NULL, NULL);
+                              0, 0, NULL);
        if (!sc)
                return 1;
        conf->slab_cache = sc;
@@ -427,7 +999,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
        /* Step 1 */
        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
                               sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
-                              0, 0, NULL, NULL);
+                              0, 0, NULL);
        if (!sc)
                return -ENOMEM;
 
@@ -537,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
        conf->slab_cache = NULL;
 }
 
-static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
-                                  int error)
+static void raid5_end_read_request(struct bio * bi, int error)
 {
        struct stripe_head *sh = bi->bi_private;
        raid5_conf_t *conf = sh->raid_conf;
@@ -547,19 +1118,17 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
        char b[BDEVNAME_SIZE];
        mdk_rdev_t *rdev;
 
-       if (bi->bi_size)
-               return 1;
 
        for (i=0 ; i<disks; i++)
                if (bi == &sh->dev[i].req)
                        break;
 
-       PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n", 
-               (unsigned long long)sh->sector, i, atomic_read(&sh->count), 
+       pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
+               (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                uptodate);
        if (i == disks) {
                BUG();
-               return 0;
+               return;
        }
 
        if (uptodate) {
@@ -612,30 +1181,25 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
        clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
-       return 0;
 }
 
-static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
-                                   int error)
+static void raid5_end_write_request (struct bio *bi, int error)
 {
        struct stripe_head *sh = bi->bi_private;
        raid5_conf_t *conf = sh->raid_conf;
        int disks = sh->disks, i;
        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 
-       if (bi->bi_size)
-               return 1;
-
        for (i=0 ; i<disks; i++)
                if (bi == &sh->dev[i].req)
                        break;
 
-       PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n", 
+       pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                uptodate);
        if (i == disks) {
                BUG();
-               return 0;
+               return;
        }
 
        if (!uptodate)
@@ -646,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
        clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
-       return 0;
 }
 
 
@@ -675,7 +1238,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 {
        char b[BDEVNAME_SIZE];
        raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
-       PRINTK("raid5: error called\n");
+       pr_debug("raid5: error called\n");
 
        if (!test_bit(Faulty, &rdev->flags)) {
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -928,142 +1491,18 @@ static void copy_data(int frombio, struct bio *bio,
                        __bio_kunmap_atomic(ba, KM_USER0);
                }
                if (clen < len) /* hit end of page */
-                       break;
-               page_offset +=  len;
-       }
-}
-
-#define check_xor()    do {                                              \
-                               if (count == MAX_XOR_BLOCKS) {            \
-                               xor_blocks(count, STRIPE_SIZE, dest, ptr);\
-                               count = 0;                                \
-                          }                                              \
-                       } while(0)
-
-
-static void compute_block(struct stripe_head *sh, int dd_idx)
-{
-       int i, count, disks = sh->disks;
-       void *ptr[MAX_XOR_BLOCKS], *dest, *p;
-
-       PRINTK("compute_block, stripe %llu, idx %d\n", 
-               (unsigned long long)sh->sector, dd_idx);
-
-       dest = page_address(sh->dev[dd_idx].page);
-       memset(dest, 0, STRIPE_SIZE);
-       count = 0;
-       for (i = disks ; i--; ) {
-               if (i == dd_idx)
-                       continue;
-               p = page_address(sh->dev[i].page);
-               if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
-                       ptr[count++] = p;
-               else
-                       printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
-                               " not present\n", dd_idx,
-                               (unsigned long long)sh->sector, i);
-
-               check_xor();
-       }
-       if (count)
-               xor_blocks(count, STRIPE_SIZE, dest, ptr);
-       set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
-}
-
-static void compute_parity5(struct stripe_head *sh, int method)
-{
-       raid5_conf_t *conf = sh->raid_conf;
-       int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
-       void *ptr[MAX_XOR_BLOCKS], *dest;
-       struct bio *chosen;
-
-       PRINTK("compute_parity5, stripe %llu, method %d\n",
-               (unsigned long long)sh->sector, method);
-
-       count = 0;
-       dest = page_address(sh->dev[pd_idx].page);
-       switch(method) {
-       case READ_MODIFY_WRITE:
-               BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
-               for (i=disks ; i-- ;) {
-                       if (i==pd_idx)
-                               continue;
-                       if (sh->dev[i].towrite &&
-                           test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
-                               ptr[count++] = page_address(sh->dev[i].page);
-                               chosen = sh->dev[i].towrite;
-                               sh->dev[i].towrite = NULL;
-
-                               if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-                                       wake_up(&conf->wait_for_overlap);
-
-                               BUG_ON(sh->dev[i].written);
-                               sh->dev[i].written = chosen;
-                               check_xor();
-                       }
-               }
-               break;
-       case RECONSTRUCT_WRITE:
-               memset(dest, 0, STRIPE_SIZE);
-               for (i= disks; i-- ;)
-                       if (i!=pd_idx && sh->dev[i].towrite) {
-                               chosen = sh->dev[i].towrite;
-                               sh->dev[i].towrite = NULL;
-
-                               if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-                                       wake_up(&conf->wait_for_overlap);
-
-                               BUG_ON(sh->dev[i].written);
-                               sh->dev[i].written = chosen;
-                       }
-               break;
-       case CHECK_PARITY:
-               break;
-       }
-       if (count) {
-               xor_blocks(count, STRIPE_SIZE, dest, ptr);
-               count = 0;
-       }
-       
-       for (i = disks; i--;)
-               if (sh->dev[i].written) {
-                       sector_t sector = sh->dev[i].sector;
-                       struct bio *wbi = sh->dev[i].written;
-                       while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
-                               copy_data(1, wbi, sh->dev[i].page, sector);
-                               wbi = r5_next_bio(wbi, sector);
-                       }
-
-                       set_bit(R5_LOCKED, &sh->dev[i].flags);
-                       set_bit(R5_UPTODATE, &sh->dev[i].flags);
-               }
-
-       switch(method) {
-       case RECONSTRUCT_WRITE:
-       case CHECK_PARITY:
-               for (i=disks; i--;)
-                       if (i != pd_idx) {
-                               ptr[count++] = page_address(sh->dev[i].page);
-                               check_xor();
-                       }
-               break;
-       case READ_MODIFY_WRITE:
-               for (i = disks; i--;)
-                       if (sh->dev[i].written) {
-                               ptr[count++] = page_address(sh->dev[i].page);
-                               check_xor();
-                       }
-       }
-       if (count)
-               xor_blocks(count, STRIPE_SIZE, dest, ptr);
-
-       if (method != CHECK_PARITY) {
-               set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-               set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
-       } else
-               clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+                       break;
+               page_offset +=  len;
+       }
 }
 
+#define check_xor()    do {                                              \
+                               if (count == MAX_XOR_BLOCKS) {            \
+                               xor_blocks(count, STRIPE_SIZE, dest, ptr);\
+                               count = 0;                                \
+                          }                                              \
+                       } while(0)
+
 static void compute_parity6(struct stripe_head *sh, int method)
 {
        raid6_conf_t *conf = sh->raid_conf;
@@ -1075,7 +1514,7 @@ static void compute_parity6(struct stripe_head *sh, int method)
        qd_idx = raid6_next_disk(pd_idx, disks);
        d0_idx = raid6_next_disk(qd_idx, disks);
 
-       PRINTK("compute_parity, stripe %llu, method %d\n",
+       pr_debug("compute_parity, stripe %llu, method %d\n",
                (unsigned long long)sh->sector, method);
 
        switch(method) {
@@ -1153,7 +1592,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
        int pd_idx = sh->pd_idx;
        int qd_idx = raid6_next_disk(pd_idx, disks);
 
-       PRINTK("compute_block_1, stripe %llu, idx %d\n",
+       pr_debug("compute_block_1, stripe %llu, idx %d\n",
                (unsigned long long)sh->sector, dd_idx);
 
        if ( dd_idx == qd_idx ) {
@@ -1200,7 +1639,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
        BUG_ON(faila == failb);
        if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
 
-       PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
+       pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
               (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
 
        if ( failb == disks-1 ) {
@@ -1246,7 +1685,79 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
        }
 }
 
+static int
+handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
+{
+       int i, pd_idx = sh->pd_idx, disks = sh->disks;
+       int locked = 0; /* count of devices we leave R5_LOCKED */
+
+       if (rcw) { /* reconstruct-write (or expand): xor all data blocks into parity */
+               /* if we are not expanding this is a proper write request, and
+                * there will be bios with new data to be drained into the
+                * stripe cache
+                */
+               if (!expand) {
+                       set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
+                       sh->ops.count++;
+               }
+
+               set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+               sh->ops.count++;
+
+               for (i = disks; i--; ) {
+                       struct r5dev *dev = &sh->dev[i];
+
+                       if (dev->towrite) {
+                               set_bit(R5_LOCKED, &dev->flags);
+                               if (!expand)
+                                       clear_bit(R5_UPTODATE, &dev->flags); /* stale until biodrain/postxor complete */
+                               locked++;
+                       }
+               }
+       } else { /* read-modify-write: old parity must be present (or being computed) to prexor */
+               BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
+                       test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
+
+               set_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+               set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
+               set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+
+               sh->ops.count += 3; /* prexor + biodrain + postxor scheduled above */
+
+               for (i = disks; i--; ) {
+                       struct r5dev *dev = &sh->dev[i];
+                       if (i == pd_idx)
+                               continue;
+
+                       /* For a read-modify write there may be blocks that are
+                        * locked for reading while others are ready to be
+                        * written so we distinguish these blocks by the
+                        * R5_Wantprexor bit
+                        */
+                       if (dev->towrite &&
+                           (test_bit(R5_UPTODATE, &dev->flags) ||
+                           test_bit(R5_Wantcompute, &dev->flags))) {
+                               set_bit(R5_Wantprexor, &dev->flags);
+                               set_bit(R5_LOCKED, &dev->flags);
+                               clear_bit(R5_UPTODATE, &dev->flags);
+                               locked++;
+                       }
+               }
+       }
+
+       /* keep the parity disk locked while asynchronous operations
+        * are in flight
+        */
+       set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
+       clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+       locked++;
+
+       pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
+               __FUNCTION__, (unsigned long long)sh->sector,
+               locked, sh->ops.pending);

+       return locked; /* caller accumulates this into s->locked */
+}
 
 /*
  * Each stripe/dev can have one or more bion attached.
@@ -1259,7 +1770,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        raid5_conf_t *conf = sh->raid_conf;
        int firstwrite=0;
 
-       PRINTK("adding bh b#%llu to stripe s#%llu\n",
+       pr_debug("adding bh b#%llu to stripe s#%llu\n",
                (unsigned long long)bi->bi_sector,
                (unsigned long long)sh->sector);
 
@@ -1288,7 +1799,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        spin_unlock_irq(&conf->device_lock);
        spin_unlock(&sh->lock);
 
-       PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
+       pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                (unsigned long long)bi->bi_sector,
                (unsigned long long)sh->sector, dd_idx);
 
@@ -1401,9 +1912,12 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
                        bi = bi2;
                }
 
-               /* fail any reads if this device is non-operational */
-               if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
-                   test_bit(R5_ReadError, &sh->dev[i].flags)) {
+               /* fail any reads if this device is non-operational and
+                * the data has not reached the cache yet.
+                */
+               if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
+                   (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+                     test_bit(R5_ReadError, &sh->dev[i].flags))) {
                        bi = sh->dev[i].toread;
                        sh->dev[i].toread = NULL;
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
@@ -1429,36 +1943,101 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
 
 }
 
+/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
+ * to process
+ */
+static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
+                       struct stripe_head_state *s, int disk_idx, int disks)
+{
+       struct r5dev *dev = &sh->dev[disk_idx];
+       struct r5dev *failed_dev = &sh->dev[s->failed_num]; /* NOTE: only meaningful when s->failed != 0 */
+
+       /* don't schedule compute operations or reads on the parity block while
+        * a check is in flight
+        */
+       if ((disk_idx == sh->pd_idx) &&
+            test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
+               return ~0; /* nonzero: skip this disk, keep scanning the rest */
+
+       /* is the data in this block needed, and can we get it? */
+       if (!test_bit(R5_LOCKED, &dev->flags) &&
+           !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
+           (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+            s->syncing || s->expanding || (s->failed &&
+            (failed_dev->toread || (failed_dev->towrite &&
+            !test_bit(R5_OVERWRITE, &failed_dev->flags)
+            ))))) {
+               /* 1/ We would like to get this block, possibly by computing it,
+                * but we might not be able to.
+                *
+                * 2/ Since parity check operations potentially make the parity
+                * block !uptodate it will need to be refreshed before any
+                * compute operations on data disks are scheduled.
+                *
+                * 3/ We hold off parity block re-reads until check operations
+                * have quiesced.
+                */
+               if ((s->uptodate == disks - 1) &&
+                   !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
+                       set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+                       set_bit(R5_Wantcompute, &dev->flags);
+                       sh->ops.target = disk_idx; /* single compute target per stripe */
+                       s->req_compute = 1;
+                       sh->ops.count++;
+                       /* Careful: from this point on 'uptodate' is in the eye
+                        * of raid5_run_ops which services 'compute' operations
+                        * before writes. R5_Wantcompute flags a block that will
+                        * be R5_UPTODATE by the time it is needed for a
+                        * subsequent operation.
+                        */
+                       s->uptodate++;
+                       return 0; /* uptodate + compute == disks */
+               } else if ((s->uptodate < disks - 1) &&
+                       test_bit(R5_Insync, &dev->flags)) {
+                       /* Note: we hold off compute operations while checks are
+                        * in flight, but we still prefer 'compute' over 'read'
+                        * hence we only read if (uptodate < * disks-1)
+                        */
+                       set_bit(R5_LOCKED, &dev->flags);
+                       set_bit(R5_Wantread, &dev->flags);
+                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+                               sh->ops.count++; /* count STRIPE_OP_IO only once per batch */
+                       s->locked++;
+                       pr_debug("Reading block %d (sync=%d)\n", disk_idx,
+                               s->syncing);
+               }
+       }
+
+       return ~0; /* continue scanning remaining disks */
+}
+
static void handle_issuing_new_read_requests5(struct stripe_head *sh,
                        struct stripe_head_state *s, int disks)
{
        int i;
-       for (i = disks; i--; ) {
-               struct r5dev *dev = &sh->dev[i];
-               if (!test_bit(R5_LOCKED, &dev->flags) &&
-                   !test_bit(R5_UPTODATE, &dev->flags) &&
-                   (dev->toread ||
-                    (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
-                    s->syncing || s->expanding ||
-                    (s->failed && (sh->dev[s->failed_num].toread ||
-                       (sh->dev[s->failed_num].towrite &&
-                       !test_bit(R5_OVERWRITE, &sh->dev[s->failed_num].flags))
-                     )))) {
-                       /* we would like to get this block, possibly
-                        * by computing it, but we might not be able to
-                        */
-                       if (s->uptodate == disks-1) {
-                               PRINTK("Computing block %d\n", i);
-                               compute_block(sh, i);
-                               s->uptodate++;
-                       } else if (test_bit(R5_Insync, &dev->flags)) {
-                               set_bit(R5_LOCKED, &dev->flags);
-                               set_bit(R5_Wantread, &dev->flags);
-                               s->locked++;
-                               PRINTK("Reading block %d (sync=%d)\n",
-                                       i, s->syncing);
-                       }
-               }
+
+       /* Clear completed compute operations.  Parity recovery
+        * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled
+        * later on in this routine
+        */
+       if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+               !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); /* retire op from all three stages */
+               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+       }
+
+       /* look for blocks to read/compute, skip this if a compute
+        * is already in flight, or if the stripe contents are in the
+        * midst of changing due to a write
+        */
+       if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+               !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) &&
+               !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+               for (i = disks; i--; )
+                       if (__handle_issuing_new_read_requests5(
+                               sh, s, i, disks) == 0)
+                               break; /* 0 => a compute was scheduled; uptodate now covers all disks */
        }
        set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -1485,7 +2064,7 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
                         * by computing it, but we might not be able to
                         */
                        if (s->uptodate == disks-1) {
-                               PRINTK("Computing stripe %llu block %d\n",
+                               pr_debug("Computing stripe %llu block %d\n",
                                       (unsigned long long)sh->sector, i);
                                compute_block_1(sh, i, 0);
                                s->uptodate++;
@@ -1502,7 +2081,7 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
                                                break;
                                }
                                BUG_ON(other < 0);
-                               PRINTK("Computing stripe %llu blocks %d,%d\n",
+                               pr_debug("Computing stripe %llu blocks %d,%d\n",
                                       (unsigned long long)sh->sector,
                                       i, other);
                                compute_block_2(sh, i, other);
@@ -1511,7 +2090,7 @@ static void handle_issuing_new_read_requests6(struct stripe_head *sh,
                                set_bit(R5_LOCKED, &dev->flags);
                                set_bit(R5_Wantread, &dev->flags);
                                s->locked++;
-                               PRINTK("Reading block %d (sync=%d)\n",
+                               pr_debug("Reading block %d (sync=%d)\n",
                                        i, s->syncing);
                        }
                }
@@ -1539,7 +2118,7 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
                                /* We can return any write requests */
                                struct bio *wbi, *wbi2;
                                int bitmap_end = 0;
-                               PRINTK("Return write for disc %d\n", i);
+                               pr_debug("Return write for disc %d\n", i);
                                spin_lock_irq(&conf->device_lock);
                                wbi = dev->written;
                                dev->written = NULL;
@@ -1575,7 +2154,8 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                struct r5dev *dev = &sh->dev[i];
                if ((dev->towrite || i == sh->pd_idx) &&
                    !test_bit(R5_LOCKED, &dev->flags) &&
-                   !test_bit(R5_UPTODATE, &dev->flags)) {
+                   !(test_bit(R5_UPTODATE, &dev->flags) ||
+                     test_bit(R5_Wantcompute, &dev->flags))) {
                        if (test_bit(R5_Insync, &dev->flags))
                                rmw++;
                        else
@@ -1584,14 +2164,14 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                /* Would I have to read this buffer for reconstruct_write */
                if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
                    !test_bit(R5_LOCKED, &dev->flags) &&
-                   !test_bit(R5_UPTODATE, &dev->flags)) {
-                       if (test_bit(R5_Insync, &dev->flags))
-                               rcw++;
+                   !(test_bit(R5_UPTODATE, &dev->flags) ||
+                   test_bit(R5_Wantcompute, &dev->flags))) {
+                       if (test_bit(R5_Insync, &dev->flags)) rcw++;
                        else
                                rcw += 2*disks;
                }
        }
-       PRINTK("for sector %llu, rmw=%d rcw=%d\n",
+       pr_debug("for sector %llu, rmw=%d rcw=%d\n",
                (unsigned long long)sh->sector, rmw, rcw);
        set_bit(STRIPE_HANDLE, &sh->state);
        if (rmw < rcw && rmw > 0)
@@ -1600,14 +2180,18 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                        struct r5dev *dev = &sh->dev[i];
                        if ((dev->towrite || i == sh->pd_idx) &&
                            !test_bit(R5_LOCKED, &dev->flags) &&
-                           !test_bit(R5_UPTODATE, &dev->flags) &&
+                           !(test_bit(R5_UPTODATE, &dev->flags) ||
+                           test_bit(R5_Wantcompute, &dev->flags)) &&
                            test_bit(R5_Insync, &dev->flags)) {
                                if (
                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                                       PRINTK("Read_old block "
+                                       pr_debug("Read_old block "
                                                "%d for r-m-w\n", i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
+                                       if (!test_and_set_bit(
+                                               STRIPE_OP_IO, &sh->ops.pending))
+                                               sh->ops.count++;
                                        s->locked++;
                                } else {
                                        set_bit(STRIPE_DELAYED, &sh->state);
@@ -1622,14 +2206,18 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
                        if (!test_bit(R5_OVERWRITE, &dev->flags) &&
                            i != sh->pd_idx &&
                            !test_bit(R5_LOCKED, &dev->flags) &&
-                           !test_bit(R5_UPTODATE, &dev->flags) &&
+                           !(test_bit(R5_UPTODATE, &dev->flags) ||
+                           test_bit(R5_Wantcompute, &dev->flags)) &&
                            test_bit(R5_Insync, &dev->flags)) {
                                if (
                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                                       PRINTK("Read_old block "
+                                       pr_debug("Read_old block "
                                                "%d for Reconstruct\n", i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
+                                       if (!test_and_set_bit(
+                                               STRIPE_OP_IO, &sh->ops.pending))
+                                               sh->ops.count++;
                                        s->locked++;
                                } else {
                                        set_bit(STRIPE_DELAYED, &sh->state);
@@ -1640,28 +2228,18 @@ static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
        /* now if nothing is locked, and if we have enough data,
         * we can start a write request
         */
-       if (s->locked == 0 && (rcw == 0 || rmw == 0) &&
-           !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
-               PRINTK("Computing parity...\n");
-               compute_parity5(sh, rcw == 0 ?
-                       RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
-               /* now every locked buffer is ready to be written */
-               for (i = disks; i--; )
-                       if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
-                               PRINTK("Writing block %d\n", i);
-                               s->locked++;
-                               set_bit(R5_Wantwrite, &sh->dev[i].flags);
-                               if (!test_bit(R5_Insync, &sh->dev[i].flags)
-                                   || (i == sh->pd_idx && s->failed == 0))
-                                       set_bit(STRIPE_INSYNC, &sh->state);
-                       }
-               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                       atomic_dec(&conf->preread_active_stripes);
-                       if (atomic_read(&conf->preread_active_stripes) <
-                           IO_THRESHOLD)
-                               md_wakeup_thread(conf->mddev->thread);
-               }
-       }
+       /* since handle_stripe can be called at any time we need to handle the
+        * case where a compute block operation has been submitted and then a
+        * subsequent call wants to start a write request.  raid5_run_ops only
+        * handles the case where compute block and postxor are requested
+        * simultaneously.  If this is not the case then new writes need to be
+        * held off until the compute completes.
+        */
+       if ((s->req_compute ||
+           !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
+               (s->locked == 0 && (rcw == 0 || rmw == 0) &&
+               !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+               s->locked += handle_write_operations5(sh, rcw == 0, 0);
 }
 
 static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
@@ -1680,13 +2258,13 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
                    !test_bit(R5_UPTODATE, &dev->flags)) {
                        if (test_bit(R5_Insync, &dev->flags)) rcw++;
                        else {
-                               PRINTK("raid6: must_compute: "
+                               pr_debug("raid6: must_compute: "
                                        "disk %d flags=%#lx\n", i, dev->flags);
                                must_compute++;
                        }
                }
        }
-       PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
+       pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
               (unsigned long long)sh->sector, rcw, must_compute);
        set_bit(STRIPE_HANDLE, &sh->state);
 
@@ -1701,14 +2279,14 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
                            test_bit(R5_Insync, &dev->flags)) {
                                if (
                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
-                                       PRINTK("Read_old stripe %llu "
+                                       pr_debug("Read_old stripe %llu "
                                                "block %d for Reconstruct\n",
                                             (unsigned long long)sh->sector, i);
                                        set_bit(R5_LOCKED, &dev->flags);
                                        set_bit(R5_Wantread, &dev->flags);
                                        s->locked++;
                                } else {
-                                       PRINTK("Request delayed stripe %llu "
+                                       pr_debug("Request delayed stripe %llu "
                                                "block %d for Reconstruct\n",
                                             (unsigned long long)sh->sector, i);
                                        set_bit(STRIPE_DELAYED, &sh->state);
@@ -1738,13 +2316,13 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
                        }
                }
 
-               PRINTK("Computing parity for stripe %llu\n",
+               pr_debug("Computing parity for stripe %llu\n",
                        (unsigned long long)sh->sector);
                compute_parity6(sh, RECONSTRUCT_WRITE);
                /* now every locked buffer is ready to be written */
                for (i = disks; i--; )
                        if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
-                               PRINTK("Writing stripe %llu block %d\n",
+                               pr_debug("Writing stripe %llu block %d\n",
                                       (unsigned long long)sh->sector, i);
                                s->locked++;
                                set_bit(R5_Wantwrite, &sh->dev[i].flags);
@@ -1765,26 +2343,67 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
                                struct stripe_head_state *s, int disks)
 {
        set_bit(STRIPE_HANDLE, &sh->state);
-       if (s->failed == 0) {
-               BUG_ON(s->uptodate != disks);
-               compute_parity5(sh, CHECK_PARITY);
-               s->uptodate--;
-               if (page_is_zero(sh->dev[sh->pd_idx].page)) {
-                       /* parity is correct (on disc, not in buffer any more)
-                        */
-                       set_bit(STRIPE_INSYNC, &sh->state);
-               } else {
-                       conf->mddev->resync_mismatches += STRIPE_SECTORS;
-                       if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
-                               /* don't try to repair!! */
+       /* Take one of the following actions:
+        * 1/ start a check parity operation if (uptodate == disks)
+        * 2/ finish a check parity operation and act on the result
+        * 3/ skip to the writeback section if we previously
+        *    initiated a recovery operation
+        */
+       if (s->failed == 0 &&
+           !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+               if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
+                       BUG_ON(s->uptodate != disks);
+                       clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
+                       sh->ops.count++;
+                       s->uptodate--;
+               } else if (
+                      test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
+                       clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
+                       clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
+
+                       if (sh->ops.zero_sum_result == 0)
+                               /* parity is correct (on disc,
+                                * not in buffer any more)
+                                */
                                set_bit(STRIPE_INSYNC, &sh->state);
                        else {
-                               compute_block(sh, sh->pd_idx);
-                               s->uptodate++;
+                               conf->mddev->resync_mismatches +=
+                                       STRIPE_SECTORS;
+                               if (test_bit(
+                                    MD_RECOVERY_CHECK, &conf->mddev->recovery))
+                                       /* don't try to repair!! */
+                                       set_bit(STRIPE_INSYNC, &sh->state);
+                               else {
+                                       set_bit(STRIPE_OP_COMPUTE_BLK,
+                                               &sh->ops.pending);
+                                       set_bit(STRIPE_OP_MOD_REPAIR_PD,
+                                               &sh->ops.pending);
+                                       set_bit(R5_Wantcompute,
+                                               &sh->dev[sh->pd_idx].flags);
+                                       sh->ops.target = sh->pd_idx;
+                                       sh->ops.count++;
+                                       s->uptodate++;
+                               }
                        }
                }
        }
-       if (!test_bit(STRIPE_INSYNC, &sh->state)) {
+
+       /* check if we can clear a parity disk reconstruct */
+       if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+               test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+
+               clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
+               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+               clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+       }
+
+       /* Wait for check parity and compute block operations to complete
+        * before write-back
+        */
+       if (!test_bit(STRIPE_INSYNC, &sh->state) &&
+               !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
+               !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
                struct r5dev *dev;
                /* either failed parity check, or recovery is happening */
                if (s->failed == 0)
@@ -1795,6 +2414,9 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
 
                set_bit(R5_LOCKED, &dev->flags);
                set_bit(R5_Wantwrite, &dev->flags);
+               if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+                       sh->ops.count++;
+
                clear_bit(STRIPE_DEGRADED, &sh->state);
                s->locked++;
                set_bit(STRIPE_INSYNC, &sh->state);
@@ -1903,9 +2525,10 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
        /* We have read all the blocks in this stripe and now we need to
         * copy some of them into a target stripe for expand.
         */
+       struct dma_async_tx_descriptor *tx = NULL;
        clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
        for (i = 0; i < sh->disks; i++)
-               if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
+               if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
                        int dd_idx, pd_idx, j;
                        struct stripe_head *sh2;
 
@@ -1928,14 +2551,18 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                                release_stripe(sh2);
                                continue;
                        }
-                       memcpy(page_address(sh2->dev[dd_idx].page),
-                              page_address(sh->dev[i].page),
-                              STRIPE_SIZE);
+
+                       /* place all the copies on one channel */
+                       tx = async_memcpy(sh2->dev[dd_idx].page,
+                               sh->dev[i].page, 0, 0, STRIPE_SIZE,
+                               ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+
                        set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
                        set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
                        for (j = 0; j < conf->raid_disks; j++)
                                if (j != sh2->pd_idx &&
-                                   (r6s && j != r6s->qd_idx) &&
+                                   (!r6s || j != raid6_next_disk(sh2->pd_idx,
+                                                                sh2->disks)) &&
                                    !test_bit(R5_Expanded, &sh2->dev[j].flags))
                                        break;
                        if (j == conf->raid_disks) {
@@ -1943,7 +2570,13 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                                set_bit(STRIPE_HANDLE, &sh2->state);
                        }
                        release_stripe(sh2);
+
                }
+       /* done submitting copies, wait for them to complete */
+       if (tx) {
+               async_tx_ack(tx);
+               dma_wait_for_async_tx(tx);
+       }
 }
 
 /*
@@ -1958,7 +2591,6 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
  *    schedule a write of some buffers
  *    return confirmation of parity correctness
  *
- * Parity calculations are done inside the stripe lock
  * buffers are taken off read_list or write_list, and bh_cache buffers
  * get BH_Lock set before the stripe lock is released.
  *
@@ -1971,11 +2603,13 @@ static void handle_stripe5(struct stripe_head *sh)
        struct bio *return_bi = NULL;
        struct stripe_head_state s;
        struct r5dev *dev;
+       unsigned long pending = 0;
 
        memset(&s, 0, sizeof(s));
-       PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
-               (unsigned long long)sh->sector, atomic_read(&sh->count),
-               sh->pd_idx);
+       pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
+               "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state,
+               atomic_read(&sh->count), sh->pd_idx,
+               sh->ops.pending, sh->ops.ack, sh->ops.complete);
 
        spin_lock(&sh->lock);
        clear_bit(STRIPE_HANDLE, &sh->state);
@@ -1992,36 +2626,27 @@ static void handle_stripe5(struct stripe_head *sh)
                struct r5dev *dev = &sh->dev[i];
                clear_bit(R5_Insync, &dev->flags);
 
-               PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
-                       i, dev->flags, dev->toread, dev->towrite, dev->written);
-               /* maybe we can reply to a read */
-               if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
-                       struct bio *rbi, *rbi2;
-                       PRINTK("Return read for disc %d\n", i);
-                       spin_lock_irq(&conf->device_lock);
-                       rbi = dev->toread;
-                       dev->toread = NULL;
-                       if (test_and_clear_bit(R5_Overlap, &dev->flags))
-                               wake_up(&conf->wait_for_overlap);
-                       spin_unlock_irq(&conf->device_lock);
-                       while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
-                               copy_data(0, rbi, dev->page, dev->sector);
-                               rbi2 = r5_next_bio(rbi, dev->sector);
-                               spin_lock_irq(&conf->device_lock);
-                               if (--rbi->bi_phys_segments == 0) {
-                                       rbi->bi_next = return_bi;
-                                       return_bi = rbi;
-                               }
-                               spin_unlock_irq(&conf->device_lock);
-                               rbi = rbi2;
-                       }
-               }
+               pr_debug("check %d: state 0x%lx toread %p read %p write %p "
+                       "written %p\n", i, dev->flags, dev->toread, dev->read,
+                       dev->towrite, dev->written);
+
+               /* maybe we can request a biofill operation
+                *
+                * new wantfill requests are only permitted while
+                * STRIPE_OP_BIOFILL is clear
+                */
+               if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
+                       !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+                       set_bit(R5_Wantfill, &dev->flags);
 
                /* now count some things */
                if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
                if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
+               if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
 
-               if (dev->toread)
+               if (test_bit(R5_Wantfill, &dev->flags))
+                       s.to_fill++;
+               else if (dev->toread)
                        s.to_read++;
                if (dev->towrite) {
                        s.to_write++;
@@ -2044,7 +2669,11 @@ static void handle_stripe5(struct stripe_head *sh)
                        set_bit(R5_Insync, &dev->flags);
        }
        rcu_read_unlock();
-       PRINTK("locked=%d uptodate=%d to_read=%d"
+
+       if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+               sh->ops.count++;
+
+       pr_debug("locked=%d uptodate=%d to_read=%d"
                " to_write=%d failed=%d failed_num=%d\n",
                s.locked, s.uptodate, s.to_read, s.to_write,
                s.failed, s.failed_num);
@@ -2076,20 +2705,88 @@ static void handle_stripe5(struct stripe_head *sh)
         * or to load a block that is being partially written.
         */
        if (s.to_read || s.non_overwrite ||
-               (s.syncing && (s.uptodate < disks)) || s.expanding)
+           (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding ||
+           test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
                handle_issuing_new_read_requests5(sh, &s, disks);
 
-       /* now to consider writing and what else, if anything should be read */
-       if (s.to_write)
+       /* Now we check to see if any write operations have recently
+        * completed
+        */
+
+       /* leave prexor set until postxor is done, allows us to distinguish
+        * a rmw from a rcw during biodrain
+        */
+       if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) &&
+               test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
+
+               clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
+               clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack);
+               clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+
+               for (i = disks; i--; )
+                       clear_bit(R5_Wantprexor, &sh->dev[i].flags);
+       }
+
+       /* if only POSTXOR is set then this is an 'expand' postxor */
+       if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) &&
+               test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
+
+               clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
+               clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack);
+               clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
+
+               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
+               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+
+               /* All the 'written' buffers and the parity block are ready to
+                * be written back to disk
+                */
+               BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
+               for (i = disks; i--; ) {
+                       dev = &sh->dev[i];
+                       if (test_bit(R5_LOCKED, &dev->flags) &&
+                               (i == sh->pd_idx || dev->written)) {
+                               pr_debug("Writing block %d\n", i);
+                               set_bit(R5_Wantwrite, &dev->flags);
+                               if (!test_and_set_bit(
+                                   STRIPE_OP_IO, &sh->ops.pending))
+                                       sh->ops.count++;
+                               if (!test_bit(R5_Insync, &dev->flags) ||
+                                   (i == sh->pd_idx && s.failed == 0))
+                                       set_bit(STRIPE_INSYNC, &sh->state);
+                       }
+               }
+               if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+                       atomic_dec(&conf->preread_active_stripes);
+                       if (atomic_read(&conf->preread_active_stripes) <
+                               IO_THRESHOLD)
+                               md_wakeup_thread(conf->mddev->thread);
+               }
+       }
+
+       /* Now to consider new write requests and what else, if anything
+        * should be read.  We do not handle new writes when:
+        * 1/ A 'write' operation (copy+xor) is already in flight.
+        * 2/ A 'check' operation is in flight, as it may clobber the parity
+        *    block.
+        */
+       if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) &&
+                         !test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
                handle_issuing_new_write_requests5(conf, sh, &s, disks);
 
        /* maybe we need to check and possibly fix the parity for this stripe
-        * Any reads will already have been scheduled, so we just see if enough data
-        * is available
+        * Any reads will already have been scheduled, so we just see if enough
+        * data is available.  The parity check is held off while parity
+        * dependent operations are in flight.
         */
-       if (s.syncing && s.locked == 0 &&
-           !test_bit(STRIPE_INSYNC, &sh->state))
+       if ((s.syncing && s.locked == 0 &&
+            !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+            !test_bit(STRIPE_INSYNC, &sh->state)) ||
+             test_bit(STRIPE_OP_CHECK, &sh->ops.pending) ||
+             test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending))
                handle_parity_checks5(conf, sh, &s, disks);
+
        if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
                md_done_sync(conf->mddev, STRIPE_SECTORS,1);
                clear_bit(STRIPE_SYNCING, &sh->state);
@@ -2106,29 +2803,49 @@ static void handle_stripe5(struct stripe_head *sh)
                dev = &sh->dev[s.failed_num];
                if (!test_bit(R5_ReWrite, &dev->flags)) {
                        set_bit(R5_Wantwrite, &dev->flags);
+                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+                               sh->ops.count++;
                        set_bit(R5_ReWrite, &dev->flags);
                        set_bit(R5_LOCKED, &dev->flags);
                        s.locked++;
                } else {
                        /* let's read it back */
                        set_bit(R5_Wantread, &dev->flags);
+                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+                               sh->ops.count++;
                        set_bit(R5_LOCKED, &dev->flags);
                        s.locked++;
                }
        }
 
-       if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
-               /* Need to write out all blocks after computing parity */
-               sh->disks = conf->raid_disks;
-               sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
-               compute_parity5(sh, RECONSTRUCT_WRITE);
+       /* Finish postxor operations initiated by the expansion
+        * process
+        */
+       if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
+               !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
+
+               clear_bit(STRIPE_EXPANDING, &sh->state);
+
+               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
+               clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+
                for (i = conf->raid_disks; i--; ) {
-                       set_bit(R5_LOCKED, &sh->dev[i].flags);
-                       s.locked++;
                        set_bit(R5_Wantwrite, &sh->dev[i].flags);
+                       if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+                               sh->ops.count++;
                }
-               clear_bit(STRIPE_EXPANDING, &sh->state);
-       } else if (s.expanded) {
+       }
+
+       if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
+               !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+               /* Need to write out all blocks after computing parity */
+               sh->disks = conf->raid_disks;
+               sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
+                       conf->raid_disks);
+               s.locked += handle_write_operations5(sh, 1, 1);
+       } else if (s.expanded &&
+               !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
                clear_bit(STRIPE_EXPAND_READY, &sh->state);
                atomic_dec(&conf->reshape_stripes);
                wake_up(&conf->wait_for_overlap);
@@ -2138,68 +2855,16 @@ static void handle_stripe5(struct stripe_head *sh)
        if (s.expanding && s.locked == 0)
                handle_stripe_expansion(conf, sh, NULL);
 
+       if (sh->ops.count)
+               pending = get_stripe_work(sh);
+
        spin_unlock(&sh->lock);
 
-       return_io(return_bi);
+       if (pending)
+               raid5_run_ops(sh, pending);
 
-       for (i=disks; i-- ;) {
-               int rw;
-               struct bio *bi;
-               mdk_rdev_t *rdev;
-               if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
-                       rw = WRITE;
-               else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
-                       rw = READ;
-               else
-                       continue;
-               bi = &sh->dev[i].req;
-               bi->bi_rw = rw;
-               if (rw == WRITE)
-                       bi->bi_end_io = raid5_end_write_request;
-               else
-                       bi->bi_end_io = raid5_end_read_request;
-               rcu_read_lock();
-               rdev = rcu_dereference(conf->disks[i].rdev);
-               if (rdev && test_bit(Faulty, &rdev->flags))
-                       rdev = NULL;
-               if (rdev)
-                       atomic_inc(&rdev->nr_pending);
-               rcu_read_unlock();
-               if (rdev) {
-                       if (s.syncing || s.expanding || s.expanded)
-                               md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+       return_io(return_bi);
 
-                       bi->bi_bdev = rdev->bdev;
-                       PRINTK("for %llu schedule op %ld on disc %d\n",
-                               (unsigned long long)sh->sector, bi->bi_rw, i);
-                       atomic_inc(&sh->count);
-                       bi->bi_sector = sh->sector + rdev->data_offset;
-                       bi->bi_flags = 1 << BIO_UPTODATE;
-                       bi->bi_vcnt = 1;        
-                       bi->bi_max_vecs = 1;
-                       bi->bi_idx = 0;
-                       bi->bi_io_vec = &sh->dev[i].vec;
-                       bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
-                       bi->bi_io_vec[0].bv_offset = 0;
-                       bi->bi_size = STRIPE_SIZE;
-                       bi->bi_next = NULL;
-                       if (rw == WRITE &&
-                           test_bit(R5_ReWrite, &sh->dev[i].flags))
-                               atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
-                       generic_make_request(bi);
-               } else {
-                       if (rw == WRITE)
-                               set_bit(STRIPE_DEGRADED, &sh->state);
-                       PRINTK("skip op %ld on disc %d for sector %llu\n",
-                               bi->bi_rw, i, (unsigned long long)sh->sector);
-                       clear_bit(R5_LOCKED, &sh->dev[i].flags);
-                       set_bit(STRIPE_HANDLE, &sh->state);
-               }
-       }
 }
 
 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
@@ -2213,7 +2878,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
        struct r5dev *dev, *pdev, *qdev;
 
        r6s.qd_idx = raid6_next_disk(pd_idx, disks);
-       PRINTK("handling stripe %llu, state=%#lx cnt=%d, "
+       pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
                "pd_idx=%d, qd_idx=%d\n",
               (unsigned long long)sh->sector, sh->state,
               atomic_read(&sh->count), pd_idx, r6s.qd_idx);
@@ -2234,12 +2899,12 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                dev = &sh->dev[i];
                clear_bit(R5_Insync, &dev->flags);
 
-               PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
+               pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
                        i, dev->flags, dev->toread, dev->towrite, dev->written);
                /* maybe we can reply to a read */
                if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
                        struct bio *rbi, *rbi2;
-                       PRINTK("Return read for disc %d\n", i);
+                       pr_debug("Return read for disc %d\n", i);
                        spin_lock_irq(&conf->device_lock);
                        rbi = dev->toread;
                        dev->toread = NULL;
@@ -2288,7 +2953,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                        set_bit(R5_Insync, &dev->flags);
        }
        rcu_read_unlock();
-       PRINTK("locked=%d uptodate=%d to_read=%d"
+       pr_debug("locked=%d uptodate=%d to_read=%d"
               " to_write=%d failed=%d failed_num=%d,%d\n",
               s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
               r6s.failed_num[0], r6s.failed_num[1]);
@@ -2428,7 +3093,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 
                        bi->bi_bdev = rdev->bdev;
-                       PRINTK("for %llu schedule op %ld on disc %d\n",
+                       pr_debug("for %llu schedule op %ld on disc %d\n",
                                (unsigned long long)sh->sector, bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        bi->bi_sector = sh->sector + rdev->data_offset;
@@ -2448,7 +3113,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                } else {
                        if (rw == WRITE)
                                set_bit(STRIPE_DEGRADED, &sh->state);
-                       PRINTK("skip op %ld on disc %d for sector %llu\n",
+                       pr_debug("skip op %ld on disc %d for sector %llu\n",
                                bi->bi_rw, i, (unsigned long long)sh->sector);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
                        set_bit(STRIPE_HANDLE, &sh->state);
@@ -2505,7 +3170,7 @@ static void unplug_slaves(mddev_t *mddev)
        for (i=0; i<mddev->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
@@ -2520,7 +3185,7 @@ static void unplug_slaves(mddev_t *mddev)
        rcu_read_unlock();
 }
 
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -2539,7 +3204,7 @@ static void raid5_unplug_device(request_queue_t *q)
        unplug_slaves(mddev);
 }
 
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
                             sector_t *error_sector)
 {
        mddev_t *mddev = q->queuedata;
@@ -2551,7 +3216,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct block_device *bdev = rdev->bdev;
-                       request_queue_t *r_queue = bdev_get_queue(bdev);
+                       struct request_queue *r_queue = bdev_get_queue(bdev);
 
                        if (!r_queue->issue_flush_fn)
                                ret = -EOPNOTSUPP;
@@ -2590,7 +3255,7 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
        mddev_t *mddev = q->queuedata;
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -2665,7 +3330,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
  *  first).
  *  If the read failed..
  */
-static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+static void raid5_align_endio(struct bio *bi, int error)
 {
        struct bio* raid_bi  = bi->bi_private;
        mddev_t *mddev;
@@ -2673,8 +3338,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
        mdk_rdev_t *rdev;
 
-       if (bi->bi_size)
-               return 1;
        bio_put(bi);
 
        mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@@ -2685,22 +3348,21 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
        rdev_dec_pending(rdev, conf->mddev);
 
        if (!error && uptodate) {
-               bio_endio(raid_bi, bytes, 0);
+               bio_endio(raid_bi, 0);
                if (atomic_dec_and_test(&conf->active_aligned_reads))
                        wake_up(&conf->wait_for_stripe);
-               return 0;
+               return;
        }
 
 
-       PRINTK("raid5_align_endio : io error...handing IO for a retry\n");
+       pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
 
        add_bio_to_retry(raid_bi, conf);
-       return 0;
 }
 
 static int bio_fits_rdev(struct bio *bi)
 {
-       request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+       struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
        if ((bi->bi_size>>9) > q->max_sectors)
                return 0;
@@ -2719,7 +3381,7 @@ static int bio_fits_rdev(struct bio *bi)
 }
 
 
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -2730,7 +3392,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
        mdk_rdev_t *rdev;
 
        if (!in_chunk_boundary(mddev, raid_bio)) {
-               PRINTK("chunk_aligned_read : non aligned\n");
+               pr_debug("chunk_aligned_read : non aligned\n");
                return 0;
        }
        /*
@@ -2789,7 +3451,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 }
 
 
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -2801,7 +3463,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
        int remaining;
 
        if (unlikely(bio_barrier(bi))) {
-               bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
+               bio_endio(bi, -EOPNOTSUPP);
                return 0;
        }
 
@@ -2854,7 +3516,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
 
                new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
                                                  &dd_idx, &pd_idx, conf);
-               PRINTK("raid5: make_request, sector %llu logical %llu\n",
+               pr_debug("raid5: make_request, sector %llu logical %llu\n",
                        (unsigned long long)new_sector, 
                        (unsigned long long)logical_sector);
 
@@ -2917,12 +3579,11 @@ static int make_request(request_queue_t *q, struct bio * bi)
        remaining = --bi->bi_phys_segments;
        spin_unlock_irq(&conf->device_lock);
        if (remaining == 0) {
-               int bytes = bi->bi_size;
 
                if ( rw == WRITE )
                        md_write_end(mddev);
-               bi->bi_size = 0;
-               bi->bi_end_io(bi, bytes,
+
+               bi->bi_end_io(bi,
                              test_bit(BIO_UPTODATE, &bi->bi_flags)
                                ? 0 : -EIO);
        }
@@ -3200,10 +3861,8 @@ static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
        remaining = --raid_bio->bi_phys_segments;
        spin_unlock_irq(&conf->device_lock);
        if (remaining == 0) {
-               int bytes = raid_bio->bi_size;
 
-               raid_bio->bi_size = 0;
-               raid_bio->bi_end_io(raid_bio, bytes,
+               raid_bio->bi_end_io(raid_bio,
                              test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
                                ? 0 : -EIO);
        }
@@ -3227,7 +3886,7 @@ static void raid5d (mddev_t *mddev)
        raid5_conf_t *conf = mddev_to_conf(mddev);
        int handled;
 
-       PRINTK("+++ raid5d active\n");
+       pr_debug("+++ raid5d active\n");
 
        md_check_recovery(mddev);
 
@@ -3262,8 +3921,10 @@ static void raid5d (mddev_t *mddev)
                        handled++;
                }
 
-               if (list_empty(&conf->handle_list))
+               if (list_empty(&conf->handle_list)) {
+                       async_tx_issue_pending_all();
                        break;
+               }
 
                first = conf->handle_list.next;
                sh = list_entry(first, struct stripe_head, lru);
@@ -3279,13 +3940,13 @@ static void raid5d (mddev_t *mddev)
 
                spin_lock_irq(&conf->device_lock);
        }
-       PRINTK("%d stripes handled\n", handled);
+       pr_debug("%d stripes handled\n", handled);
 
        spin_unlock_irq(&conf->device_lock);
 
        unplug_slaves(mddev);
 
-       PRINTK("--- raid5d inactive\n");
+       pr_debug("--- raid5d inactive\n");
 }
 
 static ssize_t
@@ -3461,7 +4122,7 @@ static int run(mddev_t *mddev)
        atomic_set(&conf->preread_active_stripes, 0);
        atomic_set(&conf->active_aligned_reads, 0);
 
-       PRINTK("raid5: run(%s) called.\n", mdname(mddev));
+       pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
        ITERATE_RDEV(mddev,rdev,tmp) {
                raid_disk = rdev->raid_disk;
@@ -3644,7 +4305,7 @@ static int stop(mddev_t *mddev)
        return 0;
 }
 
-#if RAID5_DEBUG
+#ifdef DEBUG
 static void print_sh (struct seq_file *seq, struct stripe_head *sh)
 {
        int i;
@@ -3691,7 +4352,7 @@ static void status (struct seq_file *seq, mddev_t *mddev)
                               conf->disks[i].rdev &&
                               test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
        seq_printf (seq, "]");
-#if RAID5_DEBUG
+#ifdef DEBUG
        seq_printf (seq, "\n");
        printall(seq, conf);
 #endif