conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return 1;
conf->slab_cache = sc;
/* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return -ENOMEM;
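/* A minimal sketch (not from the patch) of the slab API this hunk
 * tracks: with destructors removed, kmem_cache_create() takes five
 * arguments, ending in the optional constructor.  The demo_* names
 * are illustrative only.
 */
#include <linux/slab.h>

struct demo_obj {
	int payload;
};

static struct kmem_cache *demo_cache;

static int demo_cache_init(void)
{
	demo_cache = kmem_cache_create("demo_cache",
				sizeof(struct demo_obj),
				0, 0, NULL /* ctor; the dtor arg is gone */);
	return demo_cache ? 0 : -ENOMEM;
}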
} \
} while(0)
-
-static void compute_block(struct stripe_head *sh, int dd_idx)
-{
- int i, count, disks = sh->disks;
- void *ptr[MAX_XOR_BLOCKS], *dest, *p;
-
- pr_debug("compute_block, stripe %llu, idx %d\n",
- (unsigned long long)sh->sector, dd_idx);
-
- dest = page_address(sh->dev[dd_idx].page);
- memset(dest, 0, STRIPE_SIZE);
- count = 0;
- for (i = disks ; i--; ) {
- if (i == dd_idx)
- continue;
- p = page_address(sh->dev[i].page);
- if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
- ptr[count++] = p;
- else
- printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
- " not present\n", dd_idx,
- (unsigned long long)sh->sector, i);
-
- check_xor();
- }
- if (count)
- xor_blocks(count, STRIPE_SIZE, dest, ptr);
- set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
-}
-
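/* A sketch of what supersedes the deleted compute_block(): block
 * recovery becomes an asynchronous xor issued through the async_tx
 * API assumed by this series.  demo_compute_block() is illustrative;
 * the real servicing routine belongs to raid5_run_ops.
 */
#include <linux/async_tx.h>
#include <linux/raid/raid5.h>

static struct dma_async_tx_descriptor *
demo_compute_block(struct stripe_head *sh, int dd_idx)
{
	int i, count = 0, disks = sh->disks;
	struct page *xor_srcs[disks];
	struct page *dest = sh->dev[dd_idx].page;

	/* every other block is an xor source */
	for (i = disks; i--; )
		if (i != dd_idx)
			xor_srcs[count++] = sh->dev[i].page;

	/* ASYNC_TX_XOR_ZERO_DST stands in for the memset() of the
	 * synchronous version; the caller chains or acks the
	 * returned descriptor
	 */
	return async_xor(dest, xor_srcs, 0, count, STRIPE_SIZE,
			ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL);
}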
-static void compute_parity5(struct stripe_head *sh, int method)
-{
- raid5_conf_t *conf = sh->raid_conf;
- int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
- void *ptr[MAX_XOR_BLOCKS], *dest;
- struct bio *chosen;
-
- pr_debug("compute_parity5, stripe %llu, method %d\n",
- (unsigned long long)sh->sector, method);
-
- count = 0;
- dest = page_address(sh->dev[pd_idx].page);
- switch(method) {
- case READ_MODIFY_WRITE:
- BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
- for (i=disks ; i-- ;) {
- if (i==pd_idx)
- continue;
- if (sh->dev[i].towrite &&
- test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
- ptr[count++] = page_address(sh->dev[i].page);
- chosen = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- BUG_ON(sh->dev[i].written);
- sh->dev[i].written = chosen;
- check_xor();
- }
- }
- break;
- case RECONSTRUCT_WRITE:
- memset(dest, 0, STRIPE_SIZE);
- for (i= disks; i-- ;)
- if (i!=pd_idx && sh->dev[i].towrite) {
- chosen = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- BUG_ON(sh->dev[i].written);
- sh->dev[i].written = chosen;
- }
- break;
- case CHECK_PARITY:
- break;
- }
- if (count) {
- xor_blocks(count, STRIPE_SIZE, dest, ptr);
- count = 0;
- }
-
- for (i = disks; i--;)
- if (sh->dev[i].written) {
- sector_t sector = sh->dev[i].sector;
- struct bio *wbi = sh->dev[i].written;
- while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
- copy_data(1, wbi, sh->dev[i].page, sector);
- wbi = r5_next_bio(wbi, sector);
- }
-
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(R5_UPTODATE, &sh->dev[i].flags);
- }
-
- switch(method) {
- case RECONSTRUCT_WRITE:
- case CHECK_PARITY:
- for (i=disks; i--;)
- if (i != pd_idx) {
- ptr[count++] = page_address(sh->dev[i].page);
- check_xor();
- }
- break;
- case READ_MODIFY_WRITE:
- for (i = disks; i--;)
- if (sh->dev[i].written) {
- ptr[count++] = page_address(sh->dev[i].page);
- check_xor();
- }
- }
- if (count)
- xor_blocks(count, STRIPE_SIZE, dest, ptr);
-
- if (method != CHECK_PARITY) {
- set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
- set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
- } else
- clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-}
-
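/* A matching sketch for the RECONSTRUCT_WRITE case of the deleted
 * compute_parity5(): the parity xor becomes a postxor descriptor that
 * can be chained behind a prior drain/copy via 'tx'.  Same assumed
 * includes as the sketch above; demo_* names are illustrative.
 */
static struct dma_async_tx_descriptor *
demo_reconstruct_parity(struct stripe_head *sh,
		struct dma_async_tx_descriptor *tx)
{
	int i, count = 0, pd_idx = sh->pd_idx, disks = sh->disks;
	struct page *xor_srcs[disks];

	for (i = disks; i--; )
		if (i != pd_idx)
			xor_srcs[count++] = sh->dev[i].page;

	/* zero the stale parity, xor in all data blocks, and run after
	 * (then ack) the dependency descriptor if one was given
	 */
	return async_xor(sh->dev[pd_idx].page, xor_srcs, 0, count,
			STRIPE_SIZE,
			ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_DEP_ACK,
			tx, NULL, NULL);
}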
static void compute_parity6(struct stripe_head *sh, int method)
{
raid6_conf_t *conf = sh->raid_conf;
bi = bi2;
}
- /* fail any reads if this device is non-operational */
- if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
- test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ /* fail any reads if this device is non-operational and
+ * the data has not reached the cache yet.
+ */
+ if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
+ (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+ test_bit(R5_ReadError, &sh->dev[i].flags))) {
bi = sh->dev[i].toread;
sh->dev[i].toread = NULL;
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
}
+/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
+ * to process
+ */
+static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
+ struct stripe_head_state *s, int disk_idx, int disks)
+{
+ struct r5dev *dev = &sh->dev[disk_idx];
+ struct r5dev *failed_dev = &sh->dev[s->failed_num];
+
+ /* don't schedule compute operations or reads on the parity block while
+ * a check is in flight
+ */
+ if ((disk_idx == sh->pd_idx) &&
+ test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
+ return ~0;
+
+ /* is the data in this block needed, and can we get it? */
+ if (!test_bit(R5_LOCKED, &dev->flags) &&
+ !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
+ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+ s->syncing || s->expanding || (s->failed &&
+ (failed_dev->toread || (failed_dev->towrite &&
+ !test_bit(R5_OVERWRITE, &failed_dev->flags)
+ ))))) {
+ /* 1/ We would like to get this block, possibly by computing it,
+ * but we might not be able to.
+ *
+ * 2/ Since parity check operations potentially make the parity
+ * block !uptodate it will need to be refreshed before any
+ * compute operations on data disks are scheduled.
+ *
+ * 3/ We hold off parity block re-reads until check operations
+ * have quiesced.
+ */
+ if ((s->uptodate == disks - 1) &&
+ !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
+ set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ set_bit(R5_Wantcompute, &dev->flags);
+ sh->ops.target = disk_idx;
+ s->req_compute = 1;
+ sh->ops.count++;
+ /* Careful: from this point on 'uptodate' is in the eye
+ * of raid5_run_ops which services 'compute' operations
+ * before writes. R5_Wantcompute flags a block that will
+ * be R5_UPTODATE by the time it is needed for a
+ * subsequent operation.
+ */
+ s->uptodate++;
+ return 0; /* uptodate + compute == disks */
+ } else if ((s->uptodate < disks - 1) &&
+ test_bit(R5_Insync, &dev->flags)) {
+ /* Note: we hold off compute operations while checks are
+ * in flight, but we still prefer 'compute' over 'read',
+ * hence we only read if (uptodate < disks-1)
+ */
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
+ s->locked++;
+ pr_debug("Reading block %d (sync=%d)\n", disk_idx,
+ s->syncing);
+ }
+ }
+
+ return ~0;
+}
+
static void handle_issuing_new_read_requests5(struct stripe_head *sh,
struct stripe_head_state *s, int disks)
{
int i;
- for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- s->syncing || s->expanding ||
- (s->failed && (sh->dev[s->failed_num].toread ||
- (sh->dev[s->failed_num].towrite &&
- !test_bit(R5_OVERWRITE, &sh->dev[s->failed_num].flags))
- )))) {
- /* we would like to get this block, possibly
- * by computing it, but we might not be able to
- */
- if (s->uptodate == disks-1) {
- pr_debug("Computing block %d\n", i);
- compute_block(sh, i);
- s->uptodate++;
- } else if (test_bit(R5_Insync, &dev->flags)) {
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- s->locked++;
- pr_debug("Reading block %d (sync=%d)\n",
- i, s->syncing);
- }
- }
+
+ /* Clear completed compute operations. Parity recovery
+ * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled
+ * later on in this routine
+ */
+ if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+ !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ }
+
+ /* look for blocks to read/compute, skip this if a compute
+ * is already in flight, or if the stripe contents are in the
+ * midst of changing due to a write
+ */
+ if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+ !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) &&
+ !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+ for (i = disks; i--; )
+ if (__handle_issuing_new_read_requests5(
+ sh, s, i, disks) == 0)
+ break;
}
set_bit(STRIPE_HANDLE, &sh->state);
}
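/* The pending/ack/complete bit juggling above follows one lifecycle:
 * the stripe handler sets .pending, raid5_run_ops is assumed to move
 * an op to .ack on submission and to .complete from its async
 * callback, and the handler clears all three once it has consumed
 * the result.  That final step, sketched (demo_consume_op is
 * illustrative):
 */
static int demo_consume_op(struct stripe_head *sh, int op)
{
	if (!test_bit(op, &sh->ops.complete))
		return 0;
	clear_bit(op, &sh->ops.complete);
	clear_bit(op, &sh->ops.ack);
	clear_bit(op, &sh->ops.pending);
	return 1;
}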
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags)) {
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
if (test_bit(R5_Insync, &dev->flags))
rmw++;
else
/* Would I have to read this buffer for reconstruct_write */
if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags)) {
- if (test_bit(R5_Insync, &dev->flags))
- rcw++;
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ if (test_bit(R5_Insync, &dev->flags))
+ rcw++;
else
rcw += 2*disks;
}
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
"%d for r-m-w\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(
+ STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
s->locked++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
if (!test_bit(R5_OVERWRITE, &dev->flags) &&
i != sh->pd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {
if (
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
"%d for Reconstruct\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(
+ STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
s->locked++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
/* now if nothing is locked, and if we have enough data,
* we can start a write request
*/
- if (s->locked == 0 && (rcw == 0 || rmw == 0) &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state))
+ /* since handle_stripe can be called at any time, we need to handle the
+ * case where a compute block operation has been submitted and then a
+ * subsequent call wants to start a write request. raid5_run_ops only
+ * handles the case where compute block and postxor are requested
+ * simultaneously. If this is not the case then new writes need to be
+ * held off until the compute completes.
+ */
+ if ((s->req_compute ||
+ !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
+ (s->locked == 0 && (rcw == 0 || rmw == 0) &&
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
s->locked += handle_write_operations5(sh, rcw == 0, 0);
}
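/* Why "compute block and postxor requested simultaneously" works:
 * async_tx dependency chaining orders the rebuild ahead of the parity
 * update.  A sketch in terms of the demo_* helpers above; the real
 * sequencing is raid5_run_ops' job.
 */
static void demo_compute_then_postxor(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx;

	tx = demo_compute_block(sh, sh->ops.target);
	tx = demo_reconstruct_parity(sh, tx);	/* waits on the compute */
	async_tx_ack(tx);			/* no further dependents */
}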
struct stripe_head_state *s, int disks)
{
set_bit(STRIPE_HANDLE, &sh->state);
- if (s->failed == 0) {
- BUG_ON(s->uptodate != disks);
- compute_parity5(sh, CHECK_PARITY);
- s->uptodate--;
- if (page_is_zero(sh->dev[sh->pd_idx].page)) {
- /* parity is correct (on disc, not in buffer any more)
- */
- set_bit(STRIPE_INSYNC, &sh->state);
- } else {
- conf->mddev->resync_mismatches += STRIPE_SECTORS;
- if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
- /* don't try to repair!! */
+ /* Take one of the following actions:
+ * 1/ start a check parity operation if (uptodate == disks)
+ * 2/ finish a check parity operation and act on the result
+ * 3/ skip to the writeback section if we previously
+ * initiated a recovery operation
+ */
+ if (s->failed == 0 &&
+ !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+ if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
+ BUG_ON(s->uptodate != disks);
+ clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
+ sh->ops.count++;
+ s->uptodate--;
+ } else if (
+ test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
+ clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
+
+ if (sh->ops.zero_sum_result == 0)
+ /* parity is correct (on disc,
+ * not in buffer any more)
+ */
set_bit(STRIPE_INSYNC, &sh->state);
else {
- compute_block(sh, sh->pd_idx);
- s->uptodate++;
+ conf->mddev->resync_mismatches +=
+ STRIPE_SECTORS;
+ if (test_bit(
+ MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ set_bit(STRIPE_OP_COMPUTE_BLK,
+ &sh->ops.pending);
+ set_bit(STRIPE_OP_MOD_REPAIR_PD,
+ &sh->ops.pending);
+ set_bit(R5_Wantcompute,
+ &sh->dev[sh->pd_idx].flags);
+ sh->ops.target = sh->pd_idx;
+ sh->ops.count++;
+ s->uptodate++;
+ }
}
}
}
- if (!test_bit(STRIPE_INSYNC, &sh->state)) {
+
+ /* check if we can clear a parity disk reconstruct */
+ if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+ test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+
+ clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ }
+
+ /* Wait for check parity and compute block operations to complete
+ * before write-back
+ */
+ if (!test_bit(STRIPE_INSYNC, &sh->state) &&
+ !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
+ !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
struct r5dev *dev;
/* either failed parity check, or recovery is happening */
if (s->failed == 0)
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
+
clear_bit(STRIPE_DEGRADED, &sh->state);
s->locked++;
set_bit(STRIPE_INSYNC, &sh->state);
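/* How the queued check is serviced, sketched: a zero-sum xor over the
 * stripe deposits its verdict in sh->ops.zero_sum_result, which the
 * code above reads once STRIPE_OP_CHECK reaches .complete.  Assumes
 * the async_xor_zero_sum() call of this series; demo_run_check is
 * illustrative and omits the completion callback.
 */
static void demo_run_check(struct stripe_head *sh)
{
	int i, count = 0, pd_idx = sh->pd_idx, disks = sh->disks;
	struct page *xor_srcs[disks];
	struct dma_async_tx_descriptor *tx;

	for (i = disks; i--; )
		if (i != pd_idx)
			xor_srcs[count++] = sh->dev[i].page;

	/* zero_sum_result is 0 when parity matches the data blocks */
	tx = async_xor_zero_sum(sh->dev[pd_idx].page, xor_srcs, 0, count,
			STRIPE_SIZE, &sh->ops.zero_sum_result,
			0, NULL, NULL, NULL);
	async_tx_ack(tx);
}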
/* We have read all the blocks in this stripe and now we need to
* copy some of them into a target stripe for expand.
*/
+ struct dma_async_tx_descriptor *tx = NULL;
clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
for (i = 0; i < sh->disks; i++)
if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
release_stripe(sh2);
continue;
}
- memcpy(page_address(sh2->dev[dd_idx].page),
- page_address(sh->dev[i].page),
- STRIPE_SIZE);
+
+ /* place all the copies on one channel */
+ tx = async_memcpy(sh2->dev[dd_idx].page,
+ sh->dev[i].page, 0, 0, STRIPE_SIZE,
+ ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+
set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
for (j = 0; j < conf->raid_disks; j++)
set_bit(STRIPE_HANDLE, &sh2->state);
}
release_stripe(sh2);
+
+ /* done submitting copies, wait for them to complete */
+ if (i + 1 >= sh->disks) {
+ async_tx_ack(tx);
+ dma_wait_for_async_tx(tx);
+ }
}
}
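/* The chain-then-wait idiom used above, in isolation: each copy names
 * its predecessor as depend_tx so they serialize on one channel, and
 * only the last descriptor is acked and waited on.  A sketch over
 * hypothetical pre-allocated page arrays:
 */
static void demo_copy_chain(struct page **dst, struct page **src, int n)
{
	struct dma_async_tx_descriptor *tx = NULL;
	int i;

	for (i = 0; i < n; i++)
		tx = async_memcpy(dst[i], src[i], 0, 0, PAGE_SIZE,
				ASYNC_TX_DEP_ACK, tx, NULL, NULL);

	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}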
struct r5dev *dev = &sh->dev[i];
clear_bit(R5_Insync, &dev->flags);
- pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
- /* maybe we can reply to a read */
- if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
- struct bio *rbi, *rbi2;
- pr_debug("Return read for disc %d\n", i);
- spin_lock_irq(&conf->device_lock);
- rbi = dev->toread;
- dev->toread = NULL;
- if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
- while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
- copy_data(0, rbi, dev->page, dev->sector);
- rbi2 = r5_next_bio(rbi, dev->sector);
- spin_lock_irq(&conf->device_lock);
- if (--rbi->bi_phys_segments == 0) {
- rbi->bi_next = return_bi;
- return_bi = rbi;
- }
- spin_unlock_irq(&conf->device_lock);
- rbi = rbi2;
- }
- }
+ pr_debug("check %d: state 0x%lx toread %p read %p write %p "
+ "written %p\n", i, dev->flags, dev->toread, dev->read,
+ dev->towrite, dev->written);
+
+ /* maybe we can request a biofill operation
+ *
+ * new wantfill requests are only permitted while
+ * STRIPE_OP_BIOFILL is clear
+ */
+ if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
+ !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+ set_bit(R5_Wantfill, &dev->flags);
/* now count some things */
if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
+ if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
- if (dev->toread)
+ if (test_bit(R5_Wantfill, &dev->flags))
+ s.to_fill++;
+ else if (dev->toread)
s.to_read++;
if (dev->towrite) {
s.to_write++;
set_bit(R5_Insync, &dev->flags);
}
rcu_read_unlock();
+
+ if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+ sh->ops.count++;
+
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d\n",
s.locked, s.uptodate, s.to_read, s.to_write,
* or to load a block that is being partially written.
*/
if (s.to_read || s.non_overwrite ||
- (s.syncing && (s.uptodate < disks)) || s.expanding)
+ (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding ||
+ test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
handle_issuing_new_read_requests5(sh, &s, disks);
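/* A sketch of how the biofill request counted above is serviced:
 * each R5_Wantfill device has its toread list moved to 'read' and the
 * cached page copied out through chained descriptors.  Assumes the
 * async_copy_data() helper introduced elsewhere in this series; the
 * completion callback (which moves STRIPE_OP_BIOFILL to .complete)
 * is elided.
 */
static void demo_run_biofill(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	struct dma_async_tx_descriptor *tx = NULL;
	int i;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;

			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
				/* frombio=0: copy page -> bio */
				tx = async_copy_data(0, rbi, dev->page,
						dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}
}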
/* Now we check to see if any write operations have recently
handle_issuing_new_write_requests5(conf, sh, &s, disks);
/* maybe we need to check and possibly fix the parity for this stripe
- * Any reads will already have been scheduled, so we just see if enough data
- * is available
+ * Any reads will already have been scheduled, so we just see if enough
+ * data is available. The parity check is held off while parity
+ * dependent operations are in flight.
*/
- if (s.syncing && s.locked == 0 &&
- !test_bit(STRIPE_INSYNC, &sh->state))
+ if ((s.syncing && s.locked == 0 &&
+ !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+ !test_bit(STRIPE_INSYNC, &sh->state)) ||
+ test_bit(STRIPE_OP_CHECK, &sh->ops.pending) ||
+ test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending))
handle_parity_checks5(conf, sh, &s, disks);
+
if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS,1);
clear_bit(STRIPE_SYNCING, &sh->state);
dev = &sh->dev[s.failed_num];
if (!test_bit(R5_ReWrite, &dev->flags)) {
set_bit(R5_Wantwrite, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
set_bit(R5_ReWrite, &dev->flags);
set_bit(R5_LOCKED, &dev->flags);
s.locked++;
} else {
/* let's read it back */
set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
set_bit(R5_LOCKED, &dev->flags);
s.locked++;
}
}
- if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
- /* Need to write out all blocks after computing parity */
- sh->disks = conf->raid_disks;
- sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
- compute_parity5(sh, RECONSTRUCT_WRITE);
+ /* Finish postxor operations initiated by the expansion
+ * process
+ */
+ if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
+ !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
+
+ clear_bit(STRIPE_EXPANDING, &sh->state);
+
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+
for (i = conf->raid_disks; i--; ) {
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- s.locked++;
set_bit(R5_Wantwrite, &sh->dev[i].flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
}
- clear_bit(STRIPE_EXPANDING, &sh->state);
- } else if (s.expanded) {
+ }
+
+ if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
+ !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+ /* Need to write out all blocks after computing parity */
+ sh->disks = conf->raid_disks;
+ sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
+ conf->raid_disks);
+ s.locked += handle_write_operations5(sh, 0, 1);
+ } else if (s.expanded &&
+ !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
wake_up(&conf->wait_for_overlap);
return_io(return_bi);
- for (i=disks; i-- ;) {
- int rw;
- struct bio *bi;
- mdk_rdev_t *rdev;
- if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
- rw = WRITE;
- else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- rw = READ;
- else
- continue;
-
- bi = &sh->dev[i].req;
-
- bi->bi_rw = rw;
- if (rw == WRITE)
- bi->bi_end_io = raid5_end_write_request;
- else
- bi->bi_end_io = raid5_end_read_request;
-
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(Faulty, &rdev->flags))
- rdev = NULL;
- if (rdev)
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- if (rdev) {
- if (s.syncing || s.expanding || s.expanded)
- md_sync_acct(rdev->bdev, STRIPE_SECTORS);
-
- bi->bi_bdev = rdev->bdev;
- pr_debug("for %llu schedule op %ld on disc %d\n",
- (unsigned long long)sh->sector, bi->bi_rw, i);
- atomic_inc(&sh->count);
- bi->bi_sector = sh->sector + rdev->data_offset;
- bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_vcnt = 1;
- bi->bi_max_vecs = 1;
- bi->bi_idx = 0;
- bi->bi_io_vec = &sh->dev[i].vec;
- bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
- bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
- bi->bi_next = NULL;
- if (rw == WRITE &&
- test_bit(R5_ReWrite, &sh->dev[i].flags))
- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
- generic_make_request(bi);
- } else {
- if (rw == WRITE)
- set_bit(STRIPE_DEGRADED, &sh->state);
- pr_debug("skip op %ld on disc %d for sector %llu\n",
- bi->bi_rw, i, (unsigned long long)sh->sector);
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
}
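/* The submission loop deleted above does not disappear: it is assumed
 * to move into a raid5_run_ops-serviced STRIPE_OP_IO path running
 * outside sh->lock.  A condensed sketch of its shape; rdev lookup,
 * sync accounting, and the degraded/skip branch are elided.
 */
static void demo_ops_run_io(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; ) {
		struct bio *bi = &sh->dev[i].req;
		int rw;

		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi->bi_rw = rw;
		bi->bi_end_io = (rw == WRITE) ? raid5_end_write_request :
				raid5_end_read_request;
		/* bi_bdev, bi_sector, and the single-page io_vec are
		 * initialized exactly as in the deleted loop
		 */
		atomic_inc(&sh->count);
		generic_make_request(bi);
	}
}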
static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)