diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0b66afef2d82009be4f54c34880df588a8a22b61..f96dea975fa50d94e805c40c46be82df7cfe80bc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -289,7 +289,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
 }
 
 static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
 
 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
                                             int pd_idx, int noblock)
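
The signature change above is part of the 2.6.23-era removal of the block
layer's request_queue_t typedef; the same mechanical substitution recurs in
the unplug, flush, merge, and make_request hunks further down. The alias
being retired was simply:

	/* formerly in <linux/blkdev.h>; users now name the struct directly */
	typedef struct request_queue request_queue_t;
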
@@ -493,12 +493,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                        b_offset, clen,
-                                       ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_SRC,
+                                       ASYNC_TX_DEP_ACK,
                                        tx, NULL, NULL);
                        else
                                tx = async_memcpy(bio_page, page, b_offset,
                                        page_offset, clen,
-                                       ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_DST,
+                                       ASYNC_TX_DEP_ACK,
                                        tx, NULL, NULL);
                }
                if (clen < len) /* hit end of page */
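
Dropping ASYNC_TX_KMAP_SRC/ASYNC_TX_KMAP_DST matches the async_tx API change
that removed per-direction kmap hints: the synchronous CPU fallback is
expected to map both pages itself. A minimal sketch of such a fallback, with
illustrative names only (this is not the in-tree implementation):

	static void sync_copy_fallback(struct page *dst, unsigned int dst_off,
				       struct page *src, unsigned int src_off,
				       size_t len)
	{
		/* map both pages unconditionally; the caller no longer
		 * passes KMAP hints for highmem
		 */
		void *d = kmap_atomic(dst, KM_USER0);
		void *s = kmap_atomic(src, KM_USER1);

		memcpy(d + dst_off, s + src_off, len);

		kunmap_atomic(s, KM_USER1);
		kunmap_atomic(d, KM_USER0);
	}
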
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
        struct stripe_head *sh = stripe_head_ref;
        struct bio *return_bi = NULL;
        raid5_conf_t *conf = sh->raid_conf;
-       int i, more_to_read = 0;
+       int i;
 
        pr_debug("%s: stripe %llu\n", __FUNCTION__,
                (unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
        /* clear completed biofills */
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
-               /* check if this stripe has new incoming reads */
-               if (dev->toread)
-                       more_to_read++;
 
                /* acknowledge completion of a biofill operation */
-               /* and check if we need to reply to a read request
-               */
-               if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+               /* and check if we need to reply to a read request,
+                * new R5_Wantfill requests are held off until
+                * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+                */
+               if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi, *rbi2;
-                       clear_bit(R5_Wantfill, &dev->flags);
 
                        /* The access to dev->read is outside of the
                         * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
        return_io(return_bi);
 
-       if (more_to_read)
-               set_bit(STRIPE_HANDLE, &sh->state);
+       set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
 }
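
The more_to_read accounting removed above was racy: a read arriving between
a separate test_bit()/clear_bit() pair could have its R5_Wantfill flag lost.
With the atomic test_and_clear_bit() and an unconditional STRIPE_HANDLE,
correctness instead relies on the submission side holding new biofills off
while one is pending, per the new comment in this hunk. Roughly (illustrative,
not the exact in-tree check):

	/* submission side: defer new fill requests while a biofill
	 * operation is still in flight, so the completion handler can
	 * clear R5_Wantfill without racing against a fresh request
	 */
	if (dev->toread &&
	    !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
		set_bit(R5_Wantfill, &dev->flags);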
 
@@ -951,7 +948,7 @@ static int grow_stripes(raid5_conf_t *conf, int num)
        conf->active_name = 0;
        sc = kmem_cache_create(conf->cache_name[conf->active_name],
                               sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
-                              0, 0, NULL, NULL);
+                              0, 0, NULL);
        if (!sc)
                return 1;
        conf->slab_cache = sc;
@@ -1003,7 +1000,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
        /* Step 1 */
        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
                               sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
-                              0, 0, NULL, NULL);
+                              0, 0, NULL);
        if (!sc)
                return -ENOMEM;
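
In both kmem_cache_create() hunks the trailing NULL that disappears was the
slab destructor argument, removed from the allocator in this same kernel
generation; the surviving prototype takes only a constructor. As a sketch
(treat the exact constructor argument list as an assumption for this tree):

	struct kmem_cache *kmem_cache_create(const char *name,
			size_t size, size_t align,
			unsigned long flags,
			void (*ctor)(void *, struct kmem_cache *,
				     unsigned long));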
 
@@ -2541,7 +2538,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
        struct dma_async_tx_descriptor *tx = NULL;
        clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
        for (i = 0; i < sh->disks; i++)
-               if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
+               if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
                        int dd_idx, pd_idx, j;
                        struct stripe_head *sh2;
 
@@ -2574,7 +2571,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                        set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
                        for (j = 0; j < conf->raid_disks; j++)
                                if (j != sh2->pd_idx &&
-                                   (r6s && j != r6s->qd_idx) &&
+                                   (!r6s || j != raid6_next_disk(sh2->pd_idx,
+                                                                sh2->disks)) &&
                                    !test_bit(R5_Expanded, &sh2->dev[j].flags))
                                        break;
                        if (j == conf->raid_disks) {
@@ -2583,12 +2581,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
                        }
                        release_stripe(sh2);
 
-                       /* done submitting copies, wait for them to complete */
-                       if (i + 1 >= sh->disks) {
-                               async_tx_ack(tx);
-                               dma_wait_for_async_tx(tx);
-                       }
                }
+       /* done submitting copies, wait for them to complete */
+       if (tx) {
+               async_tx_ack(tx);
+               dma_wait_for_async_tx(tx);
+       }
 }
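
Two fixes land in this hunk. First, the old guard (r6s && i != r6s->qd_idx)
is false whenever r6s is NULL, so on RAID5 the expansion copy loop never
executed its body at all; the corrected predicate skips only disks that are
really parity. Written out as a hypothetical helper for illustration:

	/* RAID5 passes r6s == NULL: every non-P disk is a copy target.
	 * RAID6 passes r6s != NULL: additionally skip the Q disk.
	 * (illustrative helper, not present in the source)
	 */
	static inline int expansion_copy_target(int i, int pd_idx,
						struct r6_state *r6s)
	{
		return i != pd_idx && (!r6s || i != r6s->qd_idx);
	}

Second, the wait for the submitted DMA copies used to sit inside the loop
behind an i + 1 >= sh->disks test that the broken guard could keep from ever
running; it now happens once after the loop, and only when a descriptor was
actually issued (tx != NULL).
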
 
 /*
@@ -2855,7 +2853,7 @@ static void handle_stripe5(struct stripe_head *sh)
                sh->disks = conf->raid_disks;
                sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
                        conf->raid_disks);
-               s.locked += handle_write_operations5(sh, 0, 1);
+               s.locked += handle_write_operations5(sh, 1, 1);
        } else if (s.expanded &&
                !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
                clear_bit(STRIPE_EXPAND_READY, &sh->state);
@@ -3182,7 +3180,7 @@ static void unplug_slaves(mddev_t *mddev)
        for (i=0; i<mddev->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
@@ -3197,7 +3195,7 @@ static void unplug_slaves(mddev_t *mddev)
        rcu_read_unlock();
 }
 
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3214,7 @@ static void raid5_unplug_device(request_queue_t *q)
        unplug_slaves(mddev);
 }
 
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
                             sector_t *error_sector)
 {
        mddev_t *mddev = q->queuedata;
@@ -3228,7 +3226,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct block_device *bdev = rdev->bdev;
-                       request_queue_t *r_queue = bdev_get_queue(bdev);
+                       struct request_queue *r_queue = bdev_get_queue(bdev);
 
                        if (!r_queue->issue_flush_fn)
                                ret = -EOPNOTSUPP;
@@ -3267,7 +3265,7 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
        mddev_t *mddev = q->queuedata;
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
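
The comment above states the policy; the mechanism is chunk_aligned_read(),
which bypasses the stripe cache for reads that do not cross a chunk boundary.
The qualifying test is along these lines (a sketch of the in_chunk_boundary()
logic, assuming chunk_sectors is a power of two):

	/* a bio stays inside one chunk iff its end does not cross the
	 * chunk boundary; with a power-of-two chunk size the offset
	 * within the chunk is a simple mask
	 */
	static int fits_in_one_chunk(sector_t sector, unsigned int bio_sectors,
				     unsigned int chunk_sectors)
	{
		return chunk_sectors >=
			((sector & (chunk_sectors - 1)) + bio_sectors);
	}
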
@@ -3377,7 +3375,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
 
 static int bio_fits_rdev(struct bio *bi)
 {
-       request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+       struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
        if ((bi->bi_size>>9) > q->max_sectors)
                return 0;
@@ -3396,7 +3394,7 @@ static int bio_fits_rdev(struct bio *bi)
 }
 
 
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3464,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 }
 
 
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
 {
        mddev_t *mddev = q->queuedata;
        raid5_conf_t *conf = mddev_to_conf(mddev);