static void raid5_build_block (struct stripe_head *sh, int i);
-static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
+static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
{
raid5_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks, i;
+ int i;
if (atomic_read(&sh->count) != 0)
BUG();
sh->pd_idx = pd_idx;
sh->state = 0;
- for (i=disks; i--; ) {
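+ /* record this stripe's geometry; during an expansion it can differ from conf->raid_disks */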
+ sh->disks = disks;
+
+ for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (dev->toread || dev->towrite || dev->written ||
insert_hash(conf, sh);
}
-static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
+static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
{
struct stripe_head *sh;
struct hlist_node *hn;
CHECK_DEVLOCK();
PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
- if (sh->sector == sector)
+ if (sh->sector == sector && sh->disks == disks)
return sh;
PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
return NULL;
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(request_queue_t *q);
-static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
- int pd_idx, int noblock)
+static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
+ int pd_idx, int noblock)
{
struct stripe_head *sh;
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
conf->device_lock, /* nothing */);
- sh = __find_stripe(conf, sector);
+ sh = __find_stripe(conf, sector, disks);
if (!sh) {
if (!conf->inactive_blocked)
sh = get_free_stripe(conf);
);
conf->inactive_blocked = 0;
} else
- init_stripe(sh, sector, pd_idx);
+ init_stripe(sh, sector, pd_idx, disks);
} else {
if (atomic_read(&sh->count)) {
if (!list_empty(&sh->lru))
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
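+ /* a freshly grown stripe always uses the current array geometry */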
+ sh->disks = conf->raid_disks;
/* we just created an active stripe so... */
atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
kmem_cache_t *sc;
int devs = conf->raid_disks;
- sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));
-
- sc = kmem_cache_create(conf->cache_name,
+ sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
+ sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
+ conf->active_name = 0;
+ sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
0, 0, NULL, NULL);
if (!sc)
return 1;
conf->slab_cache = sc;
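+ /* pool_size is the number of devices the cached stripes can currently hold */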
+ conf->pool_size = devs;
while (num--) {
if (!grow_one_stripe(conf))
return 1;
}
return 0;
}
+static int resize_stripes(raid5_conf_t *conf, int newsize)
+{
+ /* Make all the stripes able to hold 'newsize' devices.
+ * New slots in each stripe get 'page' set to a new page.
+ *
+ * This happens in stages:
+ * 1/ create a new kmem_cache and allocate the required number of
+ * stripe_heads.
+ * 2/ gather all the old stripe_heads and transfer the pages across
+ * to the new stripe_heads. This will have the side effect of
+ * freezing the array as once all stripe_heads have been collected,
+ * no IO will be possible. Old stripe heads are freed once their
+ * pages have been transferred over, and the old kmem_cache is
+ * freed when all stripes are done.
+ * 3/ reallocate conf->disks to be suitably bigger. If this fails,
+ * we simply return a failure status - no need to clean anything up.
+ * 4/ allocate new pages for the new slots in the new stripe_heads.
+ * If this fails, we don't bother trying to shrink the
+ * stripe_heads down again, we just leave them as they are.
+ * As each new stripe_head is processed, it is released into
+ * active service.
+ *
+ * Once step 2 has started, we cannot afford to wait for a write,
+ * so we use GFP_NOIO allocations.
+ */
+ struct stripe_head *osh, *nsh;
+ LIST_HEAD(newstripes);
+ struct disk_info *ndisks;
+ int err = 0;
+ kmem_cache_t *sc;
+ int i;
+
+ if (newsize <= conf->pool_size)
+ return 0; /* never bother to shrink */
+
+ /* Step 1 */
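+ /* use the currently inactive cache name; the old cache still owns the active one */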
+ sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
+ sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
+ 0, 0, NULL, NULL);
+ if (!sc)
+ return -ENOMEM;
+
+ for (i = conf->max_nr_stripes; i; i--) {
+ nsh = kmem_cache_alloc(sc, GFP_KERNEL);
+ if (!nsh)
+ break;
+
+ memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
+
+ nsh->raid_conf = conf;
+ spin_lock_init(&nsh->lock);
+
+ list_add(&nsh->lru, &newstripes);
+ }
+ if (i) {
+ /* didn't get enough, give up */
+ while (!list_empty(&newstripes)) {
+ nsh = list_entry(newstripes.next, struct stripe_head, lru);
+ list_del(&nsh->lru);
+ kmem_cache_free(sc, nsh);
+ }
+ kmem_cache_destroy(sc);
+ return -ENOMEM;
+ }
+ /* Step 2 - Must use GFP_NOIO now.
+ * OK, we have enough stripes, start collecting inactive
+ * stripes and copying them over
+ */
+ list_for_each_entry(nsh, &newstripes, lru) {
+ spin_lock_irq(&conf->device_lock);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ !list_empty(&conf->inactive_list),
+ conf->device_lock,
+ unplug_slaves(conf->mddev);
+ );
+ osh = get_free_stripe(conf);
+ spin_unlock_irq(&conf->device_lock);
+ atomic_set(&nsh->count, 1);
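+ /* hand the old stripe's pages across; slots beyond the old pool size are filled in step 4 */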
+ for (i = 0; i < conf->pool_size; i++)
+ nsh->dev[i].page = osh->dev[i].page;
+ for ( ; i < newsize; i++)
+ nsh->dev[i].page = NULL;
+ kmem_cache_free(conf->slab_cache, osh);
+ }
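+ /* all old stripe_heads have been freed, so the old cache can be destroyed */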
+ kmem_cache_destroy(conf->slab_cache);
+
+ /* Step 3.
+ * At this point, we are holding all the stripes so the array
+ * is completely stalled, so now is a good time to resize
+ * conf->disks.
+ */
+ ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
+ if (ndisks) {
+ for (i = 0; i < conf->raid_disks; i++)
+ ndisks[i] = conf->disks[i];
+ kfree(conf->disks);
+ conf->disks = ndisks;
+ } else
+ err = -ENOMEM;
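+ /* even on failure, fall through: the collected stripes must be returned to service */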
+
+ /* Step 4, return new stripes to service */
+ while (!list_empty(&newstripes)) {
+ nsh = list_entry(newstripes.next, struct stripe_head, lru);
+ list_del_init(&nsh->lru);
+ for (i = conf->raid_disks; i < newsize; i++)
+ if (nsh->dev[i].page == NULL) {
+ struct page *p = alloc_page(GFP_NOIO);
+ nsh->dev[i].page = p;
+ if (!p)
+ err = -ENOMEM;
+ }
+ release_stripe(nsh);
+ }
+ /* critical section passed, GFP_NOIO no longer needed */
+
+ conf->slab_cache = sc;
+ conf->active_name = 1-conf->active_name;
+ conf->pool_size = newsize;
+ return err;
+}
+
static int drop_one_stripe(raid5_conf_t *conf)
{
return 0;
if (atomic_read(&sh->count))
BUG();
- shrink_buffers(sh, conf->raid_disks);
+ shrink_buffers(sh, conf->pool_size);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
return 1;
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks, i;
+ int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
if (bi->bi_size)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks, i;
+ int disks = sh->disks, i;
unsigned long flags;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
raid5_conf_t *conf = sh->raid_conf;
- int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
+ int raid_disks = sh->disks, data_disks = raid_disks - 1;
sector_t new_sector = sh->sector, check;
int sectors_per_chunk = conf->chunk_size >> 9;
sector_t stripe;
static void compute_block(struct stripe_head *sh, int dd_idx)
{
- raid5_conf_t *conf = sh->raid_conf;
- int i, count, disks = conf->raid_disks;
+ int i, count, disks = sh->disks;
void *ptr[MAX_XOR_BLOCKS], *p;
PRINTK("compute_block, stripe %llu, idx %d\n",
static void compute_parity(struct stripe_head *sh, int method)
{
raid5_conf_t *conf = sh->raid_conf;
- int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
+ int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
void *ptr[MAX_XOR_BLOCKS];
struct bio *chosen;
static void handle_stripe(struct stripe_head *sh)
{
raid5_conf_t *conf = sh->raid_conf;
- int disks = conf->raid_disks;
+ int disks = sh->disks;
struct bio *return_bi= NULL;
struct bio *bi;
int i;
spin_unlock_irq(&conf->device_lock);
}
-static int make_request (request_queue_t *q, struct bio * bi)
+static int make_request(request_queue_t *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
- const unsigned int raid_disks = conf->raid_disks;
- const unsigned int data_disks = raid_disks - 1;
unsigned int dd_idx, pd_idx;
sector_t new_sector;
sector_t logical_sector, last_sector;
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
+ int disks;
- new_sector = raid5_compute_sector(logical_sector,
- raid_disks, data_disks, &dd_idx, &pd_idx, conf);
-
+ retry:
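+ /* pick the geometry for this sector: regions the expansion has not reached yet still use the old disk count */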
+ if (likely(conf->expand_progress == MaxSector))
+ disks = conf->raid_disks;
+ else {
+ spin_lock_irq(&conf->device_lock);
+ disks = conf->raid_disks;
+ if (logical_sector >= conf->expand_progress)
+ disks = conf->previous_raid_disks;
+ spin_unlock_irq(&conf->device_lock);
+ }
+ new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
+ &dd_idx, &pd_idx, conf);
PRINTK("raid5: make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
- retry:
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
- sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
+ sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
if (sh) {
- if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
- /* Add failed due to overlap. Flush everything
+ if (unlikely(conf->expand_progress != MaxSector)) {
+ /* expansion might have moved on while waiting for a
+ * stripe, so we must do the range check again.
+ */
+ int must_retry = 0;
+ spin_lock_irq(&conf->device_lock);
+ if (logical_sector < conf->expand_progress &&
+ disks == conf->previous_raid_disks)
+ /* mismatch, need to try again */
+ must_retry = 1;
+ spin_unlock_irq(&conf->device_lock);
+ if (must_retry) {
+ release_stripe(sh);
+ goto retry;
+ }
+ }
+
+ if (test_bit(STRIPE_EXPANDING, &sh->state) ||
+ !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
+ /* Stripe is busy expanding or
+ * add failed due to overlap. Flush everything
* and wait a while
*/
raid5_unplug_device(mddev->queue);
raid5_plug_device(conf);
handle_stripe(sh);
release_stripe(sh);
-
} else {
/* cannot get stripe for read-ahead, just give-up */
clear_bit(BIO_UPTODATE, &bi->bi_flags);
first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
- sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
+ sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
if (sh == NULL) {
- sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
+ sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
return -EIO;
}
- mddev->private = kzalloc(sizeof (raid5_conf_t)
- + mddev->raid_disks * sizeof(struct disk_info),
- GFP_KERNEL);
+ mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
if ((conf = mddev->private) == NULL)
goto abort;
+ conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
+ GFP_KERNEL);
+ if (!conf->disks)
+ goto abort;
conf->mddev = mddev;
conf->level = mddev->level;
conf->algorithm = mddev->layout;
conf->max_nr_stripes = NR_STRIPES;
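+ /* MaxSector means no expansion is currently in progress */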
+ conf->expand_progress = MaxSector;
/* device size must be a multiple of chunk size */
mddev->size &= ~(mddev->chunk_size/1024 -1);
abort:
if (conf) {
print_raid5_conf(conf);
+ kfree(conf->disks);
kfree(conf->stripe_hashtbl);
kfree(conf);
}
kfree(conf->stripe_hashtbl);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
+ kfree(conf->disks);
kfree(conf);
mddev->private = NULL;
return 0;
printk("sh %llu, count %d.\n",
(unsigned long long)sh->sector, atomic_read(&sh->count));
printk("sh %llu, ", (unsigned long long)sh->sector);
- for (i = 0; i < sh->raid_conf->raid_disks; i++) {
+ for (i = 0; i < sh->disks; i++) {
printk("(cache%d: %p %ld) ",
i, sh->dev[i].page, sh->dev[i].flags);
}