/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *
 * RAID-5 management functions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/raid/raid5.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

#include <linux/raid/bitmap.h>
/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
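/*
 * Illustrative note (not from the original source): with 4KiB pages,
 * STRIPE_SIZE is 4096 bytes, STRIPE_SHIFT is 3 and STRIPE_SECTORS is 8,
 * so a stripe covers eight 512-byte sectors per device and stripe_hash()
 * indexes the table by (sector / 8) masked to the table size.
 */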
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
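/*
 * Usage sketch (illustrative only, mirroring how the macro is used below):
 *
 *	struct bio *b = sh->dev[i].toread;
 *	while (b && b->bi_sector < dev_sector + STRIPE_SECTORS) {
 *		... process b ...
 *		b = r5_next_bio(b, dev_sector);
 *	}
 */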
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
static void print_raid5_conf (raid5_conf_t *conf);
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		if (!list_empty(&sh->lru))
			BUG();
		if (atomic_read(&conf->active_stripes)==0)
			BUG();
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 conf->seq_write == sh->bm_seq)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			list_add_tail(&sh->lru, &conf->inactive_list);
			atomic_dec(&conf->active_stripes);
			if (!conf->inactive_blocked ||
			    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
				wake_up(&conf->wait_for_stripe);
		}
	}
}
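/*
 * release_stripe() is the unlocked counterpart of __release_stripe():
 * it takes device_lock itself and is the form used outside the lock.
 */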
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block (struct stripe_head *sh, int i);
static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	if (atomic_read(&sh->count) != 0)
		BUG();
	if (test_bit(STRIPE_HANDLE, &sh->state))
		BUG();

	CHECK_DEVLOCK();
	PRINTK("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->sector = sector;
	sh->pd_idx = pd_idx;
	sh->state = 0;
	sh->disks = disks;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk("sector=%llx i=%d %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->disks == disks)
			return sh;
	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(request_queue_t *q);
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
					     int pd_idx, int noblock)
{
	struct stripe_head *sh;

	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, disks);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    unplug_slaves(conf->mddev);
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, pd_idx, disks);
		} else {
			if (atomic_read(&sh->count)) {
				if (!list_empty(&sh->lru))
					BUG();
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);

	return sh;
}
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
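/*
 * Note: two cache names ("raid5/<dev>" and "raid5/<dev>-alt") are kept so
 * that resize_stripes() can create a second kmem_cache while the old one
 * is still live; active_name records which of the pair is in use.
 */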
static int grow_stripes(raid5_conf_t *conf, int num)
{
	kmem_cache_t *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
	sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err = 0;
	kmem_cache_t *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	if (atomic_read(&sh->count))
		BUG();
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
				   int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	if (uptodate) {
#if 0
		/* disabled legacy buffer_head path */
		unsigned long flags;

		spin_lock_irqsave(&conf->device_lock, flags);
		/* we can return a buffer if we bypassed the cache or
		 * if the top buffer is not in highmem.  If there are
		 * multiple buffers, leave the extra work to
		 * handle_stripe
		 */
		buffer = sh->bh_read[i];
		if (buffer &&
		    (!PageHighMem(buffer->b_page)
		     || buffer->b_page == bh->b_page )
			) {
			sh->bh_read[i] = buffer->b_reqnext;
			buffer->b_reqnext = NULL;
		} else
			buffer = NULL;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (sh->bh_page[i]==bh->b_page)
			set_buffer_uptodate(bh);
		if (buffer) {
			if (buffer->b_page != bh->b_page)
				memcpy(buffer->b_data, bh->b_data, bh->b_size);
			buffer->b_end_io(buffer, 1);
		}
#else
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			printk(KERN_INFO "raid5: read error corrected!!\n");
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		int retry = 0;
		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&conf->disks[i].rdev->read_errors);
		if (conf->mddev->degraded)
			printk(KERN_WARNING "raid5: read error not correctable.\n");
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
		else if (atomic_read(&conf->disks[i].rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5: Too many read errors, failing device.\n");
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, conf->disks[i].rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
	/* must restore b_page before unlocking buffer... */
	if (sh->bh_page[i] != bh->b_page) {
		bh->b_page = sh->bh_page[i];
		bh->b_data = page_address(bh->b_page);
		clear_buffer_uptodate(bh);
	}
#endif
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
	return 0;
}
static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
				    int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	unsigned long flags;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	spin_lock_irqsave(&conf->device_lock, flags);
	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	return 0;
}
static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block (struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	PRINTK("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		mddev->sb_dirty = 1;
		if (test_bit(In_sync, &rdev->flags)) {
			conf->working_disks--;
			mddev->degraded++;
			conf->failed_disks++;
			clear_bit(In_sync, &rdev->flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
			"raid5: Disk failure on %s, disabling device."
			" Operation continuing on %d devices\n",
			bdevname(rdev->bdev,b), conf->working_disks);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
			unsigned int data_disks, unsigned int * dd_idx,
			unsigned int * pd_idx, raid5_conf_t *conf)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	if (conf->level == 4)
		*pd_idx = data_disks;
	else switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				conf->algorithm);
	}

	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
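/*
 * Worked example (illustrative, not from the original source): assume a
 * 5-disk RAID5 (raid_disks=5, data_disks=4), a 64KiB chunk
 * (sectors_per_chunk=128) and ALGORITHM_LEFT_ASYMMETRIC.  For
 * r_sector=1000: chunk_number=7, chunk_offset=104, stripe=1, dd_idx=3;
 * pd_idx = 4 - (1 % 5) = 3, and since dd_idx >= pd_idx the data index
 * shifts to dd_idx=4.  The block therefore lives at sector
 * 1*128+104 = 232 of disk 4, with parity on disk 3.
 */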
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks, data_disks = raid_disks - 1;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	sector_t r_sector;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       conf->algorithm);
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
/*
 * Copy data between a page in the stripe cache, and a bio.
 * There are no alignment or size guarantees between the page or the
 * bio except that there is some overlap.
 * All iovecs in the bio must be considered.
 */
static void copy_data(int frombio, struct bio *bio,
		     struct page *page,
		     sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}
#define check_xor()	do {						\
			   if (count == MAX_XOR_BLOCKS) {		\
				xor_block(count, STRIPE_SIZE, ptr);	\
				count = 1;				\
			   }						\
			} while(0)
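/*
 * check_xor() folds the accumulated sources into ptr[0] whenever the
 * array is full: xor_block() takes at most MAX_XOR_BLOCKS inputs at a
 * time, so the partial result is kept in ptr[0] and count restarts at 1.
 */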
static void compute_block(struct stripe_head *sh, int dd_idx)
{
	int i, count, disks = sh->disks;
	void *ptr[MAX_XOR_BLOCKS], *p;

	PRINTK("compute_block, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	ptr[0] = page_address(sh->dev[dd_idx].page);
	memset(ptr[0], 0, STRIPE_SIZE);
	count = 1;
	for (i = disks ; i--; ) {
		if (i == dd_idx)
			continue;
		p = page_address(sh->dev[i].page);
		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
			ptr[count++] = p;
		else
			printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
				" not present\n", dd_idx,
				(unsigned long long)sh->sector, i);

		check_xor();
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);
	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}
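/*
 * Background note (illustrative, not from the original source): with the
 * xor_block() primitive above, READ_MODIFY_WRITE updates parity
 * incrementally, P_new = P_old ^ D_old ^ D_new for each block written;
 * RECONSTRUCT_WRITE recomputes P = D_0 ^ D_1 ^ ... from every data block
 * in the stripe; and CHECK_PARITY xors the parity block with all data
 * blocks so the result should be all zeroes.  compute_parity() below
 * implements all three methods.
 */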
static void compute_parity(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
	void *ptr[MAX_XOR_BLOCKS];
	struct bio *chosen;

	PRINTK("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	count = 1;
	ptr[0] = page_address(sh->dev[pd_idx].page);
	switch(method) {
	case READ_MODIFY_WRITE:
		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
			BUG();
		for (i=disks ; i-- ;) {
			if (i==pd_idx)
				continue;
			if (sh->dev[i].towrite &&
			    test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
				ptr[count++] = page_address(sh->dev[i].page);
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
				check_xor();
			}
		}
		break;
	case RECONSTRUCT_WRITE:
		memset(ptr[0], 0, STRIPE_SIZE);
		for (i= disks; i-- ;)
			if (i!=pd_idx && sh->dev[i].towrite) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		break;
	}
	if (count>1) {
		xor_block(count, STRIPE_SIZE, ptr);
		count = 1;
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	switch(method) {
	case RECONSTRUCT_WRITE:
	case CHECK_PARITY:
		for (i=disks; i--;)
			if (i != pd_idx) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
		break;
	case READ_MODIFY_WRITE:
		for (i = disks; i--;)
			if (sh->dev[i].written) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);

	if (method != CHECK_PARITY) {
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
	} else
		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
}
/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	PRINTK("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);

	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
		BUG();
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments ++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		sh->bm_seq = conf->seq_write;
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * Parity calculations are done inside the stripe lock;
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */
static void handle_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks;
	struct bio *return_bi= NULL;
	struct bio *bi;
	int i;
	int syncing;
	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
	int non_overwrite = 0;
	int failed_num=0;
	struct r5dev *dev;

	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
		(unsigned long long)sh->sector, atomic_read(&sh->count),
		sh->pd_idx);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	syncing = test_bit(STRIPE_SYNCING, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
			struct bio *rbi, *rbi2;
			PRINTK("Return read for disc %d\n", i);
			spin_lock_irq(&conf->device_lock);
			rbi = dev->toread;
			dev->toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&conf->wait_for_overlap);
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				copy_data(0, rbi, dev->page, dev->sector);
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (--rbi->bi_phys_segments == 0) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;

		if (dev->toread) to_read++;
		if (dev->towrite) {
			to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				non_overwrite++;
		}
		if (dev->written) written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			failed++;
			failed_num = i;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();
	PRINTK("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		locked, uptodate, to_read, to_write, failed, failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (failed > 1 && to_read+to_write+written) {
		for (i=disks; i--; ) {
			int bitmap_end = 0;

			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
				mdk_rdev_t *rdev;
				rcu_read_lock();
				rdev = rcu_dereference(conf->disks[i].rdev);
				if (rdev && test_bit(In_sync, &rdev->flags))
					/* multiple read failures in one stripe */
					md_error(conf->mddev, rdev);
				rcu_read_unlock();
			}

			spin_lock_irq(&conf->device_lock);
			/* fail all writes first */
			bi = sh->dev[i].towrite;
			sh->dev[i].towrite = NULL;
			if (bi) { to_write--; bitmap_end = 1; }

			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);

			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = nextbi;
			}
			/* and fail all 'written' */
			bi = sh->dev[i].written;
			sh->dev[i].written = NULL;
			if (bi) bitmap_end = 1;
			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = bi2;
			}

			/* fail any reads if this device is non-operational */
			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
				bi = sh->dev[i].toread;
				sh->dev[i].toread = NULL;
				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);
				if (bi) to_read--;
				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
					clear_bit(BIO_UPTODATE, &bi->bi_flags);
					if (--bi->bi_phys_segments == 0) {
						bi->bi_next = return_bi;
						return_bi = bi;
					}
					bi = nextbi;
				}
			}
			spin_unlock_irq(&conf->device_lock);
			if (bitmap_end)
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS, 0, 0);
		}
	}
	if (failed > 1 && syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		syncing = 0;
	}
	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( written &&
	     ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
		test_bit(R5_UPTODATE, &dev->flags))
	       || (failed == 1 && failed_num == sh->pd_idx))
	    ) {
	    /* any written block on an uptodate or failed drive can be returned.
	     * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
	     * never LOCKED, so we don't need to test 'failed' directly.
	     */
	    for (i=disks; i--; )
		if (sh->dev[i].written) {
		    dev = &sh->dev[i];
		    if (!test_bit(R5_LOCKED, &dev->flags) &&
			 test_bit(R5_UPTODATE, &dev->flags) ) {
			/* We can return any write requests */
			    struct bio *wbi, *wbi2;
			    int bitmap_end = 0;
			    PRINTK("Return write for disc %d\n", i);
			    spin_lock_irq(&conf->device_lock);
			    wbi = dev->written;
			    dev->written = NULL;
			    while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				    wbi2 = r5_next_bio(wbi, dev->sector);
				    if (--wbi->bi_phys_segments == 0) {
					    md_write_end(conf->mddev);
					    wbi->bi_next = return_bi;
					    return_bi = wbi;
				    }
				    wbi = wbi2;
			    }
			    if (dev->towrite == NULL)
				    bitmap_end = 1;
			    spin_unlock_irq(&conf->device_lock);
			    if (bitmap_end)
				    bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						    STRIPE_SECTORS,
						    !test_bit(STRIPE_DEGRADED, &sh->state), 0);
		    }
		}
	}
	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
		for (i=disks; i--;) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
			    (dev->toread ||
			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
			     syncing ||
			     (failed && (sh->dev[failed_num].toread ||
					 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
				    )
				) {
				/* we would like to get this block, possibly
				 * by computing it, but we might not be able to
				 */
				if (uptodate == disks-1) {
					PRINTK("Computing block %d\n", i);
					compute_block(sh, i);
					uptodate++;
				} else if (test_bit(R5_Insync, &dev->flags)) {
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
#if 0
					/* if I am just reading this block and we don't have
					   a failed drive, or any pending writes then sidestep the cache */
					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
					    ! syncing && !failed && !to_write) {
						sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
						sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
					}
#endif
					locked++;
					PRINTK("Reading block %d (sync=%d)\n",
						i, syncing);
				}
			}
		}
		set_bit(STRIPE_HANDLE, &sh->state);
	}
	/* now to consider writing and what else, if anything, should be read */
	if (to_write) {
		int rmw=0, rcw=0;
		for (i=disks ; i--;) {
			/* would I have to read this buffer for read_modify_write */
			dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
			     || sh->bh_page[i] != bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)
/*				    && !(!mddev->insync && i == sh->pd_idx) */
					)
					rmw++;
				else rmw += 2*disks;  /* cannot read it */
			}
			/* Would I have to read this buffer for reconstruct_write */
			if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
			     || sh->bh_page[i] != bh->b_page
#endif
				    ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)) rcw++;
				else rcw += 2*disks;
			}
		}
		PRINTK("for sector %llu, rmw=%d rcw=%d\n",
			(unsigned long long)sh->sector, rmw, rcw);
		set_bit(STRIPE_HANDLE, &sh->state);
		if (rmw < rcw && rmw > 0)
			/* prefer read-modify-write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if ((dev->towrite || i == sh->pd_idx) &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
						PRINTK("Read_old block %d for r-m-w\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		if (rcw <= rmw && rcw > 0)
			/* want reconstruct write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
						PRINTK("Read_old block %d for Reconstruct\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		/* now if nothing is locked, and if we have enough data, we can start a write request */
		if (locked == 0 && (rcw == 0 || rmw == 0) &&
		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
			PRINTK("Computing parity...\n");
			compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
			/* now every locked buffer is ready to be written */
			for (i=disks; i--;)
				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
					PRINTK("Writing block %d\n", i);
					locked++;
					set_bit(R5_Wantwrite, &sh->dev[i].flags);
					if (!test_bit(R5_Insync, &sh->dev[i].flags)
					    || (i==sh->pd_idx && failed == 0))
						set_bit(STRIPE_INSYNC, &sh->state);
				}
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
	/* maybe we need to check and possibly fix the parity for this stripe.
	 * Any reads will already have been scheduled, so we just see if enough data
	 * is available
	 */
	if (syncing && locked == 0 &&
	    !test_bit(STRIPE_INSYNC, &sh->state)) {
		set_bit(STRIPE_HANDLE, &sh->state);
		if (failed == 0) {
			char *pagea;
			if (uptodate != disks)
				BUG();
			compute_parity(sh, CHECK_PARITY);
			uptodate--;
			pagea = page_address(sh->dev[sh->pd_idx].page);
			if ((*(u32*)pagea) == 0 &&
			    !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
				/* parity is correct (on disc, not in buffer any more) */
				set_bit(STRIPE_INSYNC, &sh->state);
			} else {
				conf->mddev->resync_mismatches += STRIPE_SECTORS;
				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
					/* don't try to repair!! */
					set_bit(STRIPE_INSYNC, &sh->state);
				else {
					compute_block(sh, sh->pd_idx);
					uptodate++;
				}
			}
		}
		if (!test_bit(STRIPE_INSYNC, &sh->state)) {
			/* either failed parity check, or recovery is happening */
			if (failed==0)
				failed_num = sh->pd_idx;
			dev = &sh->dev[failed_num];
			BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
			BUG_ON(uptodate != disks);

			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
			clear_bit(STRIPE_DEGRADED, &sh->state);
			locked++;
			set_bit(STRIPE_INSYNC, &sh->state);
		}
	}
	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (failed == 1 && ! conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
		) {
		dev = &sh->dev[failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			locked++;
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			locked++;
		}
	}
	spin_unlock(&sh->lock);

	while ((bi=return_bi)) {
		int bytes = bi->bi_size;

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	for (i=disks; i-- ;) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = 1;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = 0;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (syncing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			bi->bi_bdev = rdev->bdev;
			PRINTK("for %llu schedule op %ld on disc %d\n",
				(unsigned long long)sh->sector, bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == 1)
				set_bit(STRIPE_DEGRADED, &sh->state);
			PRINTK("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->handle_list);
		}
	}
}

static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}
static void unplug_slaves(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid5_unplug_device(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}
static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

static inline void raid5_plug_device(raid5_conf_t *conf)
{
	spin_lock_irq(&conf->device_lock);
	blk_plug_device(conf->mddev->queue);
	spin_unlock_irq(&conf->device_lock);
}
static int make_request(request_queue_t *q, struct bio * bi)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned int dd_idx, pd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);

	if (unlikely(bio_barrier(bi))) {
		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
		return 0;
	}

	md_write_start(mddev, bi);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int disks;

	retry:
		if (likely(conf->expand_progress == MaxSector))
			disks = conf->raid_disks;
		else {
			spin_lock_irq(&conf->device_lock);
			disks = conf->raid_disks;
			if (logical_sector >= conf->expand_progress)
				disks = conf->previous_raid_disks;
			spin_unlock_irq(&conf->device_lock);
		}
		new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
						  &dd_idx, &pd_idx, conf);
		PRINTK("raid5: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
		if (sh) {
			if (unlikely(conf->expand_progress != MaxSector)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (logical_sector < conf->expand_progress &&
				    disks == conf->previous_raid_disks)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					goto retry;
				}
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				raid5_unplug_device(mddev->queue);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			raid5_plug_device(conf);
			handle_stripe(sh);
			release_stripe(sh);
		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}
	}
	spin_lock_irq(&conf->device_lock);
	if (--bi->bi_phys_segments == 0) {
		int bytes = bi->bi_size;

		if ( bio_data_dir(bi) == WRITE )
			md_write_end(mddev);
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	spin_unlock_irq(&conf->device_lock);
	return 0;
}
/* FIXME go_faster isn't used */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t x;
	unsigned long stripe;
	int chunk_offset;
	int dd_idx, pd_idx;
	sector_t first_sector;
	int raid_disks = conf->raid_disks;
	int data_disks = raid_disks-1;
	sector_t max_sector = mddev->size << 1;
	int sync_blocks;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}
	/* if there are one or more failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = (mddev->size << 1) - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	x = sector_nr;
	chunk_offset = sector_div(x, sectors_per_chunk);
	stripe = x;
	BUG_ON(x != stripe);

	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
	sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d (mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int handled;

	PRINTK("+++ raid5d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct list_head *first;

		if (conf->seq_flush - conf->seq_write > 0) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		if (list_empty(&conf->handle_list) &&
		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
		    !blk_queue_plugged(mddev->queue) &&
		    !list_empty(&conf->delayed_list))
			raid5_activate_delayed(conf);

		if (list_empty(&conf->handle_list))
			break;

		first = conf->handle_list.next;
		sh = list_entry(first, struct stripe_head, lru);

		list_del_init(first);
		atomic_inc(&sh->count);
		if (atomic_read(&sh->count)!= 1)
			BUG();
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	PRINTK("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	unplug_slaves(mddev);

	PRINTK("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	char *end;
	int new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	new = simple_strtoul(page, &end, 10);
	if (!*page || (*end && *end != '\n') )
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);
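/*
 * Usage note (illustrative): this attribute appears under sysfs as
 * /sys/block/mdX/md/stripe_cache_size, so the cache can be tuned at
 * run time, e.g.
 *	echo 512 > /sys/block/md0/md/stripe_cache_size
 */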
static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};

static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory;
	mdk_rdev_t *rdev;
	struct disk_info *disk;
	struct list_head *tmp;

	if (mddev->level != 5 && mddev->level != 4) {
		printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}

	mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
	if ((conf = mddev->private) == NULL)
		goto abort;
	conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);

	PRINTK("raid5: run(%s) called.\n", mdname(mddev));

	ITERATE_RDEV(mddev,rdev,tmp) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= mddev->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
				" disk %d\n", bdevname(rdev->bdev,b),
				raid_disk);
			conf->working_disks++;
		}
	}

	conf->raid_disks = mddev->raid_disks;
	/*
	 * 0 for a fully functional array, 1 for a degraded array.
	 */
	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
	conf->mddev = mddev;
	conf->chunk_size = mddev->chunk_size;
	conf->level = mddev->level;
	conf->algorithm = mddev->layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->expand_progress = MaxSector;

	/* device size must be a multiple of chunk size */
	mddev->size &= ~(mddev->chunk_size/1024 -1);
	mddev->resync_max_sectors = mddev->size << 1;

	if (!conf->chunk_size || conf->chunk_size % 4) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
			conf->chunk_size, mdname(mddev));
		goto abort;
	}
	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
		printk(KERN_ERR
			"raid5: unsupported parity algorithm %d for %s\n",
			conf->algorithm, mdname(mddev));
		goto abort;
	}
	if (mddev->degraded > 1) {
		printk(KERN_ERR "raid5: not enough operational devices for %s"
			" (%d/%d failed)\n",
			mdname(mddev), conf->failed_disks, conf->raid_disks);
		goto abort;
	}

	if (mddev->degraded == 1 &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "raid5: starting dirty degraded array: %s"
			       " - data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "raid5: cannot start dirty degraded array for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}

	mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid5: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto abort;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
			"raid5: couldn't allocate %dkB for buffers\n", memory);
		shrink_stripes(conf);
		md_unregister_thread(mddev->thread);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
			memory, mdname(mddev));

	if (mddev->degraded == 0)
		printk("raid5: raid level %d set %s active with %d out of %d"
			" devices, algorithm %d\n", conf->level, mdname(mddev),
			mddev->raid_disks-mddev->degraded, mddev->raid_disks,
			conf->algorithm);
	else
		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
			" out of %d devices, algorithm %d\n", conf->level,
			mdname(mddev), mddev->raid_disks - mddev->degraded,
			mddev->raid_disks, conf->algorithm);

	print_raid5_conf(conf);

	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
	 */
	{
		int stripe = (mddev->raid_disks-1) * mddev->chunk_size
			/ PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	/* Ok, everything is just fine now */
	sysfs_create_group(&mddev->kobj, &raid5_attrs_group);

	mddev->queue->unplug_fn = raid5_unplug_device;
	mddev->queue->issue_flush_fn = raid5_issue_flush;

	mddev->array_size = mddev->size * (mddev->raid_disks - 1);
	return 0;
abort:
	if (conf) {
		print_raid5_conf(conf);
		kfree(conf->stripe_hashtbl);
		kfree(conf->disks);
		kfree(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
	return -EIO;
}
static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	shrink_stripes(conf);
	kfree(conf->stripe_hashtbl);
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
	kfree(conf->disks);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
#if RAID5_DEBUG
static void print_sh (struct stripe_head *sh)
{
	int i;

	printk("sh %llu, pd_idx %d, state %ld.\n",
		(unsigned long long)sh->sector, sh->pd_idx, sh->state);
	printk("sh %llu, count %d.\n",
		(unsigned long long)sh->sector, atomic_read(&sh->count));
	printk("sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->disks; i++) {
		printk("(cache%d: %p %ld) ",
			i, sh->dev[i].page, sh->dev[i].flags);
	}
	printk("\n");
}

static void printall (raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
				continue;
			print_sh(sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}
#endif
static void status (struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	int i;

	seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			       conf->disks[i].rdev &&
			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf (seq, "]");
#if RAID5_DEBUG
#define D(x) \
	seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
	printall(conf);
#endif
}
static void print_raid5_conf (raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;

	printk("RAID5 conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
		 conf->working_disks, conf->failed_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(" disk %d, o:%d, dev:%s\n",
				i, !test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev,b));
	}
}
static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_bit(In_sync, &tmp->rdev->flags)) {
			mddev->degraded--;
			conf->failed_disks--;
			conf->working_disks++;
			set_bit(In_sync, &tmp->rdev->flags);
		}
	}
	print_raid5_conf(conf);
	return 0;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_raid5_conf(conf);
	return err;
}
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int found = 0;
	int disk;
	struct disk_info *p;

	if (mddev->degraded > 1)
		/* no point adding a device */
		return 0;

	/*
	 * find the disk ...
	 */
	for (disk=0; disk < mddev->raid_disks; disk++)
		if ((p=conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			found = 1;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return found;
}
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
	mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = sectors /2;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.quiesce	= raid5_quiesce,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.quiesce	= raid5_quiesce,
};
static int __init raid5_init(void)
{
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");