diff --git a/drivers/md/md.c b/drivers/md/md.c
index be7873c61b3cc08c89c8a3cfbcd5e20b29f6f162..2897df90df44856df8d769bbc4f305543425aaf6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -34,6 +34,7 @@
 
 #include <linux/module.h>
 #include <linux/config.h>
+#include <linux/kthread.h>
 #include <linux/linkage.h>
 #include <linux/raid/md.h>
 #include <linux/raid/bitmap.h>
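The new <linux/kthread.h> include supports the core of this patch: md's hand-rolled kernel_thread()/daemonize() worker management is converted to the kthread API (kthread_run(), kthread_should_stop(), kthread_stop()) in the md_thread hunks below; see the consolidated sketch after the md_unregister_thread() hunk.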
@@ -73,7 +74,7 @@ static DEFINE_SPINLOCK(pers_lock);
  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
  * is 1000 KB/sec, so the extra system load does not show up that much.
  * Increase it if you want to have more _guaranteed_ speed. Note that
- * the RAID driver will use the maximum available bandwith if the IO
+ * the RAID driver will use the maximum available bandwidth if the IO
  * subsystem is idle. There is also an 'absolute maximum' reconstruction
  * speed limit - in case reconstruction slows down your system despite
  * idle IO detection.
@@ -645,7 +646,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 
                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_file == NULL) {
-                       if (mddev->level != 1) {
+                       if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) {
                                /* FIXME use a better test */
                                printk(KERN_WARNING "md: bitmaps only support for raid1\n");
                                return -EINVAL;
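This widens v0.90 bitmap support from raid1 to raid5 and raid6, although the FIXME and the warning text still talk about raid1 only. A level whitelist would read more clearly than a chain of != tests; a hedged sketch, not part of the patch:

	switch (mddev->level) {
	case 1:
	case 5:
	case 6:
		break;		/* bitmap-capable levels */
	default:
		/* FIXME use a better test */
		printk(KERN_WARNING "md: bitmaps not supported for this level\n");
		return -EINVAL;
	}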
@@ -711,6 +712,8 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;
 
+       rdev->sb_size = MD_SB_BYTES;
+
        sb = (mdp_super_t*)page_address(rdev->sb_page);
 
        memset(sb, 0, sizeof(*sb));
@@ -897,7 +900,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        rdev->data_offset = le64_to_cpu(sb->data_offset);
 
        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
-       bmask = block_size(rdev->bdev)-1;
+       bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
        if (rdev->sb_size & bmask)
                rdev-> sb_size = (rdev->sb_size | bmask)+1;
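The v1 superblock size depends on max_dev and must be aligned for the device read: the patch rounds it up to the hardware sector size (the device's minimum transfer unit) rather than the current soft blocksize. The bit trick relies on sector sizes being powers of two. An illustrative equivalent helper, not from md.c:

	/* Round size up to the next multiple of sect (a power of two). */
	static inline unsigned int round_up_sect(unsigned int size,
						 unsigned int sect)
	{
		unsigned int bmask = sect - 1;

		return (size & bmask) ? (size | bmask) + 1 : size;
	}

	/* e.g. size = 640, sect = 4096: (640 | 4095) + 1 = 4096 */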
 
@@ -956,8 +959,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->events = le64_to_cpu(sb->events);
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = 0;
-               if (mddev->minor_version == 0)
-                       mddev->default_bitmap_offset = -(64*1024)/512;
+               mddev->default_bitmap_offset = 1024;
                
                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);
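For v1 superblocks the bitmap offset is counted in 512-byte sectors relative to the superblock. The old default placed the bitmap 64 KiB before the superblock (-(64*1024)/512 = -128 sectors), and only for minor_version 0; the new default of +1024 sectors puts it 512 KiB past the superblock for every v1 layout.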
@@ -3049,18 +3051,6 @@ static int md_thread(void * arg)
 {
        mdk_thread_t *thread = arg;
 
-       lock_kernel();
-
-       /*
-        * Detach thread
-        */
-
-       daemonize(thread->name, mdname(thread->mddev));
-
-       current->exit_signal = SIGCHLD;
-       allow_signal(SIGKILL);
-       thread->tsk = current;
-
        /*
         * md_thread is a 'system-thread', it's priority should be very
         * high. We avoid resource deadlocks individually in each
@@ -3072,14 +3062,14 @@ static int md_thread(void * arg)
         * bdflush, otherwise bdflush will deadlock if there are too
         * many dirty RAID5 blocks.
         */
-       unlock_kernel();
 
        complete(thread->event);
-       while (thread->run) {
+       while (!kthread_should_stop()) {
                void (*run)(mddev_t *);
 
                wait_event_interruptible_timeout(thread->wqueue,
-                                                test_bit(THREAD_WAKEUP, &thread->flags),
+                                                test_bit(THREAD_WAKEUP, &thread->flags)
+                                                || kthread_should_stop(),
                                                 thread->timeout);
                try_to_freeze();
 
@@ -3088,11 +3078,8 @@ static int md_thread(void * arg)
                run = thread->run;
                if (run)
                        run(thread->mddev);
-
-               if (signal_pending(current))
-                       flush_signals(current);
        }
-       complete(thread->event);
+
        return 0;
 }
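Two details make the converted loop correct: kthread_should_stop() is checked both as the loop guard and inside the wait_event_interruptible_timeout() condition, because kthread_stop() sets the stop flag and then wakes the task; without the second check the thread could sleep for up to thread->timeout before noticing the request. The SIGKILL/flush_signals() machinery is gone because kthreads ignore signals by default, and the final complete() is no longer needed since kthread_stop() itself waits for the thread to exit.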
 
@@ -3109,11 +3096,9 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
                                 const char *name)
 {
        mdk_thread_t *thread;
-       int ret;
        struct completion event;
 
-       thread = (mdk_thread_t *) kmalloc
-                               (sizeof(mdk_thread_t), GFP_KERNEL);
+       thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
        if (!thread)
                return NULL;
 
@@ -3126,8 +3111,8 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
        thread->mddev = mddev;
        thread->name = name;
        thread->timeout = MAX_SCHEDULE_TIMEOUT;
-       ret = kernel_thread(md_thread, thread, 0);
-       if (ret < 0) {
+       thread->tsk = kthread_run(md_thread, thread, mdname(thread->mddev));
+       if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
        }
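kthread_run() returns a struct task_struct pointer on success and ERR_PTR(-errno) on failure, so the error test changes from a negative-pid check to IS_ERR(). The convention, in an illustrative fragment:

	tsk = kthread_run(md_thread, thread, "%s", name);
	if (IS_ERR(tsk))
		err = PTR_ERR(tsk);	/* e.g. -ENOMEM */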
@@ -3137,21 +3122,9 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
 
 void md_unregister_thread(mdk_thread_t *thread)
 {
-       struct completion event;
-
-       init_completion(&event);
-
-       thread->event = &event;
-
-       /* As soon as ->run is set to NULL, the task could disappear,
-        * so we need to hold tasklist_lock until we have sent the signal
-        */
        dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
-       read_lock(&tasklist_lock);
-       thread->run = NULL;
-       send_sig(SIGKILL, thread->tsk, 1);
-       read_unlock(&tasklist_lock);
-       wait_for_completion(&event);
+
+       kthread_stop(thread->tsk);
        kfree(thread);
 }
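Taken together, these three hunks replace a hand-rolled daemonize/signal/completion protocol with the stock kthread lifecycle. A minimal, self-contained sketch of that lifecycle, assuming a kernel-module context (my_worker, my_start, and my_stop are illustrative names, not md.c symbols):

	#include <linux/kthread.h>
	#include <linux/err.h>
	#include <linux/delay.h>

	static int my_worker(void *data)
	{
		while (!kthread_should_stop()) {
			/* do one unit of work, then sleep */
			msleep_interruptible(1000);
		}
		return 0;	/* return value is collected by kthread_stop() */
	}

	static struct task_struct *my_start(void *data)
	{
		struct task_struct *tsk = kthread_run(my_worker, data, "my_worker");

		return IS_ERR(tsk) ? NULL : tsk;
	}

	static void my_stop(struct task_struct *tsk)
	{
		kthread_stop(tsk);	/* sets the stop flag, wakes the task,
					 * and waits for my_worker() to return */
	}

One subtlety this depends on: kthread_stop() must not be called on a thread that has already exited on its own, which is why worker loops conventionally return only when kthread_should_stop() turns true, exactly as md_thread now does.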
 
@@ -3363,7 +3336,8 @@ static int md_seq_show(struct seq_file *seq, void *v)
                        if (rdev->faulty) {
                                seq_printf(seq, "(F)");
                                continue;
-                       }
+                       } else if (rdev->raid_disk < 0)
+                               seq_printf(seq, "(S)"); /* spare */
                        size += rdev->size;
                }
 
@@ -3375,6 +3349,15 @@ static int md_seq_show(struct seq_file *seq, void *v)
                                seq_printf(seq, "\n      %llu blocks",
                                        (unsigned long long)size);
                }
+               if (mddev->persistent) {
+                       if (mddev->major_version != 0 ||
+                           mddev->minor_version != 90) {
+                               seq_printf(seq," super %d.%d",
+                                          mddev->major_version,
+                                          mddev->minor_version);
+                       }
+               } else
+                       seq_printf(seq, " super non-persistent");
 
                if (mddev->pers) {
                        mddev->pers->status (seq, mddev);
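In /proc/mdstat, faulty members keep their "(F)" tag and spares (raid_disk < 0) are now tagged "(S)". The blocks line also gains a superblock tag: persistent arrays with anything other than the default 0.90 superblock report " super <major>.<minor>", and non-persistent arrays report " super non-persistent".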
@@ -3517,7 +3500,6 @@ void md_done_sync(mddev_t *mddev, int blocks, int ok)
  */
 void md_write_start(mddev_t *mddev, struct bio *bi)
 {
-       DEFINE_WAIT(w);
        if (bio_data_dir(bi) != WRITE)
                return;
 
@@ -3634,7 +3616,7 @@ static void md_do_sync(mddev_t *mddev)
        printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
        printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
                " %d KB/sec/disc.\n", sysctl_speed_limit_min);
-       printk(KERN_INFO "md: using maximum available idle IO bandwith "
+       printk(KERN_INFO "md: using maximum available idle IO bandwidth "
               "(but not more than %d KB/sec) for reconstruction.\n",
               sysctl_speed_limit_max);