        unsigned fail_count;            /* Cumulative failure count */
 
        struct dm_path path;
+       struct work_struct deactivate_path;    /* aborts this path's queue on failure */
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
+static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
 {
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 
-       if (pgpath)
+       if (pgpath) {
                pgpath->path.is_active = 1;
+               INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+       }
 
        return pgpath;
 }
        kfree(pgpath);
 }
 
+static void deactivate_path(struct work_struct *work)
+{
+       struct pgpath *pgpath =
+               container_of(work, struct pgpath, deactivate_path);
+
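+       /*
+        * Abort any requests still outstanding on the failed path so they
+        * time out immediately and can be retried on another path.
+        */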
+       blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+}
+
 static struct priority_group *alloc_priority_group(void)
 {
        struct priority_group *pg;
                      pgpath->path.dev->name, m->nr_valid_paths);
 
        queue_work(kmultipathd, &m->trigger_event);
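+       /* m->lock is held here, so defer the blk_abort_queue() call to kmultipathd. */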
+       queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
        spin_unlock_irqrestore(&m->lock, flags);