[ARM] Fix Neponset IRQ handling
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 82469db25100c12e6f01528763725504976291ea..7eb36c53f4b7868247141a105ef82e8be3e2d96f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1554,7 +1554,7 @@ void blk_plug_device(request_queue_t *q)
         * don't plug a stopped queue, it must be paired with blk_start_queue()
         * which will restart the queueing
         */
-       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+       if (blk_queue_stopped(q))
                return;
 
        if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
@@ -1587,7 +1587,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-       if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
+       if (unlikely(blk_queue_stopped(q)))
                return;
 
        if (!blk_remove_plug(q))
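
Both hunks above swap an open-coded test_bit() for the blk_queue_stopped() accessor. A minimal sketch of how such an accessor was defined in that era's <linux/blkdev.h> (quoted from memory, so treat as approximate):

    /* Approximate 2.6-era form; the accessor hides the flag word layout. */
    #define blk_queue_stopped(q)  test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)

The accessor keeps callers independent of how the queue flags are stored, so the representation can change without touching every test site.
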
@@ -1732,15 +1732,28 @@ void blk_run_queue(struct request_queue *q)
 
        spin_lock_irqsave(q->queue_lock, flags);
        blk_remove_plug(q);
-       if (!elv_queue_empty(q))
-               q->request_fn(q);
+
+       /*
+        * Only recurse once to avoid overrunning the stack, let the unplug
+        * handling reinvoke the handler shortly if we already got there.
+        */
+       if (!elv_queue_empty(q)) {
+               if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+                       q->request_fn(q);
+                       clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+               } else {
+                       blk_plug_device(q);
+                       kblockd_schedule_work(&q->unplug_work);
+               }
+       }
+
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
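
The comment in the hunk explains the guard: the first caller may invoke the request handler inline, but a reentrant call must not grow the stack, so it plugs the queue and defers to kblockd instead. A self-contained userspace analogue of that pattern (all names here are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag reenter = ATOMIC_FLAG_INIT;

    static void run_queue(void);

    /* Stands in for kblockd_schedule_work(): punt to a worker instead. */
    static void defer_work(void)
    {
            printf("deferred to worker\n");
    }

    static void handle_requests(void)
    {
            /* The handler may call back into run_queue(); that is safe,
             * because the second entry defers rather than recursing. */
            run_queue();
    }

    static void run_queue(void)
    {
            if (!atomic_flag_test_and_set(&reenter)) {
                    handle_requests();
                    atomic_flag_clear(&reenter);
            } else {
                    defer_work();   /* already inside the handler */
            }
    }

    int main(void)
    {
            run_queue();            /* prints "deferred to worker" once */
            return 0;
    }
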
 
 /**
  * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
- * @q:    the request queue to be released
+ * @kobj:    the kobj belonging of the request queue to be released
  *
  * Description:
  *     blk_cleanup_queue is the pair to blk_init_queue() or
@@ -3385,7 +3398,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }
 
 
-static struct notifier_block __devinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier = {
        .notifier_call  = blk_cpu_notify,
 };
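
Dropping __devinitdata matters because a CPU notifier can fire long after boot. A rough sketch of how that annotation expanded at the time (from <linux/init.h>, approximate):

    /* Approximate: when hotplug support is compiled out, __devinitdata
     * demotes data to __initdata, which the kernel frees after boot --
     * fatal for a notifier block that must persist. */
    #if defined(MODULE) || defined(CONFIG_HOTPLUG)
    #define __devinitdata
    #else
    #define __devinitdata __initdata
    #endif
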
 
@@ -3439,7 +3452,12 @@ void end_that_request_last(struct request *req, int uptodate)
        if (unlikely(laptop_mode) && blk_fs_request(req))
                laptop_io_completion();
 
-       if (disk && blk_fs_request(req)) {
+       /*
+        * Account IO completion.  bar_rq isn't accounted as a normal
+        * IO on queueing nor completion.  Accounting the containing
+        * request is enough.
+        */
+       if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
 
@@ -3514,7 +3532,7 @@ int __init blk_dev_init(void)
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
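
The rename makes the iteration domain explicit: initialisation must cover every CPU that could ever come online, not just those online now. The 2.6-era definitions looked roughly like this (from <linux/cpumask.h>, quoted from memory):

    /* Approximate forms; each walks a fixed system cpumask. */
    #define for_each_possible_cpu(cpu)  for_each_cpu_mask((cpu), cpu_possible_map)
    #define for_each_online_cpu(cpu)    for_each_cpu_mask((cpu), cpu_online_map)

Had the loop used the online mask, a CPU hotplugged later would find its blk_cpu_done list head uninitialised.
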
@@ -3539,11 +3557,17 @@ void put_io_context(struct io_context *ioc)
        BUG_ON(atomic_read(&ioc->refcount) == 0);
 
        if (atomic_dec_and_test(&ioc->refcount)) {
+               struct cfq_io_context *cic;
+
                rcu_read_lock();
                if (ioc->aic && ioc->aic->dtor)
                        ioc->aic->dtor(ioc->aic);
-               if (ioc->cic && ioc->cic->dtor)
-                       ioc->cic->dtor(ioc->cic);
+               if (ioc->cic_root.rb_node != NULL) {
+                       struct rb_node *n = rb_first(&ioc->cic_root);
+
+                       cic = rb_entry(n, struct cfq_io_context, rb_node);
+                       cic->dtor(ioc);
+               }
                rcu_read_unlock();
 
                kmem_cache_free(iocontext_cachep, ioc);
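
The io_context now hangs its CFQ contexts off an rb-tree instead of a single cic pointer; the exit path below is converted the same way. Note the hunk invokes the dtor only on the first entry, presumably because the CFQ destructor tears down the whole tree itself. For reference, a generic in-order walk of such a tree looks like this (rb_entry() is just container_of()):

    /* Sketch: visit every cfq_io_context hanging off ioc->cic_root. */
    struct rb_node *n;
    struct cfq_io_context *cic;

    for (n = rb_first(&ioc->cic_root); n; n = rb_next(n)) {
            cic = rb_entry(n, struct cfq_io_context, rb_node);
            /* ... operate on cic (do not free while iterating) ... */
    }
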
@@ -3556,6 +3580,7 @@ void exit_io_context(void)
 {
        unsigned long flags;
        struct io_context *ioc;
+       struct cfq_io_context *cic;
 
        local_irq_save(flags);
        task_lock(current);
@@ -3567,9 +3592,11 @@ void exit_io_context(void)
 
        if (ioc->aic && ioc->aic->exit)
                ioc->aic->exit(ioc->aic);
-       if (ioc->cic && ioc->cic->exit)
-               ioc->cic->exit(ioc->cic);
-
+       if (ioc->cic_root.rb_node != NULL) {
+               cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+               cic->exit(ioc);
+       }
        put_io_context(ioc);
 }
 
@@ -3598,7 +3625,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
-               ret->cic = NULL;
+               ret->cic_root.rb_node = NULL;
                tsk->io_context = ret;
        }