kobject_init(&q->kobj, &blk_queue_ktype);
mutex_init(&q->sysfs_lock);
+ spin_lock_init(&q->__queue_lock);
return q;
}
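
With spin_lock_init() moved into the allocation path, the embedded q->__queue_lock is initialized exactly once, when the queue is created, rather than conditionally later in blk_init_queue_node(). A minimal sketch of what a caller can now rely on (hypothetical driver code, not part of this patch; my_request_fn and my_create_queue are placeholder names):

#include <linux/blkdev.h>

static void my_request_fn(struct request_queue *q)
{
        /* placeholder request handler; a real driver drains q here */
}

static struct request_queue *my_create_queue(void)
{
        /*
         * Passing a NULL lock: blk_init_queue() falls back to the
         * embedded per-queue lock, which the allocation path has
         * already run spin_lock_init() on.
         */
        return blk_init_queue(my_request_fn, NULL);
}
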
/*
 * if caller didn't supply a lock, they get per-queue locking with
 * our embedded lock
 */
- if (!lock) {
-         spin_lock_init(&q->__queue_lock);
+ if (!lock)
          lock = &q->__queue_lock;
- }
q->request_fn = rfn;
q->prep_rq_fn = NULL;
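
For reference, a reconstruction of the post-patch shape of the lock fallback in blk_init_queue_node(); the q->queue_lock assignment is nearby context in the same function and is shown only to make the flow clear:

/*
 * if caller didn't supply a lock, they get per-queue locking with
 * our embedded lock
 */
if (!lock)
        lock = &q->__queue_lock;

q->request_fn = rfn;
q->prep_rq_fn = NULL;
/* ... */
q->queue_lock = lock;
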
rq = get_request(q, rw_flags, bio, GFP_NOIO);
while (!rq) {
        DEFINE_WAIT(wait);
+         struct io_context *ioc;
        struct request_list *rl = &q->rq;

        prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                        TASK_UNINTERRUPTIBLE);

-         rq = get_request(q, rw_flags, bio, GFP_NOIO);
-
-         if (!rq) {
-                 struct io_context *ioc;
-
-                 blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+         blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

-                 __generic_unplug_device(q);
-                 spin_unlock_irq(q->queue_lock);
-                 io_schedule();
+         __generic_unplug_device(q);
+         spin_unlock_irq(q->queue_lock);
+         io_schedule();

-                 /*
-                  * After sleeping, we become a "batching" process and
-                  * will be able to allocate at least one request, and
-                  * up to a big batch of them for a small period of time.
-                  * See ioc_batching, ioc_set_batching
-                  */
-                 ioc = current_io_context(GFP_NOIO, q->node);
-                 ioc_set_batching(q, ioc);
+         /*
+          * After sleeping, we become a "batching" process and
+          * will be able to allocate at least one request, and
+          * up to a big batch of them for a small period of time.
+          * See ioc_batching, ioc_set_batching
+          */
+         ioc = current_io_context(GFP_NOIO, q->node);
+         ioc_set_batching(q, ioc);

-                 spin_lock_irq(q->queue_lock);
-         }
+         spin_lock_irq(q->queue_lock);
        finish_wait(&rl->wait[rw], &wait);
+
+         rq = get_request(q, rw_flags, bio, GFP_NOIO);
+ }

return rq;
}
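
Applied, the hunk leaves get_request_wait() with a single get_request() call site at the bottom of the retry loop, so the nested !rq branch disappears and the retry always happens after finish_wait(), with the queue lock re-acquired. The resulting loop, reconstructed from the diff above:

rq = get_request(q, rw_flags, bio, GFP_NOIO);
while (!rq) {
        DEFINE_WAIT(wait);
        struct io_context *ioc;
        struct request_list *rl = &q->rq;

        prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                        TASK_UNINTERRUPTIBLE);

        blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

        __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        io_schedule();

        /*
         * After sleeping, we become a "batching" process and
         * will be able to allocate at least one request, and
         * up to a big batch of them for a small period of time.
         * See ioc_batching, ioc_set_batching
         */
        ioc = current_io_context(GFP_NOIO, q->node);
        ioc_set_batching(q, ioc);

        spin_lock_irq(q->queue_lock);
        finish_wait(&rl->wait[rw], &wait);

        rq = get_request(q, rw_flags, bio, GFP_NOIO);
}

return rq;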