* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
- * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*/
#include <linux/module.h>
#include <linux/blkdev.h>
#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
struct list_head idle_rr;
unsigned int busy_queues;
- /*
- * non-ordered list of empty cfqq's
- */
- struct list_head empty_list;
-
/*
* cfqq lookup hash
*/
struct hlist_node cfq_hash;
/* hash key */
unsigned int key;
- /* on either rr or empty list of cfqd */
+ /* member of the rr/busy/cur/idle cfqd list */
struct list_head cfq_list;
/* sorted list of pending requests */
struct rb_root sort_list;
int queued[2];
/* currently allocated requests */
int allocated[2];
+ /* pending metadata requests */
+ int meta_pending;
/* fifo list of requests in sort_list */
struct list_head fifo;
unsigned long slice_start;
unsigned long slice_end;
unsigned long slice_left;
- unsigned long service_last;
/* number of requests that are on the dispatch list */
int on_dispatch[2];
CFQ_CFQQ_FLAG_fifo_expire,
CFQ_CFQQ_FLAG_idle_window,
CFQ_CFQQ_FLAG_prio_changed,
+ CFQ_CFQQ_FLAG_queue_new,
};
#define CFQ_CFQQ_FNS(name) \
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(queue_new);
#undef CFQ_CFQQ_FNS
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
+ if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+ return rq1;
+ else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+ return rq2;
s1 = rq1->sector;
s2 = rq2->sector;
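/*
 * Illustrative sketch (toy types assumed, not part of this patch) of
 * the preference order cfq_choose_req() now applies before it falls
 * back to sector distance:
 *
 *	struct toy_rq { int sync; int meta; unsigned long sector; };
 *
 *	static struct toy_rq *toy_choose(struct toy_rq *rq1,
 *					 struct toy_rq *rq2)
 *	{
 *		if (rq1->sync != rq2->sync)
 *			return rq1->sync ? rq1 : rq2;
 *		if (rq1->meta != rq2->meta)
 *			return rq1->meta ? rq1 : rq2;
 *		return NULL;
 *	}
 *
 * Sync beats async, metadata beats regular data, and a NULL return
 * means the caller decides by distance from the last request sector.
 */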
static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
struct cfq_data *cfqd = cfqq->cfqd;
- struct list_head *list, *entry;
+ struct list_head *list;
BUG_ON(!cfq_cfqq_on_rr(cfqq));
}
/*
- * if queue was preempted, just add to front to be fair. busy_rr
- * isn't sorted, but insert at the back for fairness.
+	 * If this queue was preempted or is new (never been serviced), let
+	 * it be added first for fairness but behind other new queues.
+	 * Otherwise, just add to the back of the list.
*/
- if (preempted || list == &cfqd->busy_rr) {
- if (preempted)
- list = list->prev;
+ if (preempted || cfq_cfqq_queue_new(cfqq)) {
+ struct list_head *n = list;
+ struct cfq_queue *__cfqq;
- list_add_tail(&cfqq->cfq_list, list);
- return;
- }
+ while (n->next != list) {
+ __cfqq = list_entry_cfqq(n->next);
+ if (!cfq_cfqq_queue_new(__cfqq))
+ break;
- /*
- * sort by when queue was last serviced
- */
- entry = list;
- while ((entry = entry->prev) != list) {
- struct cfq_queue *__cfqq = list_entry_cfqq(entry);
+ n = n->next;
+ }
- if (!__cfqq->service_last)
- break;
- if (time_before(__cfqq->service_last, cfqq->service_last))
- break;
+ list = n;
}
- list_add(&cfqq->cfq_list, entry);
+ list_add_tail(&cfqq->cfq_list, list);
}
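/*
 * Worked example (illustrative, queue names assumed): if the target
 * list is [N1, N2, O1], where N1 and N2 still have queue_new set and
 * O1 does not, inserting a preempted queue P walks past N1 and N2,
 * stops at O1, and yields [N1, N2, P, O1]. New queues keep their
 * head-of-list slots, but P still lands ahead of every queue that has
 * already been serviced.
 */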
/*
{
BUG_ON(!cfq_cfqq_on_rr(cfqq));
cfq_clear_cfqq_on_rr(cfqq);
- list_move(&cfqq->cfq_list, &cfqd->empty_list);
+ list_del_init(&cfqq->cfq_list);
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
*/
while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
cfq_dispatch_insert(cfqd->queue, __alias);
+
+ if (!cfq_cfqq_on_rr(cfqq))
+ cfq_add_cfqq_rr(cfqd, cfqq);
}
static inline void
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);
+
+ if (rq_is_meta(rq)) {
+ WARN_ON(!cfqq->meta_pending);
+ cfqq->meta_pending--;
+ }
}
static int
if (cfq_cfqq_wait_request(cfqq))
del_timer(&cfqd->idle_slice_timer);
- if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
- cfqq->service_last = now;
+ if (!preempted && !cfq_cfqq_dispatched(cfqq))
cfq_schedule_dispatch(cfqd);
- }
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
+ cfq_clear_cfqq_queue_new(cfqq);
/*
* store what was left of this slice, if the queue idled out
{
struct cfq_data *cfqd = cic->key;
struct cfq_queue *cfqq;
+ unsigned long flags;
if (unlikely(!cfqd))
return;
- spin_lock(cfqd->queue->queue_lock);
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
cfqq = cic->cfqq[ASYNC];
if (cfqq) {
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
- spin_unlock(cfqd->queue->queue_lock);
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
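/*
 * Note on the locking change above (call-path assumption): the ioprio
 * change path can be entered with interrupts in an unknown state, so
 * the plain spin_lock()/spin_lock_irq() pairs here and in the
 * cfq_cic_link() hunk below become the save/restore variants:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 *	... touch cfqd state ...
 *	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 */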
static void cfq_ioc_set_ioprio(struct io_context *ioc)
hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
atomic_set(&cfqq->ref, 0);
cfqq->cfqd = cfqd;
- cfqq->service_last = 0;
/*
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
+ cfq_mark_cfqq_queue_new(cfqq);
cfq_init_prio_data(cfqq);
}
struct rb_node **p;
struct rb_node *parent;
struct cfq_io_context *__cic;
+ unsigned long flags;
void *k;
cic->ioc = ioc;
rb_link_node(&cic->rb_node, parent, p);
rb_insert_color(&cic->rb_node, &ioc->cic_root);
- spin_lock_irq(cfqd->queue->queue_lock);
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
list_add(&cic->queue_list, &cfqd->cic_list);
- spin_unlock_irq(cfqd->queue->queue_lock);
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
/*
}
static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
- struct request *rq)
+cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
{
sector_t sdist;
u64 total;
*/
if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
return 0;
+ /*
+ * if the new request is sync, but the currently running queue is
+ * not, let the sync request have priority.
+ */
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return 1;
+ /*
+ * So both queues are sync. Let the new request get disk time if
+ * it's a metadata request and the current queue is doing regular IO.
+ */
+ if (rq_is_meta(rq) && !cfqq->meta_pending)
+ return 1;
return 0;
}
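/*
 * Example scenario (illustrative): the active queue is doing ordinary
 * sync reads when another sync queue submits a metadata request. The
 * sync-vs-async test above does not fire, but rq_is_meta(rq) is true
 * and the active queue has meta_pending == 0, so cfq_should_preempt()
 * returns 1 and the metadata request gets the disk.
 */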
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
- struct cfq_queue *__cfqq, *next;
-
- list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
- cfq_resort_rr_list(__cfqq, 1);
+ cfq_slice_expired(cfqd, 1);
if (!cfqq->slice_left)
cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
- cfqq->slice_end = cfqq->slice_left + jiffies;
- cfq_slice_expired(cfqd, 1);
- __cfq_set_active_queue(cfqd, cfqq);
-}
-
-/*
- * should really be a ll_rw_blk.c helper
- */
-static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- request_queue_t *q = cfqd->queue;
+ /*
+	 * Put the new queue at the front of the current list,
+ * so we know that it will be selected next.
+ */
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+ list_move(&cfqq->cfq_list, &cfqd->cur_rr);
- if (!blk_queue_plugged(q))
- q->request_fn(q);
- else
- __generic_unplug_device(q);
+ cfqq->slice_end = cfqq->slice_left + jiffies;
}
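/*
 * The helper deleted above was moved into ll_rw_blk.c as
 * blk_start_queueing(); its body is presumably the same logic that
 * used to live here:
 *
 *	void blk_start_queueing(request_queue_t *q)
 *	{
 *		if (!blk_queue_plugged(q))
 *			q->request_fn(q);
 *		else
 *			__generic_unplug_device(q);
 *	}
 */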
/*
{
struct cfq_io_context *cic = RQ_CIC(rq);
+ if (rq_is_meta(rq))
+ cfqq->meta_pending++;
+
/*
 * check if this request is a better next-serve candidate
*/
if (cic == cfqd->active_cic &&
del_timer(&cfqd->idle_slice_timer)) {
cfq_slice_expired(cfqd, 0);
- cfq_start_queueing(cfqd, cfqq);
+ blk_start_queueing(cfqd->queue);
}
return;
}
cfq_update_io_thinktime(cfqd, cic);
- cfq_update_io_seektime(cfqd, cic, rq);
+ cfq_update_io_seektime(cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
if (cfq_cfqq_wait_request(cfqq)) {
cfq_mark_cfqq_must_dispatch(cfqq);
del_timer(&cfqd->idle_slice_timer);
- cfq_start_queueing(cfqd, cfqq);
+ blk_start_queueing(cfqd->queue);
}
} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
/*
*/
cfq_preempt_queue(cfqd, cfqq);
cfq_mark_cfqq_must_dispatch(cfqq);
- cfq_start_queueing(cfqd, cfqq);
+ blk_start_queueing(cfqd->queue);
}
}
cfq_add_rq_rb(rq);
- if (!cfq_cfqq_on_rr(cfqq))
- cfq_add_cfqq_rr(cfqd, cfqq);
-
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_rq_enqueued(cfqd, cfqq, rq);
if (!cfq_class_idle(cfqq))
cfqd->last_end_request = now;
- if (!cfq_cfqq_dispatched(cfqq)) {
- if (cfq_cfqq_on_rr(cfqq)) {
- cfqq->service_last = now;
- cfq_resort_rr_list(cfqq, 0);
- }
- }
+ if (!cfq_cfqq_dispatched(cfqq) && cfq_cfqq_on_rr(cfqq))
+ cfq_resort_rr_list(cfqq, 0);
if (sync)
RQ_CIC(rq)->last_end_request = now;
/*
* queue lock held here
*/
-static void cfq_put_request(request_queue_t *q, struct request *rq)
+static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
return 1;
}
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
{
- request_queue_t *q = data;
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ request_queue_t *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- blk_remove_plug(q);
- q->request_fn(q);
+ blk_start_queueing(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
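/*
 * Sketch of the workqueue API conversion assumed by this hunk: work
 * handlers now take the work_struct itself instead of an opaque data
 * pointer, so the owning object is recovered with container_of() and
 * INIT_WORK() loses its third argument (see the cfq_init_queue() hunk
 * below):
 *
 *	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 *
 *	static void cfq_kick_queue(struct work_struct *work)
 *	{
 *		struct cfq_data *cfqd =
 *			container_of(work, struct cfq_data, unplug_work);
 *		...
 *	}
 */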
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
int i;
INIT_LIST_HEAD(&cfqd->busy_rr);
INIT_LIST_HEAD(&cfqd->cur_rr);
INIT_LIST_HEAD(&cfqd->idle_rr);
- INIT_LIST_HEAD(&cfqd->empty_list);
INIT_LIST_HEAD(&cfqd->cic_list);
cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
cfqd->idle_class_timer.function = cfq_idle_class_timer;
cfqd->idle_class_timer.data = (unsigned long) cfqd;
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
static void __exit cfq_exit(void)
{
- DECLARE_COMPLETION(all_gone);
+ DECLARE_COMPLETION_ONSTACK(all_gone);
elv_unregister(&iosched_cfq);
ioc_gone = &all_gone;
/* ioc_gone's update must be visible before reading ioc_count */
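/*
 * Note on the completion change above: DECLARE_COMPLETION_ONSTACK()
 * is the lockdep-aware variant of DECLARE_COMPLETION() for completions
 * that live on the stack, which is exactly where all_gone sits in
 * cfq_exit().
 */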