drivers/block/cfq-iosched.c (linux-2.6-omap-h63xx.git)
[PATCH] Update cfq io scheduler to time sliced design
/*
 *  linux/drivers/block/cfq-iosched.c
 *
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/writeback.h>

/*
 * tunables
 */
static int cfq_quantum = 4;             /* max queue in one round of service */
static int cfq_queued = 8;              /* minimum rq allocate limit per-queue */
static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static int cfq_back_max = 16 * 1024;    /* maximum backwards seek, in KiB */
static int cfq_back_penalty = 2;        /* penalty of a backwards seek */

static int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 50;
static int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 50;
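/*
 * A rough feel for the defaults above, assuming HZ=1000 (all of these
 * scale with HZ): async fifo requests expire after 250ms and sync ones
 * after 125ms, a sync slice is 100ms, and an async slice and the
 * idle-wait window are 20ms each.
 */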

#define CFQ_IDLE_GRACE          (HZ / 10)
#define CFQ_SLICE_SCALE         (5)

#define CFQ_KEY_ASYNC           (0)

/*
 * disable queueing at the driver/hardware level
 */
static int cfq_max_depth = 1;

/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT         6
#define CFQ_QHASH_ENTRIES       (1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)

/*
 * for the hash of crq inside the cfqd
 */
#define CFQ_MHASH_SHIFT         6
#define CFQ_MHASH_BLOCK(sec)    ((sec) >> 3)
#define CFQ_MHASH_ENTRIES       (1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec)       hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)    hlist_entry((ptr), struct cfq_rq, hash)
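/*
 * Note that rq_hash_key() hashes a request by its *end* sector: a bio
 * whose start sector equals rq_hash_key(rq) finds rq via
 * cfq_find_rq_hash() as a back merge candidate (see cfq_merge()).
 */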

#define list_entry_cfqq(ptr)    list_entry((ptr), struct cfq_queue, cfq_list)
#define list_entry_fifo(ptr)    list_entry((ptr), struct request, queuelist)

#define RQ_DATA(rq)             (rq)->elevator_private

/*
 * rb-tree defines
 */
#define RB_NONE                 (2)
#define RB_EMPTY(node)          ((node)->rb_node == NULL)
#define RB_CLEAR_COLOR(node)    (node)->rb_color = RB_NONE
#define RB_CLEAR(node)          do {    \
        (node)->rb_parent = NULL;       \
        RB_CLEAR_COLOR((node));         \
        (node)->rb_right = NULL;        \
        (node)->rb_left = NULL;         \
} while (0)
#define RB_CLEAR_ROOT(root)     ((root)->rb_node = NULL)
#define ON_RB(node)             ((node)->rb_color != RB_NONE)
#define rb_entry_crq(node)      rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq)           (rq)->sector
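/*
 * ON_RB() abuses the rb_node color field as an "on tree" flag: RB_CLEAR()
 * parks a node with the sentinel color RB_NONE, and rb_insert_color()
 * replaces it with a real color once the node is linked into a tree.
 */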

static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;

#define CFQ_PRIO_LISTS          IOPRIO_BE_NR
#define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_be(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
#define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define cfq_cfqq_sync(cfqq)     ((cfqq)->key != CFQ_KEY_ASYNC)
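/*
 * Sync queues are keyed by task pid (see the cfq_find_cfq_hash() callers),
 * while all async io shares the CFQ_KEY_ASYNC (0) key, hence this test.
 */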

/*
 * Per block device queue structure
 */
struct cfq_data {
        atomic_t ref;
        request_queue_t *queue;

        /*
         * rr list of queues with requests and the count of them
         */
        struct list_head rr_list[CFQ_PRIO_LISTS];
        struct list_head busy_rr;
        struct list_head cur_rr;
        struct list_head idle_rr;
        unsigned int busy_queues;

        /*
         * non-ordered list of empty cfqq's
         */
        struct list_head empty_list;

        /*
         * cfqq lookup hash
         */
        struct hlist_head *cfq_hash;

        /*
         * global crq hash for all queues
         */
        struct hlist_head *crq_hash;

        unsigned int max_queued;

        mempool_t *crq_pool;

        int rq_in_driver;

        /*
         * schedule slice state info
         */
        /*
         * idle window management
         */
        struct timer_list idle_slice_timer;
        struct work_struct unplug_work;

        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
        int cur_prio, cur_end_prio;
        unsigned int dispatch_slice;

        struct timer_list idle_class_timer;

        sector_t last_sector;
        unsigned long last_end_request;

        unsigned int rq_starved;

        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_queued;
        unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
        unsigned int cfq_max_depth;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
        /* reference count */
        atomic_t ref;
        /* parent cfq_data */
        struct cfq_data *cfqd;
        /* cfqq lookup hash */
        struct hlist_node cfq_hash;
        /* hash key */
        unsigned int key;
        /* on either rr or empty list of cfqd */
        struct list_head cfq_list;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
        struct cfq_rq *next_crq;
        /* requests queued in sort_list */
        int queued[2];
        /* currently allocated requests */
        int allocated[2];
        /* fifo list of requests in sort_list */
        struct list_head fifo;

        unsigned long slice_start;
        unsigned long slice_end;
        unsigned long slice_left;
        unsigned long service_last;

        /* number of requests that have been handed to the driver */
        int in_flight;

        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class, org_ioprio_class;

        /* whether queue is on rr (or empty) list */
        unsigned on_rr : 1;
        /* idle slice, waiting for new request submission */
        unsigned wait_request : 1;
        /* set when wait_request gets set, reset on first rq alloc */
        unsigned must_alloc : 1;
        /* only gets one must_alloc per slice */
        unsigned must_alloc_slice : 1;
        /* idle slice, request added, now waiting to dispatch it */
        unsigned must_dispatch : 1;
        /* fifo expire per-slice */
        unsigned fifo_expire : 1;

        unsigned idle_window : 1;
        unsigned prio_changed : 1;
};

struct cfq_rq {
        struct rb_node rb_node;
        sector_t rb_key;
        struct request *request;
        struct hlist_node hash;

        struct cfq_queue *cfq_queue;
        struct cfq_io_context *io_context;

        unsigned in_flight : 1;
        unsigned accounted : 1;
        unsigned is_sync   : 1;
        unsigned requeued  : 1;
};

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int);
static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
static void cfq_put_cfqd(struct cfq_data *cfqd);

#define process_sync(tsk)       ((tsk)->flags & PF_SYNCWRITE)
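/*
 * PF_SYNCWRITE marks a task doing O_SYNC style writes; process_sync() is
 * presumably consulted further down in this file when deciding whether a
 * task's new requests count as sync or async.
 */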

/*
 * lots of deadline iosched dupes, can be abstracted later...
 */
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
        hlist_del_init(&crq->hash);
}

static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
{
        cfq_del_crq_hash(crq);

        if (q->last_merge == crq->request)
                q->last_merge = NULL;
}

static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
        const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));

        hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
}

static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
        struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
        struct hlist_node *entry, *next;

        hlist_for_each_safe(entry, next, hash_list) {
                struct cfq_rq *crq = list_entry_hash(entry);
                struct request *__rq = crq->request;

                if (!rq_mergeable(__rq)) {
                        cfq_del_crq_hash(crq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}

/*
 * Lifted from AS - choose which of crq1 and crq2 is best served now.
 * We choose the request that is closest to the head right now. Distances
 * behind the head are penalized and only allowed to a certain extent.
 */
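/*
 * For instance, with the default back penalty of 2: head at sector 1000,
 * crq1 at sector 1100 gives d1 = 100, while crq2 at sector 980 lies
 * behind the head but within back_max, so d2 = (1000 - 980) * 2 = 40 and
 * crq2 is chosen despite the backward seek.
 */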
static struct cfq_rq *
cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
{
        sector_t last, s1, s2, d1 = 0, d2 = 0;
        int r1_wrap = 0, r2_wrap = 0;   /* requests are behind the disk head */
        unsigned long back_max;

        if (crq1 == NULL || crq1 == crq2)
                return crq2;
        if (crq2 == NULL)
                return crq1;
        if (crq1->requeued)
                return crq1;
        if (crq2->requeued)
                return crq2;

        s1 = crq1->request->sector;
        s2 = crq2->request->sector;

        last = cfqd->last_sector;

        /*
         * by definition, 1KiB is 2 sectors
         */
        back_max = cfqd->cfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * cfqd->cfq_back_penalty;
        else
                r1_wrap = 1;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * cfqd->cfq_back_penalty;
        else
                r2_wrap = 1;

        /* Found required data */
        if (!r1_wrap && r2_wrap)
                return crq1;
        else if (!r2_wrap && r1_wrap)
                return crq2;
        else if (r1_wrap && r2_wrap) {
                /* both behind the head */
                if (s1 <= s2)
                        return crq1;
                else
                        return crq2;
        }

        /* Both requests in front of the head */
        if (d1 < d2)
                return crq1;
        else if (d2 < d1)
                return crq2;
        else {
                if (s1 >= s2)
                        return crq1;
                else
                        return crq2;
        }
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct cfq_rq *
cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  struct cfq_rq *last)
{
        struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
        struct rb_node *rbnext, *rbprev;

        if (ON_RB(&last->rb_node))
                rbnext = rb_next(&last->rb_node);
        else {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext == &last->rb_node)
                        rbnext = NULL;
        }

        rbprev = rb_prev(&last->rb_node);

        if (rbprev)
                crq_prev = rb_entry_crq(rbprev);
        if (rbnext)
                crq_next = rb_entry_crq(rbnext);

        return cfq_choose_req(cfqd, crq_next, crq_prev);
}

static void cfq_update_next_crq(struct cfq_rq *crq)
{
        struct cfq_queue *cfqq = crq->cfq_queue;

        if (cfqq->next_crq == crq)
                cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
}

static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
        struct cfq_data *cfqd = cfqq->cfqd;
        struct list_head *list, *entry;

        BUG_ON(!cfqq->on_rr);

        list_del(&cfqq->cfq_list);

        if (cfq_class_rt(cfqq))
                list = &cfqd->cur_rr;
        else if (cfq_class_idle(cfqq))
                list = &cfqd->idle_rr;
        else {
                /*
                 * if cfqq has requests in flight, don't allow it to be
                 * found in cfq_set_active_queue before it has finished them.
                 * this is done to increase fairness between a process that
                 * has lots of io pending vs one that only generates one
                 * sporadically or synchronously
                 */
                if (cfqq->in_flight)
                        list = &cfqd->busy_rr;
                else
                        list = &cfqd->rr_list[cfqq->ioprio];
        }

        /*
         * if queue was preempted, just add to front to be fair. busy_rr
         * isn't sorted.
         */
        if (preempted || list == &cfqd->busy_rr) {
                list_add(&cfqq->cfq_list, list);
                return;
        }

        /*
         * sort by when queue was last serviced
         */
        entry = list;
        while ((entry = entry->prev) != list) {
                struct cfq_queue *__cfqq = list_entry_cfqq(entry);

                if (!__cfqq->service_last)
                        break;
                if (time_before(__cfqq->service_last, cfqq->service_last))
                        break;
        }

        list_add(&cfqq->cfq_list, entry);
}
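/*
 * The sort loop above walks from the list tail and stops at the first
 * queue serviced earlier than cfqq, so each rr list stays ordered with
 * the least recently serviced queue at the front.
 */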

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
{
        BUG_ON(cfqq->on_rr);
        cfqq->on_rr = 1;
        cfqd->busy_queues++;

        cfq_resort_rr_list(cfqq, requeue);
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        BUG_ON(!cfqq->on_rr);
        cfqq->on_rr = 0;
        list_move(&cfqq->cfq_list, &cfqd->empty_list);

        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
        struct cfq_queue *cfqq = crq->cfq_queue;

        if (ON_RB(&crq->rb_node)) {
                struct cfq_data *cfqd = cfqq->cfqd;
                const int sync = crq->is_sync;

                BUG_ON(!cfqq->queued[sync]);
                cfqq->queued[sync]--;

                cfq_update_next_crq(crq);

                rb_erase(&crq->rb_node, &cfqq->sort_list);
                RB_CLEAR_COLOR(&crq->rb_node);

                if (cfqq->on_rr && RB_EMPTY(&cfqq->sort_list))
                        cfq_del_cfqq_rr(cfqd, cfqq);
        }
}

static struct cfq_rq *
__cfq_add_crq_rb(struct cfq_rq *crq)
{
        struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
        struct rb_node *parent = NULL;
        struct cfq_rq *__crq;

        while (*p) {
                parent = *p;
                __crq = rb_entry_crq(parent);

                if (crq->rb_key < __crq->rb_key)
                        p = &(*p)->rb_left;
                else if (crq->rb_key > __crq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __crq;
        }

        rb_link_node(&crq->rb_node, parent, p);
        return NULL;
}

static void cfq_add_crq_rb(struct cfq_rq *crq)
{
        struct cfq_queue *cfqq = crq->cfq_queue;
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *rq = crq->request;
        struct cfq_rq *__alias;

        crq->rb_key = rq_rb_key(rq);
        cfqq->queued[crq->is_sync]++;

        /*
         * looks a little odd, but the first insert might return an alias.
         * if that happens, put the alias on the dispatch list
         */
        while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
                cfq_dispatch_sort(cfqd->queue, __alias);

        rb_insert_color(&crq->rb_node, &cfqq->sort_list);

        if (!cfqq->on_rr)
                cfq_add_cfqq_rr(cfqd, cfqq, crq->requeued);

        /*
         * check if this request is a better next-serve candidate
         */
        cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
}

static inline void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
        if (ON_RB(&crq->rb_node)) {
                rb_erase(&crq->rb_node, &cfqq->sort_list);
                cfqq->queued[crq->is_sync]--;
        }

        cfq_add_crq_rb(crq);
}

static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
        struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid);
        struct rb_node *n;

        if (!cfqq)
                goto out;

        n = cfqq->sort_list.rb_node;
        while (n) {
                struct cfq_rq *crq = rb_entry_crq(n);

                if (sector < crq->rb_key)
                        n = n->rb_left;
                else if (sector > crq->rb_key)
                        n = n->rb_right;
                else
                        return crq->request;
        }

out:
        return NULL;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_rq *crq = RQ_DATA(rq);

        if (crq) {
                struct cfq_queue *cfqq = crq->cfq_queue;

                if (crq->accounted) {
                        crq->accounted = 0;
                        WARN_ON(!cfqd->rq_in_driver);
                        cfqd->rq_in_driver--;
                }
                if (crq->in_flight) {
                        crq->in_flight = 0;
                        WARN_ON(!cfqq->in_flight);
                        cfqq->in_flight--;
                }
                crq->requeued = 1;
        }
}

/*
 * make sure the service time gets corrected on reissue of this request
 */
static void cfq_requeue_request(request_queue_t *q, struct request *rq)
{
        cfq_deactivate_request(q, rq);
        list_add(&rq->queuelist, &q->queue_head);
}

static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
        struct cfq_rq *crq = RQ_DATA(rq);

        if (crq) {
                list_del_init(&rq->queuelist);
                cfq_del_crq_rb(crq);
                cfq_remove_merge_hints(q, crq);
        }
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        ret = elv_try_last_merge(q, bio);
        if (ret != ELEVATOR_NO_MERGE) {
                __rq = q->last_merge;
                goto out_insert;
        }

        __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                ret = ELEVATOR_BACK_MERGE;
                goto out;
        }

        __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                ret = ELEVATOR_FRONT_MERGE;
                goto out;
        }

        return ELEVATOR_NO_MERGE;
out:
        q->last_merge = __rq;
out_insert:
        *req = __rq;
        return ret;
}

static void cfq_merged_request(request_queue_t *q, struct request *req)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_rq *crq = RQ_DATA(req);

        cfq_del_crq_hash(crq);
        cfq_add_crq_hash(cfqd, crq);

        if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
                struct cfq_queue *cfqq = crq->cfq_queue;

                cfq_update_next_crq(crq);
                cfq_reposition_crq_rb(cfqq, crq);
        }

        q->last_merge = req;
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
                    struct request *next)
{
        cfq_merged_request(q, rq);

        /*
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
            time_before(next->start_time, rq->start_time))
                list_move(&rq->queuelist, &next->queuelist);

        cfq_remove_request(q, next);
}

static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        if (cfqq) {
                /*
                 * stop potential idle class queues waiting service
                 */
                del_timer(&cfqd->idle_class_timer);

                cfqq->slice_start = jiffies;
                cfqq->slice_end = 0;
                cfqq->slice_left = 0;
                cfqq->must_alloc_slice = 0;
                cfqq->fifo_expire = 0;
        }

        cfqd->active_queue = cfqq;
}

/*
 * The cur_prio..cur_end_prio scan window grows by one priority level on
 * each pass, so successive rounds service the levels:
 *
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 *
 * giving numerically lower (higher) priorities a proportionally larger
 * share of service.
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
        int prio, wrap;

        prio = -1;
        wrap = 0;
        do {
                int p;

                for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
                        if (!list_empty(&cfqd->rr_list[p])) {
                                prio = p;
                                break;
                        }
                }

                if (prio != -1)
                        break;
                cfqd->cur_prio = 0;
                if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
                        cfqd->cur_end_prio = 0;
                        if (wrap)
                                break;
                        wrap = 1;
                }
        } while (1);

        if (unlikely(prio == -1))
                return -1;

        BUG_ON(prio >= CFQ_PRIO_LISTS);

        list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

        cfqd->cur_prio = prio + 1;
        if (cfqd->cur_prio > cfqd->cur_end_prio) {
                cfqd->cur_end_prio = cfqd->cur_prio;
                cfqd->cur_prio = 0;
        }
        if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
                cfqd->cur_prio = 0;
                cfqd->cur_end_prio = 0;
        }

        return prio;
}

static void cfq_set_active_queue(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq = NULL;

        /*
         * if current list is non-empty, grab first entry. if it is empty,
         * get next prio level and grab first entry then if any are spliced
         */
        if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
                cfqq = list_entry_cfqq(cfqd->cur_rr.next);

        /*
         * if we have idle queues and no rt or be queues had pending
         * requests, either allow immediate service if the grace period
         * has passed or arm the idle grace timer
         */
        if (!cfqq && !list_empty(&cfqd->idle_rr)) {
                unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

                if (time_after_eq(jiffies, end))
                        cfqq = list_entry_cfqq(cfqd->idle_rr.next);
                else
                        mod_timer(&cfqd->idle_class_timer, end);
        }

        __cfq_set_active_queue(cfqd, cfqq);
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
        struct cfq_queue *cfqq = cfqd->active_queue;

        if (cfqq) {
                unsigned long now = jiffies;

                if (cfqq->wait_request)
                        del_timer(&cfqd->idle_slice_timer);

                if (!preempted && !cfqq->in_flight)
                        cfqq->service_last = now;

                cfqq->must_dispatch = 0;
                cfqq->wait_request = 0;

                /*
                 * store what was left of this slice, if the queue idled out
                 * or was preempted
                 */
                if (time_after(cfqq->slice_end, now))
                        cfqq->slice_left = cfqq->slice_end - now;
                else
                        cfqq->slice_left = 0;

                if (cfqq->on_rr)
                        cfq_resort_rr_list(cfqq, preempted);

                cfqd->active_queue = NULL;

                if (cfqd->active_cic) {
                        put_io_context(cfqd->active_cic->ioc);
                        cfqd->active_cic = NULL;
                }
        }

        cfqd->dispatch_slice = 0;
}

static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        WARN_ON(!RB_EMPTY(&cfqq->sort_list));
        WARN_ON(cfqq != cfqd->active_queue);

        /*
         * idle is disabled, either manually or by past process history
         */
        if (!cfqd->cfq_slice_idle)
                return 0;
        if (!cfqq->idle_window)
                return 0;
        /*
         * task has exited, don't wait
         */
        if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
                return 0;

        cfqq->wait_request = 1;
        cfqq->must_alloc = 1;

        if (!timer_pending(&cfqd->idle_slice_timer)) {
                unsigned long slice_left = cfqq->slice_end - 1;

                cfqd->idle_slice_timer.expires = min(jiffies + cfqd->cfq_slice_idle, slice_left);
                add_timer(&cfqd->idle_slice_timer);
        }

        return 1;
}

/*
 * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
 * this function sector sorts the selected request to minimize seeks. we start
 * at cfqd->last_sector, not 0.
 */
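/*
 * Rough example: with the dispatch list holding requests at sectors 500,
 * 700 and 900 and last_sector at 600, a new crq at sector 650 is inserted
 * between 500 and 700, keeping the list ascending past the head position.
 */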
static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = crq->cfq_queue;
        struct list_head *head = &q->queue_head, *entry = head;
        struct request *__rq;
        sector_t last;

        list_del(&crq->request->queuelist);

        last = cfqd->last_sector;
        list_for_each_entry_reverse(__rq, head, queuelist) {
                struct cfq_rq *__crq = RQ_DATA(__rq);

                if (blk_barrier_rq(__rq))
                        break;
                if (!blk_fs_request(__rq))
                        break;
                if (__crq->requeued)
                        break;

                if (__rq->sector <= crq->request->sector)
                        break;
                if (__rq->sector > last && crq->request->sector < last) {
                        last = crq->request->sector + crq->request->nr_sectors;
                        break;
                }
                entry = &__rq->queuelist;
        }

        cfqd->last_sector = last;

        cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);

        cfq_del_crq_rb(crq);
        cfq_remove_merge_hints(q, crq);

        crq->in_flight = 1;
        crq->requeued = 0;
        cfqq->in_flight++;
        list_add_tail(&crq->request->queuelist, entry);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *rq;
        struct cfq_rq *crq;

        if (cfqq->fifo_expire)
                return NULL;

        if (!list_empty(&cfqq->fifo)) {
                int fifo = cfq_cfqq_sync(cfqq);

                crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
                rq = crq->request;
                if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
                        cfqq->fifo_expire = 1;
                        return crq;
                }
        }

        return NULL;
}

/*
 * Scale schedule slice based on io priority
 */
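/*
 * For example, with HZ=1000 the default sync base slice is 100ms and
 * base_slice/CFQ_SLICE_SCALE is 20ms per priority step, so ioprio 0 gets
 * a 180ms slice, ioprio 4 gets 100ms and ioprio 7 gets 40ms.
 */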
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

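/*
 * Upper bound on requests an async queue may dispatch in one slice. With
 * the default cfq_slice_async_rq of 2: ioprio 0 allows 2 * (2 + 2*7) = 32
 * requests, ioprio 4 allows 16 and ioprio 7 allows 4.
 */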
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        const int base_rq = cfqd->cfq_slice_async_rq;

        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

        return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}

/*
 * get next queue for service
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
{
        unsigned long now = jiffies;
        struct cfq_queue *cfqq;

        cfqq = cfqd->active_queue;
        if (!cfqq)
                goto new_queue;

        /*
         * slice has expired
         */
        if (!cfqq->must_dispatch && time_after(now, cfqq->slice_end))
                goto new_queue;

        /*
         * if queue has requests, dispatch one. if not, check if
         * enough slice is left to wait for one
         */
        if (!RB_EMPTY(&cfqq->sort_list))
                goto keep_queue;
        else if (!force && cfq_cfqq_sync(cfqq) &&
                 time_before(now, cfqq->slice_end)) {
                if (cfq_arm_slice_timer(cfqd, cfqq))
                        return NULL;
        }

new_queue:
        cfq_slice_expired(cfqd, 0);
        cfq_set_active_queue(cfqd);
keep_queue:
        return cfqd->active_queue;
}

static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        int max_dispatch)
{
        int dispatched = 0;

        BUG_ON(RB_EMPTY(&cfqq->sort_list));

        do {
                struct cfq_rq *crq;

                /*
                 * follow expired path, else get first next available
                 */
                if ((crq = cfq_check_fifo(cfqq)) == NULL)
                        crq = cfqq->next_crq;

                /*
                 * finally, insert request into driver dispatch list
                 */
                cfq_dispatch_sort(cfqd->queue, crq);

                cfqd->dispatch_slice++;
                dispatched++;

                if (!cfqd->active_cic) {
                        atomic_inc(&crq->io_context->ioc->refcount);
                        cfqd->active_cic = crq->io_context;
                }

                if (RB_EMPTY(&cfqq->sort_list))
                        break;

        } while (dispatched < max_dispatch);

        /*
         * if slice end isn't set yet, set it. if at least one request was
         * sync, use the sync time slice value
         */
        if (!cfqq->slice_end)
                cfq_set_prio_slice(cfqd, cfqq);

        /*
         * expire an async queue immediately if it has used up its slice. idle
         * queues always expire after one dispatch round.
         */
        if ((!cfq_cfqq_sync(cfqq) &&
            cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
            cfq_class_idle(cfqq))
                cfq_slice_expired(cfqd, 0);

        return dispatched;
}

static int
cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;

        if (!cfqd->busy_queues)
                return 0;

        cfqq = cfq_select_queue(cfqd, force);
        if (cfqq) {
                cfqq->wait_request = 0;
                cfqq->must_dispatch = 0;
                del_timer(&cfqd->idle_slice_timer);

                if (cfq_class_idle(cfqq))
                        max_dispatch = 1;

                return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
        }

        return 0;
}

static inline void cfq_account_dispatch(struct cfq_rq *crq)
{
        struct cfq_queue *cfqq = crq->cfq_queue;
        struct cfq_data *cfqd = cfqq->cfqd;

        if (unlikely(!blk_fs_request(crq->request)))
                return;

        /*
         * accounted bit is necessary since some drivers will call
         * elv_next_request() many times for the same request (eg ide)
         */
        if (crq->accounted)
                return;

        crq->accounted = 1;
        cfqd->rq_in_driver++;
}

static inline void
cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
        struct cfq_data *cfqd = cfqq->cfqd;
        unsigned long now;

        if (!crq->accounted)
                return;

        now = jiffies;

        WARN_ON(!cfqd->rq_in_driver);
        cfqd->rq_in_driver--;

        if (!cfq_class_idle(cfqq))
                cfqd->last_end_request = now;

        if (!cfqq->in_flight && cfqq->on_rr) {
                cfqq->service_last = now;
                cfq_resort_rr_list(cfqq, 0);
        }

        if (crq->is_sync)
                crq->io_context->last_end_request = now;
}

static struct request *cfq_next_request(request_queue_t *q)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct request *rq;

        if (!list_empty(&q->queue_head)) {
                struct cfq_rq *crq;
dispatch:
                rq = list_entry_rq(q->queue_head.next);

                crq = RQ_DATA(rq);
                if (crq) {
                        /*
                         * if idle window is disabled, allow queue buildup
                         */
                        if (!crq->in_flight && !crq->cfq_queue->idle_window &&
                            cfqd->rq_in_driver >= cfqd->cfq_max_depth)
                                return NULL;

                        cfq_remove_merge_hints(q, crq);
                        cfq_account_dispatch(crq);
                }

                return rq;
        }

        if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
                goto dispatch;

        return NULL;
}

/*
 * task holds one reference to the queue, dropped when task exits. each crq
 * in-flight on this queue also holds a reference, dropped when crq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
        struct cfq_data *cfqd = cfqq->cfqd;

        BUG_ON(atomic_read(&cfqq->ref) <= 0);

        if (!atomic_dec_and_test(&cfqq->ref))
                return;

        BUG_ON(rb_first(&cfqq->sort_list));
        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
        BUG_ON(cfqq->on_rr);

        if (unlikely(cfqd->active_queue == cfqq)) {
                cfq_slice_expired(cfqd, 0);
                kblockd_schedule_work(&cfqd->unplug_work);
        }

        cfq_put_cfqd(cfqq->cfqd);

        /*
         * it's on the empty list and still hashed
         */
        list_del(&cfqq->cfq_list);
        hlist_del(&cfqq->cfq_hash);
        kmem_cache_free(cfq_pool, cfqq);
}

static inline struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, const int hashval)
{
        struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
        struct hlist_node *entry, *next;

        hlist_for_each_safe(entry, next, hash_list) {
                struct cfq_queue *__cfqq = list_entry_qhash(entry);

                if (__cfqq->key == key)
                        return __cfqq;
        }

        return NULL;
}

static struct cfq_queue *
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key)
{
        return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT));
}

static void cfq_free_io_context(struct cfq_io_context *cic)
{
        struct cfq_io_context *__cic;
        struct list_head *entry, *next;

        list_for_each_safe(entry, next, &cic->list) {
                __cic = list_entry(entry, struct cfq_io_context, list);
                kmem_cache_free(cfq_ioc_pool, __cic);
        }

        kmem_cache_free(cfq_ioc_pool, cic);
}

/*
 * Called with interrupts disabled
 */
static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
        struct cfq_data *cfqd = cic->cfqq->cfqd;
        request_queue_t *q = cfqd->queue;

        WARN_ON(!irqs_disabled());

        spin_lock(q->queue_lock);

        if (unlikely(cic->cfqq == cfqd->active_queue)) {
                cfq_slice_expired(cfqd, 0);
                kblockd_schedule_work(&cfqd->unplug_work);
        }

        cfq_put_queue(cic->cfqq);
        cic->cfqq = NULL;
        spin_unlock(q->queue_lock);
}

/*
 * Another task may update the task cic list, if it is doing a queue lookup
 * on its behalf. cfq_cic_lock excludes such concurrent updates
 */
static void cfq_exit_io_context(struct cfq_io_context *cic)
{
        struct cfq_io_context *__cic;
        struct list_head *entry;
        unsigned long flags;

        local_irq_save(flags);

        /*
         * put the reference this task is holding to the various queues
         */
        list_for_each(entry, &cic->list) {
                __cic = list_entry(entry, struct cfq_io_context, list);
                cfq_exit_single_io_context(__cic);
        }

        cfq_exit_single_io_context(cic);
        local_irq_restore(flags);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
{
        struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);

        if (cic) {
                INIT_LIST_HEAD(&cic->list);
                cic->cfqq = NULL;
                cic->key = NULL;
                cic->last_end_request = jiffies;
                cic->ttime_total = 0;
                cic->ttime_samples = 0;
                cic->ttime_mean = 0;
                cic->dtor = cfq_free_io_context;
                cic->exit = cfq_exit_io_context;
        }

        return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
        struct task_struct *tsk = current;
        int ioprio_class;

        if (!cfqq->prio_changed)
                return;

        ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
        switch (ioprio_class) {
                default:
                        printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
                case IOPRIO_CLASS_NONE:
                        /*
                         * no prio set, place us in the middle of the BE classes
                         */
                        cfqq->ioprio = task_nice_ioprio(tsk);
                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
                        break;
                case IOPRIO_CLASS_RT:
                        cfqq->ioprio = task_ioprio(tsk);
                        cfqq->ioprio_class = IOPRIO_CLASS_RT;
                        break;
                case IOPRIO_CLASS_BE:
                        cfqq->ioprio = task_ioprio(tsk);
                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
                        break;
                case IOPRIO_CLASS_IDLE:
                        cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
                        cfqq->ioprio = 7;
                        cfqq->idle_window = 0;
                        break;
        }

        /*
         * keep track of original prio settings in case we have to temporarily
         * elevate the priority of this queue
         */
        cfqq->org_ioprio = cfqq->ioprio;
        cfqq->org_ioprio_class = cfqq->ioprio_class;

        if (cfqq->on_rr)
                cfq_resort_rr_list(cfqq, 0);

        cfqq->prio_changed = 0;
}

static inline void changed_ioprio(struct cfq_queue *cfqq)
{
        if (cfqq) {
                struct cfq_data *cfqd = cfqq->cfqd;

                spin_lock(cfqd->queue->queue_lock);
                cfqq->prio_changed = 1;
                cfq_init_prio_data(cfqq);
                spin_unlock(cfqd->queue->queue_lock);
        }
}

/*
 * callback from sys_ioprio_set, irqs are disabled
 */
static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
{
        struct cfq_io_context *cic = ioc->cic;

        changed_ioprio(cic->cfqq);

        list_for_each_entry(cic, &cic->list, list)
                changed_ioprio(cic->cfqq);

        return 0;
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, int gfp_mask)
{
        const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
        struct cfq_queue *cfqq, *new_cfqq = NULL;

retry:
        cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);

        if (!cfqq) {
                if (new_cfqq) {
                        cfqq = new_cfqq;
                        new_cfqq = NULL;
                } else if (gfp_mask & __GFP_WAIT) {
                        spin_unlock_irq(cfqd->queue->queue_lock);
                        new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
                        spin_lock_irq(cfqd->queue->queue_lock);
                        goto retry;
                } else {
                        cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
                        if (!cfqq)
                                goto out;
                }

                memset(cfqq, 0, sizeof(*cfqq));

                INIT_HLIST_NODE(&cfqq->cfq_hash);
                INIT_LIST_HEAD(&cfqq->cfq_list);
                RB_CLEAR_ROOT(&cfqq->sort_list);
                INIT_LIST_HEAD(&cfqq->fifo);

                cfqq->key = key;
                hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
                atomic_set(&cfqq->ref, 0);
                cfqq->cfqd = cfqd;
                atomic_inc(&cfqd->ref);
                cfqq->service_last = 0;
                /*
                 * set ->slice_left to allow preemption for a new process
                 */
                cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
                cfqq->idle_window = 1;
                cfqq->ioprio = -1;
                cfqq->ioprio_class = -1;
                cfqq->prio_changed = 1;
        }

        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);

        atomic_inc(&cfqq->ref);
out:
        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
        return cfqq;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq. Note that caller is holding a reference to
 * cfqq, so we don't need to worry about it disappearing
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
{
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        ioc = get_io_context(gfp_mask);
        if (!ioc)
                return NULL;

        if ((cic = ioc->cic) == NULL) {
                cic = cfq_alloc_io_context(cfqd, gfp_mask);

                if (cic == NULL)
                        goto err;

                /*
                 * manually increment generic io_context usage count, it
                 * cannot go away since we are already holding one ref to it
                 */
                ioc->cic = cic;
                ioc->set_ioprio = cfq_ioc_set_ioprio;
                cic->ioc = ioc;
                cic->key = cfqd;
                atomic_inc(&cfqd->ref);
        } else {
                struct cfq_io_context *__cic;

                /*
                 * the first cic on the list is actually the head itself
                 */
                if (cic->key == cfqd)
                        goto out;

                /*
                 * cic exists, check if we already are there. linear search
                 * should be ok here, the list will usually not be more than
                 * 1 or a few entries long
                 */
                list_for_each_entry(__cic, &cic->list, list) {
                        /*
                         * this process is already holding a reference to
                         * this queue, so no need to get one more
                         */
                        if (__cic->key == cfqd) {
                                cic = __cic;
                                goto out;
                        }
                }

                /*
                 * nope, process doesn't have a cic associated with this
                 * cfqq yet. get a new one and add to list
                 */
                __cic = cfq_alloc_io_context(cfqd, gfp_mask);
                if (__cic == NULL)
                        goto err;

                __cic->ioc = ioc;
                __cic->key = cfqd;
                atomic_inc(&cfqd->ref);
                list_add(&__cic->list, &cic->list);
                cic = __cic;
        }

out:
        return cic;
err:
        put_io_context(ioc);
        return NULL;
}

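/*
 * The ttime_* fields form a fixed point (scaled by 256) exponentially
 * decaying average of a task's "think time", the gap between its last
 * request completing and the next one arriving. Each update keeps 7/8 of
 * the old value, so roughly the last eight samples dominate ttime_mean.
 */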
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
        unsigned long elapsed, ttime;

        /*
         * if this context already has stuff queued, thinktime is from
         * last queue not last end
         */
#if 0
        if (time_after(cic->last_end_request, cic->last_queue))
                elapsed = jiffies - cic->last_end_request;
        else
                elapsed = jiffies - cic->last_queue;
#else
        elapsed = jiffies - cic->last_end_request;
#endif

        ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

        cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
        cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
        cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

#define sample_valid(samples)   ((samples) > 80)
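/*
 * ttime_samples converges towards 256 as updates arrive (s = (7s+256)/8),
 * so requiring more than 80 means roughly three or more samples have been
 * seen before ttime_mean is trusted.
 */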

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                       struct cfq_io_context *cic)
{
        int enable_idle = cfqq->idle_window;

        if (!cic->ioc->task || !cfqd->cfq_slice_idle)
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
                if (cic->ttime_mean > cfqd->cfq_slice_idle)
                        enable_idle = 0;
                else
                        enable_idle = 1;
        }

        cfqq->idle_window = enable_idle;
}


/*
 * Check if new_cfqq should preempt the currently active queue. Returns 0
 * for no (or if we aren't sure), 1 to cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                   struct cfq_rq *crq)
{
        struct cfq_queue *cfqq = cfqd->active_queue;

        if (cfq_class_idle(new_cfqq))
                return 0;

        if (!cfqq)
                return 1;

        if (cfq_class_idle(cfqq))
                return 1;
        if (!new_cfqq->wait_request)
                return 0;
        /*
         * if it doesn't have slice left, forget it
         */
        if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
                return 0;
        if (crq->is_sync && !cfq_cfqq_sync(cfqq))
                return 1;

        return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        struct cfq_queue *__cfqq, *next;

        list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
                cfq_resort_rr_list(__cfqq, 1);

        if (!cfqq->slice_left)
                cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;

        cfqq->slice_end = cfqq->slice_left + jiffies;
        cfq_slice_expired(cfqd, 1);
        __cfq_set_active_queue(cfqd, cfqq);
}

/*
 * should really be a ll_rw_blk.c helper
 */
static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        request_queue_t *q = cfqd->queue;

        if (!blk_queue_plugged(q))
                q->request_fn(q);
        else
                __generic_unplug_device(q);
}

/*
 * Called when a new fs request (crq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 struct cfq_rq *crq)
{
        const int sync = crq->is_sync;

        cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);

        if (sync) {
                struct cfq_io_context *cic = crq->io_context;

                cfq_update_io_thinktime(cfqd, cic);
                cfq_update_idle_window(cfqd, cfqq, cic);

                cic->last_queue = jiffies;
        }

        if (cfqq == cfqd->active_queue) {
                /*
                 * if we are waiting for a request for this queue, let it rip
                 * immediately and flag that we must not expire this queue
                 * just now
                 */
                if (cfqq->wait_request) {
                        cfqq->must_dispatch = 1;
                        del_timer(&cfqd->idle_slice_timer);
                        cfq_start_queueing(cfqd, cfqq);
                }
        } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
                /*
                 * not the active queue - expire current slice if it is
                 * idle and has expired its mean thinktime, or this new queue
                 * has some old slice time left and is of higher priority
                 */
                cfq_preempt_queue(cfqd, cfqq);
                cfqq->must_dispatch = 1;
                cfq_start_queueing(cfqd, cfqq);
        }
}

1677 static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
1678 {
1679         struct cfq_rq *crq = RQ_DATA(rq);
1680         struct cfq_queue *cfqq = crq->cfq_queue;
1681
1682         cfq_init_prio_data(cfqq);
1683
1684         cfq_add_crq_rb(crq);
1685
1686         list_add_tail(&rq->queuelist, &cfqq->fifo);
1687
1688         if (rq_mergeable(rq)) {
1689                 cfq_add_crq_hash(cfqd, crq);
1690
1691                 if (!cfqd->queue->last_merge)
1692                         cfqd->queue->last_merge = rq;
1693         }
1694
1695         cfq_crq_enqueued(cfqd, cfqq, crq);
1696 }
1697
1698 static void
1699 cfq_insert_request(request_queue_t *q, struct request *rq, int where)
1700 {
1701         struct cfq_data *cfqd = q->elevator->elevator_data;
1702
1703         switch (where) {
1704                 case ELEVATOR_INSERT_BACK:
1705                         while (cfq_dispatch_requests(q, INT_MAX, 1))
1706                                 ;
1707                         list_add_tail(&rq->queuelist, &q->queue_head);
1708                         /*
1709                          * If we were idling with pending requests on
1710                          * inactive cfqqs, force dispatching will
1711                          * remove the idle timer and the queue won't
1712                          * be kicked by __make_request() afterward.
1713                          * Kick it here.
1714                          */
1715                         kblockd_schedule_work(&cfqd->unplug_work);
1716                         break;
1717                 case ELEVATOR_INSERT_FRONT:
1718                         list_add(&rq->queuelist, &q->queue_head);
1719                         break;
1720                 case ELEVATOR_INSERT_SORT:
1721                         BUG_ON(!blk_fs_request(rq));
1722                         cfq_enqueue(cfqd, rq);
1723                         break;
1724                 default:
1725                         printk("%s: bad insert point %d\n", __FUNCTION__, where);
1726                         return;
1727         }
1728 }
1729
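     /*
      * true if requests are pending, either on the dispatch list or in
      * one of the busy queues
      */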
1730 static inline int cfq_pending_requests(struct cfq_data *cfqd)
1731 {
1732         return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
1733 }
1734
1735 static int cfq_queue_empty(request_queue_t *q)
1736 {
1737         struct cfq_data *cfqd = q->elevator->elevator_data;
1738
1739         return !cfq_pending_requests(cfqd);
1740 }
1741
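     /*
      * a request completed at the driver level - drop the in-flight
      * accounting and update slice usage
      */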
1742 static void cfq_completed_request(request_queue_t *q, struct request *rq)
1743 {
1744         struct cfq_rq *crq = RQ_DATA(rq);
1745         struct cfq_queue *cfqq;
1746
1747         if (unlikely(!blk_fs_request(rq)))
1748                 return;
1749
1750         cfqq = crq->cfq_queue;
1751
1752         if (crq->in_flight) {
1753                 WARN_ON(!cfqq->in_flight);
1754                 cfqq->in_flight--;
1755         }
1756
1757         cfq_account_completion(cfqq, crq);
1758 }
1759
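     /*
      * find the request sorted immediately before rq in its rb-tree
      */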
1760 static struct request *
1761 cfq_former_request(request_queue_t *q, struct request *rq)
1762 {
1763         struct cfq_rq *crq = RQ_DATA(rq);
1764         struct rb_node *rbprev = rb_prev(&crq->rb_node);
1765
1766         if (rbprev)
1767                 return rb_entry_crq(rbprev)->request;
1768
1769         return NULL;
1770 }
1771
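     /*
      * find the request sorted immediately after rq in its rb-tree
      */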
1772 static struct request *
1773 cfq_latter_request(request_queue_t *q, struct request *rq)
1774 {
1775         struct cfq_rq *crq = RQ_DATA(rq);
1776         struct rb_node *rbnext = rb_next(&crq->rb_node);
1777
1778         if (rbnext)
1779                 return rb_entry_crq(rbnext)->request;
1780
1781         return NULL;
1782 }
1783
1784 /*
1785  * we temporarily boost lower priority queues if they are holding fs exclusive
1786  * resources. they are boosted to normal prio (CLASS_BE/4)
1787  */
1788 static void cfq_prio_boost(struct cfq_queue *cfqq)
1789 {
1790         const int ioprio_class = cfqq->ioprio_class;
1791         const int ioprio = cfqq->ioprio;
1792
1793         if (has_fs_excl()) {
1794                 /*
1795                  * boost idle prio on transactions that would lock out other
1796                  * users of the filesystem
1797                  */
1798                 if (cfq_class_idle(cfqq))
1799                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
1800                 if (cfqq->ioprio > IOPRIO_NORM)
1801                         cfqq->ioprio = IOPRIO_NORM;
1802         } else {
1803                 /*
1804                  * check if we need to unboost the queue
1805                  */
1806                 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
1807                         cfqq->ioprio_class = cfqq->org_ioprio_class;
1808                 if (cfqq->ioprio != cfqq->org_ioprio)
1809                         cfqq->ioprio = cfqq->org_ioprio;
1810         }
1811
1812         /*
1813          * refile between round-robin lists if the priority class or level changed
1814          */
1815         if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
1816             cfqq->on_rr)
1817                 cfq_resort_rr_list(cfqq, 0);
1818 }
1819
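     /*
      * reads and requests from processes doing sync io get a per-task
      * key, all other (async) requests share the single CFQ_KEY_ASYNC key
      */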
1820 static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
1821 {
1822         if (rw == READ || process_sync(task))
1823                 return task->pid;
1824
1825         return CFQ_KEY_ASYNC;
1826 }
1827
1828 static inline int
1829 __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1830                 struct task_struct *task, int rw)
1831 {
1832         if (cfqq->wait_request && cfqq->must_alloc)
1833                 return ELV_MQUEUE_MUST;
1834
1835         return ELV_MQUEUE_MAY;
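
             /*
              * NOTE: the more elaborate may_queue policy below is
              * currently compiled out
              */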
1836 #if 0
1837         if (!cfqq || task->flags & PF_MEMALLOC)
1838                 return ELV_MQUEUE_MAY;
1839         if (!cfqq->allocated[rw] || cfqq->must_alloc) {
1840                 if (cfqq->wait_request)
1841                         return ELV_MQUEUE_MUST;
1842
1843                 /*
1844                  * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
1845                  * can quickly flood the queue with writes from a single task
1846                  */
1847                 if (rw == READ || !cfqq->must_alloc_slice) {
1848                         cfqq->must_alloc_slice = 1;
1849                         return ELV_MQUEUE_MUST;
1850                 }
1851
1852                 return ELV_MQUEUE_MAY;
1853         }
1854         if (cfq_class_idle(cfqq))
1855                 return ELV_MQUEUE_NO;
1856         if (cfqq->allocated[rw] >= cfqd->max_queued) {
1857                 struct io_context *ioc = get_io_context(GFP_ATOMIC);
1858                 int ret = ELV_MQUEUE_NO;
1859
1860                 if (ioc && ioc->nr_batch_requests)
1861                         ret = ELV_MQUEUE_MAY;
1862
1863                 put_io_context(ioc);
1864                 return ret;
1865         }
1866
1867         return ELV_MQUEUE_MAY;
1868 #endif
1869 }
1870
1871 static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
1872 {
1873         struct cfq_data *cfqd = q->elevator->elevator_data;
1874         struct task_struct *tsk = current;
1875         struct cfq_queue *cfqq;
1876
1877         /*
1878          * don't force setup of a queue from here, as a call to may_queue
1879          * does not necessarily imply that a request actually will be queued.
1880          * so just look up a possibly existing queue, or return 'may queue'
1881          * if that fails
1882          */
1883         cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw));
1884         if (cfqq) {
1885                 cfq_init_prio_data(cfqq);
1886                 cfq_prio_boost(cfqq);
1887
1888                 return __cfq_may_queue(cfqd, cfqq, tsk, rw);
1889         }
1890
1891         return ELV_MQUEUE_MAY;
1892 }
1893
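     /*
      * a request was freed - wake up anyone sleeping on the request
      * lists if this queue dropped back under its allocation limit, or
      * if we are starved for requests
      */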
1894 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
1895 {
1896         struct cfq_data *cfqd = q->elevator->elevator_data;
1897         struct request_list *rl = &q->rq;
1898
1899         if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
1900                 smp_mb();
1901                 if (waitqueue_active(&rl->wait[READ]))
1902                         wake_up(&rl->wait[READ]);
1903         }
1904
1905         if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
1906                 smp_mb();
1907                 if (waitqueue_active(&rl->wait[WRITE]))
1908                         wake_up(&rl->wait[WRITE]);
1909         }
1910 }
1911
1912 /*
1913  * queue lock held here
1914  */
1915 static void cfq_put_request(request_queue_t *q, struct request *rq)
1916 {
1917         struct cfq_data *cfqd = q->elevator->elevator_data;
1918         struct cfq_rq *crq = RQ_DATA(rq);
1919
1920         if (crq) {
1921                 struct cfq_queue *cfqq = crq->cfq_queue;
1922                 const int rw = rq_data_dir(rq);
1923
1924                 BUG_ON(!cfqq->allocated[rw]);
1925                 cfqq->allocated[rw]--;
1926
1927                 put_io_context(crq->io_context->ioc);
1928
1929                 mempool_free(crq, cfqd->crq_pool);
1930                 rq->elevator_private = NULL;
1931
1932                 cfq_check_waiters(q, cfqq);
1933                 cfq_put_queue(cfqq);
1934         }
1935 }
1936
1937 /*
1938  * Allocate cfq data structures associated with this request.
1939  */
1940 static int
1941 cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
1942                 int gfp_mask)
1943 {
1944         struct cfq_data *cfqd = q->elevator->elevator_data;
1945         struct cfq_io_context *cic;
1946         const int rw = rq_data_dir(rq);
1947         struct cfq_queue *cfqq;
1948         struct cfq_rq *crq;
1949         unsigned long flags;
1950
1951         might_sleep_if(gfp_mask & __GFP_WAIT);
1952
1953         cic = cfq_get_io_context(cfqd, cfq_queue_pid(current, rw), gfp_mask);
1954
1955         spin_lock_irqsave(q->queue_lock, flags);
1956
1957         if (!cic)
1958                 goto queue_fail;
1959
1960         if (!cic->cfqq) {
1961                 cfqq = cfq_get_queue(cfqd, current->pid, gfp_mask);
1962                 if (!cfqq)
1963                         goto queue_fail;
1964
1965                 cic->cfqq = cfqq;
1966         } else
1967                 cfqq = cic->cfqq;
1968
1969         cfqq->allocated[rw]++;
1970         cfqq->must_alloc = 0;
1971         cfqd->rq_starved = 0;
1972         atomic_inc(&cfqq->ref);
1973         spin_unlock_irqrestore(q->queue_lock, flags);
1974
1975         crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
1976         if (crq) {
1977                 RB_CLEAR(&crq->rb_node);
1978                 crq->rb_key = 0;
1979                 crq->request = rq;
1980                 INIT_HLIST_NODE(&crq->hash);
1981                 crq->cfq_queue = cfqq;
1982                 crq->io_context = cic;
1983                 crq->in_flight = crq->accounted = 0;
1984                 crq->is_sync = (rw == READ || process_sync(current));
1985                 crq->requeued = 0;
1986                 rq->elevator_private = crq;
1987                 return 0;
1988         }
1989
1990         spin_lock_irqsave(q->queue_lock, flags);
1991         cfqq->allocated[rw]--;
1992         if (!(cfqq->allocated[0] + cfqq->allocated[1]))
1993                 cfqq->must_alloc = 1;
1994         cfq_put_queue(cfqq);
1995 queue_fail:
1996         if (cic)
1997                 put_io_context(cic->ioc);
1998         /*
1999          * mark us rq allocation starved. we need to kickstart the process
2000          * ourselves if there are no pending requests that can do it for us.
2001          * that would be an extremely rare OOM situation
2002          */
2003         cfqd->rq_starved = 1;
2004         kblockd_schedule_work(&cfqd->unplug_work);
2005         spin_unlock_irqrestore(q->queue_lock, flags);
2006         return 1;
2007 }
2008
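     /*
      * work function, run off kblockd: wake up any allocation-starved
      * sleepers, then unplug and restart request processing
      */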
2009 static void cfq_kick_queue(void *data)
2010 {
2011         request_queue_t *q = data;
2012         struct cfq_data *cfqd = q->elevator->elevator_data;
2013         unsigned long flags;
2014
2015         spin_lock_irqsave(q->queue_lock, flags);
2016
2017         if (cfqd->rq_starved) {
2018                 struct request_list *rl = &q->rq;
2019
2020                 /*
2021                  * we aren't guaranteed to get a request after this, but we
2022                  * have to be opportunistic
2023                  */
2024                 smp_mb();
2025                 if (waitqueue_active(&rl->wait[READ]))
2026                         wake_up(&rl->wait[READ]);
2027                 if (waitqueue_active(&rl->wait[WRITE]))
2028                         wake_up(&rl->wait[WRITE]);
2029         }
2030
2031         blk_remove_plug(q);
2032         q->request_fn(q);
2033         spin_unlock_irqrestore(q->queue_lock, flags);
2034 }
2035
2036 /*
2037  * Timer running if the active_queue is currently idling inside its time slice
2038  */
2039 static void cfq_idle_slice_timer(unsigned long data)
2040 {
2041         struct cfq_data *cfqd = (struct cfq_data *) data;
2042         struct cfq_queue *cfqq;
2043         unsigned long flags;
2044
2045         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2046
2047         if ((cfqq = cfqd->active_queue) != NULL) {
2048                 unsigned long now = jiffies;
2049
2050                 /*
2051                  * expired
2052                  */
2053                 if (time_after(now, cfqq->slice_end))
2054                         goto expire;
2055
2056                 /*
2057                  * only expire and reinvoke the request handler if there are
2058                  * other queues with pending requests
2059                  */
2060                 if (!cfq_pending_requests(cfqd)) {
2061                         cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
2062                         add_timer(&cfqd->idle_slice_timer);
2063                         goto out_cont;
2064                 }
2065
2066                 /*
2067                  * not expired and it has a request pending, let it dispatch
2068                  */
2069                 if (!RB_EMPTY(&cfqq->sort_list)) {
2070                         cfqq->must_dispatch = 1;
2071                         goto out_kick;
2072                 }
2073         }
2074 expire:
2075         cfq_slice_expired(cfqd, 0);
2076 out_kick:
2077         if (cfq_pending_requests(cfqd))
2078                 kblockd_schedule_work(&cfqd->unplug_work);
2079 out_cont:
2080         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2081 }
2082
2083 /*
2084  * Timer running if an idle class queue is waiting for service
2085  */
2086 static void cfq_idle_class_timer(unsigned long data)
2087 {
2088         struct cfq_data *cfqd = (struct cfq_data *) data;
2089         unsigned long flags, end;
2090
2091         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2092
2093         /*
2094          * race with a non-idle queue, reset timer
2095          */
2096         end = cfqd->last_end_request + CFQ_IDLE_GRACE;
2097         if (!time_after_eq(jiffies, end)) {
2098                 cfqd->idle_class_timer.expires = end;
2099                 add_timer(&cfqd->idle_class_timer);
2100         } else
2101                 kblockd_schedule_work(&cfqd->unplug_work);
2102
2103         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2104 }
2105
2106
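     /*
      * drop a reference to the scheduler data, freeing it on the last put
      */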
2107 static void cfq_put_cfqd(struct cfq_data *cfqd)
2108 {
2109         request_queue_t *q = cfqd->queue;
2110
2111         if (!atomic_dec_and_test(&cfqd->ref))
2112                 return;
2113
2114         blk_sync_queue(q);
2115
2116         blk_put_queue(q);
2117
2118         mempool_destroy(cfqd->crq_pool);
2119         kfree(cfqd->crq_hash);
2120         kfree(cfqd->cfq_hash);
2121         kfree(cfqd);
2122 }
2123
2124 static void cfq_exit_queue(elevator_t *e)
2125 {
2126         struct cfq_data *cfqd = e->elevator_data;
2127
2128         del_timer_sync(&cfqd->idle_slice_timer);
2129         del_timer_sync(&cfqd->idle_class_timer);
2130         cfq_put_cfqd(cfqd);
2131 }
2132
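     /*
      * set up the per-queue scheduler data: service lists, crq/cfqq
      * hashes, the crq mempool, the idle timers and the default tunables
      */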
2133 static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2134 {
2135         struct cfq_data *cfqd;
2136         int i;
2137
2138         cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
2139         if (!cfqd)
2140                 return -ENOMEM;
2141
2142         memset(cfqd, 0, sizeof(*cfqd));
2143
2144         for (i = 0; i < CFQ_PRIO_LISTS; i++)
2145                 INIT_LIST_HEAD(&cfqd->rr_list[i]);
2146
2147         INIT_LIST_HEAD(&cfqd->busy_rr);
2148         INIT_LIST_HEAD(&cfqd->cur_rr);
2149         INIT_LIST_HEAD(&cfqd->idle_rr);
2150         INIT_LIST_HEAD(&cfqd->empty_list);
2151
2152         cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
2153         if (!cfqd->crq_hash)
2154                 goto out_crqhash;
2155
2156         cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
2157         if (!cfqd->cfq_hash)
2158                 goto out_cfqhash;
2159
2160         cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
2161         if (!cfqd->crq_pool)
2162                 goto out_crqpool;
2163
2164         for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
2165                 INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
2166         for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
2167                 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
2168
2169         e->elevator_data = cfqd;
2170
2171         cfqd->queue = q;
2172         atomic_inc(&q->refcnt);
2173
2174         cfqd->max_queued = q->nr_requests / 4;
2175         q->nr_batching = cfq_queued;
2176
2177         init_timer(&cfqd->idle_slice_timer);
2178         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2179         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2180
2181         init_timer(&cfqd->idle_class_timer);
2182         cfqd->idle_class_timer.function = cfq_idle_class_timer;
2183         cfqd->idle_class_timer.data = (unsigned long) cfqd;
2184
2185         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
2186
2187         atomic_set(&cfqd->ref, 1);
2188
2189         cfqd->cfq_queued = cfq_queued;
2190         cfqd->cfq_quantum = cfq_quantum;
2191         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2192         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
2193         cfqd->cfq_back_max = cfq_back_max;
2194         cfqd->cfq_back_penalty = cfq_back_penalty;
2195         cfqd->cfq_slice[0] = cfq_slice_async;
2196         cfqd->cfq_slice[1] = cfq_slice_sync;
2197         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2198         cfqd->cfq_slice_idle = cfq_slice_idle;
2199         cfqd->cfq_max_depth = cfq_max_depth;
2200         return 0;
2201 out_crqpool:
2202         kfree(cfqd->cfq_hash);
2203 out_cfqhash:
2204         kfree(cfqd->crq_hash);
2205 out_crqhash:
2206         kfree(cfqd);
2207         return -ENOMEM;
2208 }
2209
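     /*
      * destroy the slab caches for crq, cfqq and cfq io context objects
      */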
2210 static void cfq_slab_kill(void)
2211 {
2212         if (crq_pool)
2213                 kmem_cache_destroy(crq_pool);
2214         if (cfq_pool)
2215                 kmem_cache_destroy(cfq_pool);
2216         if (cfq_ioc_pool)
2217                 kmem_cache_destroy(cfq_ioc_pool);
2218 }
2219
2220 static int __init cfq_slab_setup(void)
2221 {
2222         crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
2223                                         NULL, NULL);
2224         if (!crq_pool)
2225                 goto fail;
2226
2227         cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
2228                                         NULL, NULL);
2229         if (!cfq_pool)
2230                 goto fail;
2231
2232         cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
2233                         sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
2234         if (!cfq_ioc_pool)
2235                 goto fail;
2236
2237         return 0;
2238 fail:
2239         cfq_slab_kill();
2240         return -ENOMEM;
2241 }
2242
2243 /*
2244  * sysfs parts below -->
2245  */
2246 struct cfq_fs_entry {
2247         struct attribute attr;
2248         ssize_t (*show)(struct cfq_data *, char *);
2249         ssize_t (*store)(struct cfq_data *, const char *, size_t);
2250 };
2251
2252 static ssize_t
2253 cfq_var_show(unsigned int var, char *page)
2254 {
2255         return sprintf(page, "%u\n", var);
2256 }
2257
2258 static ssize_t
2259 cfq_var_store(unsigned int *var, const char *page, size_t count)
2260 {
2261         char *p = (char *) page;
2262
2263         *var = simple_strtoul(p, &p, 10);
2264         return count;
2265 }
2266
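     /*
      * show/store method generators for the tunables below; __CONV
      * selects conversion between internal jiffies and milliseconds
      */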
2267 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
2268 static ssize_t __FUNC(struct cfq_data *cfqd, char *page)                \
2269 {                                                                       \
2270         unsigned int __data = __VAR;                                    \
2271         if (__CONV)                                                     \
2272                 __data = jiffies_to_msecs(__data);                      \
2273         return cfq_var_show(__data, (page));                            \
2274 }
2275 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
2276 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
2277 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2278 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
2279 SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
2280 SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
2281 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2282 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2283 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2284 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2285 SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
2286 #undef SHOW_FUNCTION
2287
2288 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
2289 static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)    \
2290 {                                                                       \
2291         unsigned int __data;                                            \
2292         int ret = cfq_var_store(&__data, (page), count);                \
2293         if (__data < (MIN))                                             \
2294                 __data = (MIN);                                         \
2295         else if (__data > (MAX))                                        \
2296                 __data = (MAX);                                         \
2297         if (__CONV)                                                     \
2298                 *(__PTR) = msecs_to_jiffies(__data);                    \
2299         else                                                            \
2300                 *(__PTR) = __data;                                      \
2301         return ret;                                                     \
2302 }
2303 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
2304 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
2305 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
2306 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
2307 STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
2308 STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
2309 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2310 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2311 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2312 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
2313 STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
2314 #undef STORE_FUNCTION
2315
2316 static struct cfq_fs_entry cfq_quantum_entry = {
2317         .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
2318         .show = cfq_quantum_show,
2319         .store = cfq_quantum_store,
2320 };
2321 static struct cfq_fs_entry cfq_queued_entry = {
2322         .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
2323         .show = cfq_queued_show,
2324         .store = cfq_queued_store,
2325 };
2326 static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
2327         .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
2328         .show = cfq_fifo_expire_sync_show,
2329         .store = cfq_fifo_expire_sync_store,
2330 };
2331 static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
2332         .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
2333         .show = cfq_fifo_expire_async_show,
2334         .store = cfq_fifo_expire_async_store,
2335 };
2336 static struct cfq_fs_entry cfq_back_max_entry = {
2337         .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
2338         .show = cfq_back_max_show,
2339         .store = cfq_back_max_store,
2340 };
2341 static struct cfq_fs_entry cfq_back_penalty_entry = {
2342         .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
2343         .show = cfq_back_penalty_show,
2344         .store = cfq_back_penalty_store,
2345 };
2346 static struct cfq_fs_entry cfq_slice_sync_entry = {
2347         .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
2348         .show = cfq_slice_sync_show,
2349         .store = cfq_slice_sync_store,
2350 };
2351 static struct cfq_fs_entry cfq_slice_async_entry = {
2352         .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
2353         .show = cfq_slice_async_show,
2354         .store = cfq_slice_async_store,
2355 };
2356 static struct cfq_fs_entry cfq_slice_async_rq_entry = {
2357         .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
2358         .show = cfq_slice_async_rq_show,
2359         .store = cfq_slice_async_rq_store,
2360 };
2361 static struct cfq_fs_entry cfq_slice_idle_entry = {
2362         .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
2363         .show = cfq_slice_idle_show,
2364         .store = cfq_slice_idle_store,
2365 };
2366 static struct cfq_fs_entry cfq_max_depth_entry = {
2367         .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
2368         .show = cfq_max_depth_show,
2369         .store = cfq_max_depth_store,
2370 };
2371 static struct attribute *default_attrs[] = {
2372         &cfq_quantum_entry.attr,
2373         &cfq_queued_entry.attr,
2374         &cfq_fifo_expire_sync_entry.attr,
2375         &cfq_fifo_expire_async_entry.attr,
2376         &cfq_back_max_entry.attr,
2377         &cfq_back_penalty_entry.attr,
2378         &cfq_slice_sync_entry.attr,
2379         &cfq_slice_async_entry.attr,
2380         &cfq_slice_async_rq_entry.attr,
2381         &cfq_slice_idle_entry.attr,
2382         &cfq_max_depth_entry.attr,
2383         NULL,
2384 };
2385
2386 #define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
2387
2388 static ssize_t
2389 cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2390 {
2391         elevator_t *e = container_of(kobj, elevator_t, kobj);
2392         struct cfq_fs_entry *entry = to_cfq(attr);
2393
2394         if (!entry->show)
2395                 return -EIO;
2396
2397         return entry->show(e->elevator_data, page);
2398 }
2399
2400 static ssize_t
2401 cfq_attr_store(struct kobject *kobj, struct attribute *attr,
2402                const char *page, size_t length)
2403 {
2404         elevator_t *e = container_of(kobj, elevator_t, kobj);
2405         struct cfq_fs_entry *entry = to_cfq(attr);
2406
2407         if (!entry->store)
2408                 return -EIO;
2409
2410         return entry->store(e->elevator_data, page, length);
2411 }
2412
2413 static struct sysfs_ops cfq_sysfs_ops = {
2414         .show   = cfq_attr_show,
2415         .store  = cfq_attr_store,
2416 };
2417
2418 static struct kobj_type cfq_ktype = {
2419         .sysfs_ops      = &cfq_sysfs_ops,
2420         .default_attrs  = default_attrs,
2421 };
2422
2423 static struct elevator_type iosched_cfq = {
2424         .ops = {
2425                 .elevator_merge_fn =            cfq_merge,
2426                 .elevator_merged_fn =           cfq_merged_request,
2427                 .elevator_merge_req_fn =        cfq_merged_requests,
2428                 .elevator_next_req_fn =         cfq_next_request,
2429                 .elevator_add_req_fn =          cfq_insert_request,
2430                 .elevator_remove_req_fn =       cfq_remove_request,
2431                 .elevator_requeue_req_fn =      cfq_requeue_request,
2432                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
2433                 .elevator_queue_empty_fn =      cfq_queue_empty,
2434                 .elevator_completed_req_fn =    cfq_completed_request,
2435                 .elevator_former_req_fn =       cfq_former_request,
2436                 .elevator_latter_req_fn =       cfq_latter_request,
2437                 .elevator_set_req_fn =          cfq_set_request,
2438                 .elevator_put_req_fn =          cfq_put_request,
2439                 .elevator_may_queue_fn =        cfq_may_queue,
2440                 .elevator_init_fn =             cfq_init_queue,
2441                 .elevator_exit_fn =             cfq_exit_queue,
2442         },
2443         .elevator_ktype =       &cfq_ktype,
2444         .elevator_name =        "cfq",
2445         .elevator_owner =       THIS_MODULE,
2446 };
2447
2448 static int __init cfq_init(void)
2449 {
2450         int ret;
2451
2452         /*
2453          * could be 0 on HZ < 1000 setups
2454          */
2455         if (!cfq_slice_async)
2456                 cfq_slice_async = 1;
2457         if (!cfq_slice_idle)
2458                 cfq_slice_idle = 1;
2459
2460         if (cfq_slab_setup())
2461                 return -ENOMEM;
2462
2463         ret = elv_register(&iosched_cfq);
2464         if (ret)
2465                 cfq_slab_kill();
2466
2467         return ret;
2468 }
2469
2470 static void __exit cfq_exit(void)
2471 {
2472         struct task_struct *g, *p;
2473         unsigned long flags;
2474
2475         read_lock_irqsave(&tasklist_lock, flags);
2476
2477         /*
2478          * iterate each process in the system, removing our io_context
2479          */
2480         do_each_thread(g, p) {
2481                 struct io_context *ioc = p->io_context;
2482
2483                 if (ioc && ioc->cic) {
2484                         ioc->cic->exit(ioc->cic);
2485                         cfq_free_io_context(ioc->cic);
2486                         ioc->cic = NULL;
2487                 }
2488         } while_each_thread(g, p);
2489
2490         read_unlock_irqrestore(&tasklist_lock, flags);
2491
2492         elv_unregister(&iosched_cfq);
2493         cfq_slab_kill();
2494 }
2495
2496 module_init(cfq_init);
2497 module_exit(cfq_exit);
2498
2499 MODULE_AUTHOR("Jens Axboe");
2500 MODULE_LICENSE("GPL");
2501 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");