/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */
	/* IO History tracking */
	/* Thinktime: gap between one request completing and the next arriving */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout (seek) pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
};
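The ttime_* fields drive the anticipation heuristic: the scheduler measures how long a process "thinks" between completing one read and issuing the next, and only idles the disk for processes whose mean think time is short. A minimal sketch of how such a running mean can be kept is shown below; the helper name and the 8-bit fixed-point scaling are illustrative assumptions, not the exact update code in block/as-iosched.c.

/*
 * Illustrative sketch only: fold a new think-time sample into the
 * per-process history when the next request arrives.  The fixed-point
 * scale (256 == 1.0) is an assumption; see block/as-iosched.c for the
 * scheduler's real update logic.
 */
static void sample_thinktime(struct as_io_context *aic, unsigned long now)
{
	unsigned long thinktime = now - aic->last_end_request;

	aic->ttime_samples += 256;
	aic->ttime_total += 256 * thinktime;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}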
struct cfq_io_context {
	struct rb_node rb_node; /* link in the owning io_context's cic_root tree */

	struct cfq_queue *cfqq[2]; /* per-process queues: [0] async, [1] sync */

	struct io_context *ioc; /* owning io_context */

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	struct list_head queue_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */
};
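cfqq[2] holds the two CFQ queues a process can be attached to on a given device, conventionally slot 0 for async and slot 1 for sync requests. A hypothetical accessor makes the indexing explicit:

/*
 * Hypothetical accessor (the kernel uses its own helper in
 * block/cfq-iosched.c): select the sync or async queue for a request.
 */
static inline struct cfq_queue *cic_queue(struct cfq_io_context *cic, int is_sync)
{
	return cic->cfqq[!!is_sync];
}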
/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed. These could be shared between processes.
 */
struct io_context {
	atomic_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;
	unsigned short ioprio_changed;
	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests; /* Number of requests left in the batch */

	struct as_io_context *aic; /* anticipatory scheduler state */
	struct rb_root cic_root; /* rbtree of this task's cfq_io_contexts */
};
static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * if ref count is zero, don't allow sharing (ioc is going away, it's
	 * a race).
	 */
	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
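ioc_task_link() is the take-a-reference path used when a new task wants to share an existing io_context, for example clone() with CLONE_IO. The sketch below shows roughly how a fork-time caller could use it; the function name and error handling are illustrative, not a verbatim copy of kernel/fork.c.

/*
 * Sketch of a fork-time caller, assuming CLONE_IO semantics: share the
 * parent's io_context when the flag is set, otherwise let the child
 * allocate its own context lazily on first I/O.  Illustrative only.
 */
static int copy_io_context_on_fork(unsigned long clone_flags,
				   struct task_struct *child)
{
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;

	if (clone_flags & CLONE_IO) {
		child->io_context = ioc_task_link(ioc);
		if (!child->io_context)
			return -ENOMEM; /* lost the race: ioc was going away */
	}

	return 0;
}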