diff --git a/block/elevator.c b/block/elevator.c
index 96a61e029ce5e7858d2e024c08eff817b5a998d6..d00b283f31d21fdb5256754b75336e1005fb7eed 100644
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
+#include <linux/blktrace_api.h>
 
 #include <asm/uaccess.h>
 
@@ -120,21 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
        return e;
 }
 
-static int elevator_attach(request_queue_t *q, struct elevator_type *e,
-                          struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
 {
-       int ret = 0;
-
-       memset(eq, 0, sizeof(*eq));
-       eq->ops = &e->ops;
-       eq->elevator_type = e;
+       return eq->ops->elevator_init_fn(q, eq);
+}
 
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+                          void *data)
+{
        q->elevator = eq;
-
-       if (eq->ops->elevator_init_fn)
-               ret = eq->ops->elevator_init_fn(q, eq);
-
-       return ret;
+       eq->elevator_data = data;
 }
 
 static char chosen_elevator[16];
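
With elevator_init_queue() now returning the scheduler's private data rather than elevator_attach() wiring everything up, each io scheduler's elevator_init_fn is expected to allocate its queue data and return it (NULL on failure). A minimal sketch of such a hook, modeled loosely on the noop scheduler and not part of this diff:

struct noop_data {
	struct list_head queue;
};

static void *noop_init_queue(request_queue_t *q, elevator_t *e)
{
	struct noop_data *nd;

	/* allocate the scheduler-private queue data */
	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
	if (!nd)
		return NULL;		/* elevator_init() maps this to -ENOMEM */

	INIT_LIST_HEAD(&nd->queue);
	return nd;			/* stored in eq->elevator_data by elevator_attach() */
}
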
@@ -149,16 +145,43 @@ static int __init elevator_setup(char *str)
                strcpy(chosen_elevator, "anticipatory");
        else
                strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
-       return 0;
+       return 1;
 }
 
 __setup("elevator=", elevator_setup);
 
+static struct kobj_type elv_ktype;
+
+static elevator_t *elevator_alloc(struct elevator_type *e)
+{
+       elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+       if (eq) {
+               memset(eq, 0, sizeof(*eq));
+               eq->ops = &e->ops;
+               eq->elevator_type = e;
+               kobject_init(&eq->kobj);
+               snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+               eq->kobj.ktype = &elv_ktype;
+               mutex_init(&eq->sysfs_lock);
+       } else {
+               elevator_put(e);
+       }
+       return eq;
+}
+
+static void elevator_release(struct kobject *kobj)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       elevator_put(e->elevator_type);
+       kfree(e);
+}
+
 int elevator_init(request_queue_t *q, char *name)
 {
        struct elevator_type *e = NULL;
        struct elevator_queue *eq;
        int ret = 0;
+       void *data;
 
        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
@@ -176,29 +199,29 @@ int elevator_init(request_queue_t *q, char *name)
                e = elevator_get("noop");
        }
 
-       eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
-       if (!eq) {
-               elevator_put(e);
+       eq = elevator_alloc(e);
+       if (!eq)
                return -ENOMEM;
-       }
 
-       ret = elevator_attach(q, e, eq);
-       if (ret) {
-               kfree(eq);
-               elevator_put(e);
+       data = elevator_init_queue(q, eq);
+       if (!data) {
+               kobject_put(&eq->kobj);
+               return -ENOMEM;
        }
 
+       elevator_attach(q, eq, data);
        return ret;
 }
 
 void elevator_exit(elevator_t *e)
 {
+       mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
                e->ops->elevator_exit_fn(e);
+       e->ops = NULL;
+       mutex_unlock(&e->sysfs_lock);
 
-       elevator_put(e->elevator_type);
-       e->elevator_type = NULL;
-       kfree(e);
+       kobject_put(&e->kobj);
 }
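
On the teardown side, elevator_exit() now only runs the scheduler's exit hook under sysfs_lock and drops the kobject reference; freeing the private data is the scheduler's job, and the elevator_t itself is released later by elevator_release() once the last sysfs reference is gone. A hedged sketch of the matching exit hook, again modeled on noop and not part of this diff:

static void noop_exit_queue(elevator_t *e)
{
	struct noop_data *nd = e->elevator_data;

	/* nothing may still be queued when the elevator goes away */
	BUG_ON(!list_empty(&nd->queue));
	kfree(nd);
}
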
 
 /*
@@ -293,7 +316,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
        rq->flags &= ~REQ_STARTED;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -310,39 +333,13 @@ static void elv_drain_elevator(request_queue_t *q)
        }
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-                      int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
        struct list_head *pos;
        unsigned ordseq;
+       int unplug_it = 1;
 
-       if (q->ordcolor)
-               rq->flags |= REQ_ORDERED_COLOR;
-
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-               /*
-                * toggle ordered color
-                */
-               q->ordcolor ^= 1;
-
-               /*
-                * barriers implicitly indicate back insertion
-                */
-               if (where == ELEVATOR_INSERT_SORT)
-                       where = ELEVATOR_INSERT_BACK;
-
-               /*
-                * this request is scheduling boundary, update end_sector
-                */
-               if (blk_fs_request(rq)) {
-                       q->end_sector = rq_end_sector(rq);
-                       q->boundary_rq = rq;
-               }
-       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-               where = ELEVATOR_INSERT_BACK;
-
-       if (plug)
-               blk_plug_device(q);
+       blk_add_trace_rq(q, rq, BLK_TA_INSERT);
 
        rq->q = q;
 
@@ -407,6 +404,11 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                }
 
                list_add_tail(&rq->queuelist, pos);
+               /*
+                * most requeues happen because of a busy condition, don't
+                * force unplug of the queue for that case.
+                */
+               unplug_it = 0;
                break;
 
        default:
@@ -415,7 +417,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
                BUG();
        }
 
-       if (blk_queue_plugged(q)) {
+       if (unplug_it && blk_queue_plugged(q)) {
                int nrq = q->rq.count[READ] + q->rq.count[WRITE]
                        - q->in_flight;
 
@@ -424,6 +426,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
        }
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+                      int plug)
+{
+       if (q->ordcolor)
+               rq->flags |= REQ_ORDERED_COLOR;
+
+       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+               /*
+                * toggle ordered color
+                */
+               if (blk_barrier_rq(rq))
+                       q->ordcolor ^= 1;
+
+               /*
+                * barriers implicitly indicate back insertion
+                */
+               if (where == ELEVATOR_INSERT_SORT)
+                       where = ELEVATOR_INSERT_BACK;
+
+               /*
+                * this request is scheduling boundary, update
+                * end_sector
+                */
+               if (blk_fs_request(rq)) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = rq;
+               }
+       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+               where = ELEVATOR_INSERT_BACK;
+
+       if (plug)
+               blk_plug_device(q);
+
+       elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
 {
@@ -474,6 +512,7 @@ struct request *elv_next_request(request_queue_t *q)
                         * not be passed by new incoming requests
                         */
                        rq->flags |= REQ_STARTED;
+                       blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
                }
 
                if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -620,34 +659,89 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
        }
 }
 
-int elv_register_queue(struct request_queue *q)
+#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
+
+static ssize_t
+elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
-       elevator_t *e = q->elevator;
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct elv_fs_entry *entry = to_elv(attr);
+       ssize_t error;
 
-       e->kobj.parent = kobject_get(&q->kobj);
-       if (!e->kobj.parent)
-               return -EBUSY;
+       if (!entry->show)
+               return -EIO;
 
-       snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
-       e->kobj.ktype = e->elevator_type->elevator_ktype;
+       mutex_lock(&e->sysfs_lock);
+       error = e->ops ? entry->show(e, page) : -ENOENT;
+       mutex_unlock(&e->sysfs_lock);
+       return error;
+}
+
+static ssize_t
+elv_attr_store(struct kobject *kobj, struct attribute *attr,
+              const char *page, size_t length)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct elv_fs_entry *entry = to_elv(attr);
+       ssize_t error;
 
-       return kobject_register(&e->kobj);
+       if (!entry->store)
+               return -EIO;
+
+       mutex_lock(&e->sysfs_lock);
+       error = e->ops ? entry->store(e, page, length) : -ENOENT;
+       mutex_unlock(&e->sysfs_lock);
+       return error;
 }
 
-void elv_unregister_queue(struct request_queue *q)
+static struct sysfs_ops elv_sysfs_ops = {
+       .show   = elv_attr_show,
+       .store  = elv_attr_store,
+};
+
+static struct kobj_type elv_ktype = {
+       .sysfs_ops      = &elv_sysfs_ops,
+       .release        = elevator_release,
+};
+
+int elv_register_queue(struct request_queue *q)
 {
-       if (q) {
-               elevator_t *e = q->elevator;
-               kobject_unregister(&e->kobj);
-               kobject_put(&q->kobj);
+       elevator_t *e = q->elevator;
+       int error;
+
+       e->kobj.parent = &q->kobj;
+
+       error = kobject_add(&e->kobj);
+       if (!error) {
+               struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
+               if (attr) {
+                       while (attr->attr.name) {
+                               if (sysfs_create_file(&e->kobj, &attr->attr))
+                                       break;
+                               attr++;
+                       }
+               }
+               kobject_uevent(&e->kobj, KOBJ_ADD);
        }
+       return error;
+}
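+
+The per-elevator kobject exposes its tunables through the elevator_attrs table of the scheduler's struct elevator_type, terminated by a NULL attribute name as checked by the loop in elv_register_queue() above; elv_attr_show()/elv_attr_store() then dispatch to the entry's show/store under sysfs_lock. A hedged sketch of how a scheduler might declare such a table (the "foo" names are illustrative, not taken from this diff):

struct foo_data {
	int quantum;
};

static ssize_t foo_quantum_show(elevator_t *e, char *page)
{
	struct foo_data *fd = e->elevator_data;

	return sprintf(page, "%d\n", fd->quantum);
}

static ssize_t foo_quantum_store(elevator_t *e, const char *page, size_t count)
{
	struct foo_data *fd = e->elevator_data;

	fd->quantum = simple_strtoul(page, NULL, 10);
	return count;
}

static struct elv_fs_entry foo_attrs[] = {
	{ .attr = { .name = "quantum", .mode = S_IRUGO | S_IWUSR },
	  .show = foo_quantum_show, .store = foo_quantum_store },
	{ .attr = { .name = NULL } },	/* terminator for elv_register_queue() */
};
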
+
+static void __elv_unregister_queue(elevator_t *e)
+{
+       kobject_uevent(&e->kobj, KOBJ_REMOVE);
+       kobject_del(&e->kobj);
+}
+
+void elv_unregister_queue(struct request_queue *q)
+{
+       if (q)
+               __elv_unregister_queue(q->elevator);
 }
 
 int elv_register(struct elevator_type *e)
 {
        spin_lock_irq(&elv_list_lock);
-       if (elevator_find(e->elevator_name))
-               BUG();
+       BUG_ON(elevator_find(e->elevator_name));
        list_add_tail(&e->list, &elv_list);
        spin_unlock_irq(&elv_list_lock);
 
@@ -668,21 +762,15 @@ void elv_unregister(struct elevator_type *e)
        /*
         * Iterate every thread in the process to remove the io contexts.
         */
-       read_lock(&tasklist_lock);
-       do_each_thread(g, p) {
-               struct io_context *ioc = p->io_context;
-               if (ioc && ioc->cic) {
-                       ioc->cic->exit(ioc->cic);
-                       ioc->cic->dtor(ioc->cic);
-                       ioc->cic = NULL;
-               }
-               if (ioc && ioc->aic) {
-                       ioc->aic->exit(ioc->aic);
-                       ioc->aic->dtor(ioc->aic);
-                       ioc->aic = NULL;
-               }
-       } while_each_thread(g, p);
-       read_unlock(&tasklist_lock);
+       if (e->ops.trim) {
+               read_lock(&tasklist_lock);
+               do_each_thread(g, p) {
+                       task_lock(p);
+                       e->ops.trim(p->io_context);
+                       task_unlock(p);
+               } while_each_thread(g, p);
+               read_unlock(&tasklist_lock);
+       }
 
        spin_lock_irq(&elv_list_lock);
        list_del_init(&e->list);
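
elv_unregister() now defers per-task cleanup to an optional trim hook in the scheduler's elevator_ops instead of open-coding the cic/aic teardown for every scheduler. A hedged sketch of such a hook for a scheduler that caches per-task contexts, modeled on the code removed above (the name foo_trim is made up); it would be wired up as .trim = foo_trim, while schedulers without per-task state leave the hook NULL so the tasklist walk is skipped entirely:

static void foo_trim(struct io_context *ioc)
{
	/* called under task_lock(); the task may not have an io context yet */
	if (!ioc || !ioc->cic)
		return;

	ioc->cic->exit(ioc->cic);
	ioc->cic->dtor(ioc->cic);
	ioc->cic = NULL;
}
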
@@ -696,16 +784,23 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  * need for the new one. this way we have a chance of going back to the old
  * one, if the new one fails init for some reason.
  */
-static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
        elevator_t *old_elevator, *e;
+       void *data;
 
        /*
         * Allocate new elevator
         */
-       e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+       e = elevator_alloc(new_e);
        if (!e)
-               goto error;
+               return 0;
+
+       data = elevator_init_queue(q, e);
+       if (!data) {
+               kobject_put(&e->kobj);
+               return 0;
+       }
 
        /*
         * Turn on BYPASS and drain all requests w/ elevator private data
@@ -725,19 +820,19 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
                elv_drain_elevator(q);
        }
 
-       spin_unlock_irq(q->queue_lock);
-
        /*
-        * unregister old elevator data
+        * Remember old elevator.
         */
-       elv_unregister_queue(q);
        old_elevator = q->elevator;
 
        /*
         * attach and start new elevator
         */
-       if (elevator_attach(q, new_e, e))
-               goto fail;
+       elevator_attach(q, e, data);
+
+       spin_unlock_irq(q->queue_lock);
+
+       __elv_unregister_queue(old_elevator);
 
        if (elv_register_queue(q))
                goto fail_register;
@@ -747,7 +842,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
         */
        elevator_exit(old_elevator);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-       return;
+       return 1;
 
 fail_register:
        /*
@@ -755,15 +850,10 @@ fail_register:
         * one again (along with re-adding the sysfs dir)
         */
        elevator_exit(e);
-       e = NULL;
-fail:
        q->elevator = old_elevator;
        elv_register_queue(q);
        clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-       kfree(e);
-error:
-       elevator_put(new_e);
-       printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
+       return 0;
 }
 
 ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
@@ -790,7 +880,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
                return count;
        }
 
-       elevator_switch(q, e);
+       if (!elevator_switch(q, e))
+               printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
        return count;
 }
 
@@ -820,10 +911,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 EXPORT_SYMBOL(elv_dispatch_sort);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
-EXPORT_SYMBOL(elv_requeue_request);
 EXPORT_SYMBOL(elv_next_request);
 EXPORT_SYMBOL(elv_dequeue_request);
 EXPORT_SYMBOL(elv_queue_empty);
-EXPORT_SYMBOL(elv_completed_request);
 EXPORT_SYMBOL(elevator_exit);
 EXPORT_SYMBOL(elevator_init);