rq->flags &= ~REQ_STARTED;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
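
The requeue path above is the motivation for the split: q->ordcolor may
only be flipped once, when a barrier request is first queued.  A minimal
sketch of the hazard the raw insert avoids; both helpers are
hypothetical and assume only the interfaces shown in this patch:

static void requeue_raw(request_queue_t *q, struct request *rq)
{
	/* correct: plain insertion, no barrier preprocessing */
	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void requeue_via_add(request_queue_t *q, struct request *rq)
{
	/*
	 * wrong for a requeued barrier: __elv_add_request() sees
	 * REQ_HARDBARRIER again and toggles q->ordcolor a second
	 * time, breaking the ordered-sequence color accounting
	 */
	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
}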
 
 static void elv_drain_elevator(request_queue_t *q)
        }
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-                      int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
        struct list_head *pos;
        unsigned ordseq;
 
-       if (q->ordcolor)
-               rq->flags |= REQ_ORDERED_COLOR;
-
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-               /*
-                * toggle ordered color
-                */
-               if (blk_barrier_rq(rq))
-                       q->ordcolor ^= 1;
-
-               /*
-                * barriers implicitly indicate back insertion
-                */
-               if (where == ELEVATOR_INSERT_SORT)
-                       where = ELEVATOR_INSERT_BACK;
-
-               /*
-                * this request is scheduling boundary, update end_sector
-                */
-               if (blk_fs_request(rq)) {
-                       q->end_sector = rq_end_sector(rq);
-                       q->boundary_rq = rq;
-               }
-       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-               where = ELEVATOR_INSERT_BACK;
-
-       if (plug)
-               blk_plug_device(q);
-
        rq->q = q;
 
        switch (where) {
        }
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+                      int plug)
+{
+       if (q->ordcolor)
+               rq->flags |= REQ_ORDERED_COLOR;
+
+       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+               /*
+                * toggle ordered color
+                */
+               if (blk_barrier_rq(rq))
+                       q->ordcolor ^= 1;
+
+               /*
+                * barriers implicitly indicate back insertion
+                */
+               if (where == ELEVATOR_INSERT_SORT)
+                       where = ELEVATOR_INSERT_BACK;
+
+               /*
+                * this request is a scheduling boundary, update
+                * end_sector
+                */
+               if (blk_fs_request(rq)) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = rq;
+               }
+       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+               where = ELEVATOR_INSERT_BACK;
+
+       if (plug)
+               blk_plug_device(q);
+
+       elv_insert(q, rq, where);
+}
+
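
For context, the color logic the wrapper keeps works as follows (a
hedged summary of the 2.6.15-era barrier code): q->ordcolor alternates
on every barrier, and each request is tagged with the color current at
queueing time, so the completion path can tell requests queued before a
barrier from those queued after it.  A toy userspace model, with an
illustrative flag value:

#include <stdio.h>

#define TOY_ORDERED_COLOR	(1 << 0)	/* stands in for REQ_ORDERED_COLOR */

struct toy_rq {
	unsigned flags;
};

static unsigned ordcolor;			/* stands in for q->ordcolor */

static void toy_queue(struct toy_rq *rq, int is_barrier)
{
	/* tag with the current color first... */
	if (ordcolor)
		rq->flags |= TOY_ORDERED_COLOR;
	/* ...then let a barrier start the next color */
	if (is_barrier)
		ordcolor ^= 1;
}

int main(void)
{
	struct toy_rq before = { 0 }, barrier = { 0 }, after = { 0 };

	toy_queue(&before, 0);	/* pre-barrier: color 0 */
	toy_queue(&barrier, 1);	/* barrier keeps the old color, flips it */
	toy_queue(&after, 0);	/* post-barrier: color 1 */

	printf("%u %u %u\n", before.flags, barrier.flags, after.flags);
	return 0;
}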
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
                     int plug)
 {
 
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
 
 extern void elv_dispatch_sort(request_queue_t *, struct request *);
 extern void elv_add_request(request_queue_t *, struct request *, int, int);
 extern void __elv_add_request(request_queue_t *, struct request *, int, int);
+extern void elv_insert(request_queue_t *, struct request *, int);
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
                               struct request *);
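
With the declaration in place, the header exports three insertion entry
points.  A minimal usage sketch, assuming the usual 2.6.16-era locking
rules (elv_add_request() takes q->queue_lock itself, the other two
expect the caller to hold it); example_insert() and the three requests
are hypothetical, and a real caller would pick exactly one path per
request:

static void example_insert(request_queue_t *q, struct request *a,
			   struct request *b, struct request *c)
{
	unsigned long flags;

	/* no lock held: locked wrapper, may plug the queue */
	elv_add_request(q, a, ELEVATOR_INSERT_SORT, 1);

	spin_lock_irqsave(q->queue_lock, flags);

	/* lock held: barrier preprocessing, optional plug, then insert */
	__elv_add_request(q, b, ELEVATOR_INSERT_BACK, 0);

	/* lock held, request already preprocessed: raw insert */
	elv_insert(q, c, ELEVATOR_INSERT_FRONT);

	spin_unlock_irqrestore(q->queue_lock, flags);
}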