X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=block%2Fblk-core.c;h=c3df30cfb3fc332d5a503677b5b383dcf2660ec2;hb=fd55cd3d3a8e57b6fac5966a23aca3cf6035b34c;hp=b2d0ac8b760e1c87f2c3e7fad5853d43788141c6;hpb=ef9e3facdf1fe1228721a7c295a76d1b7a0e57ec;p=linux-2.6-omap-h63xx.git

diff --git a/block/blk-core.c b/block/blk-core.c
index b2d0ac8b760..c3df30cfb3f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -257,7 +257,6 @@ void __generic_unplug_device(struct request_queue *q)
 
 	q->request_fn(q);
 }
-EXPORT_SYMBOL(__generic_unplug_device);
 
 /**
  * generic_unplug_device - fire a request queue
@@ -325,6 +324,9 @@ EXPORT_SYMBOL(blk_unplug);
 
 static void blk_invoke_request_fn(struct request_queue *q)
 {
+	if (unlikely(blk_queue_stopped(q)))
+		return;
+
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
@@ -399,8 +401,13 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
@@ -418,6 +425,12 @@ EXPORT_SYMBOL(__blk_run_queue);
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    see @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -501,6 +514,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -884,7 +898,8 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
@@ -1003,8 +1018,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 }
 
 /**
- * part_round_stats() - Round off the performance stats on a struct
- * disk_stats.
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
  *
  * The average IO queue length and utilisation statistics are maintained
  * by observing the current state of the queue length and the amount of
@@ -1075,8 +1091,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->cmd_flags |= REQ_FAILFAST;
+	if (bio_rw_ahead(bio))
+		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+				   REQ_FAILFAST_DRIVER);
+	if (bio_failfast_dev(bio))
+		req->cmd_flags |= REQ_FAILFAST_DEV;
+	if (bio_failfast_transport(bio))
+		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	if (bio_failfast_driver(bio))
+		req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
@@ -1790,17 +1813,6 @@ static void end_that_request_last(struct request *req, int error)
 	}
 }
 
-static inline void __end_request(struct request *rq, int uptodate,
-				 unsigned int nr_bytes)
-{
-	int error = 0;
-
-	if (uptodate <= 0)
-		error = uptodate ? uptodate : -EIO;
-
-	__blk_end_request(rq, error, nr_bytes);
-}
-
 /**
  * blk_rq_bytes - Returns bytes left to complete in the entire request
  * @rq: the request being processed
@@ -1830,41 +1842,6 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
-/**
- * end_queued_request - end all I/O on a queued request
- * @rq: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request, and removes it from the block layer queues.
- *     Not suitable for normal I/O completion, unless the driver still has
- *     the request attached to the block layer.
- *
- **/
-void end_queued_request(struct request *rq, int uptodate)
-{
-	__end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_queued_request);
-
-/**
- * end_dequeued_request - end all I/O on a dequeued request
- * @rq: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends all I/O on a request. The request must already have been
- *     dequeued using blkdev_dequeue_request(), as is normally the case
- *     for most drivers.
- *
- **/
-void end_dequeued_request(struct request *rq, int uptodate)
-{
-	__end_request(rq, uptodate, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(end_dequeued_request);
-
-
 /**
  * end_request - end I/O on the current segment of the request
  * @req: the request being processed
@@ -1879,14 +1856,16 @@ EXPORT_SYMBOL(end_dequeued_request);
  * they have a residual value to account for. For that case this function
  * isn't really useful, unless the residual just happens to be the
  * full current segment. In other words, don't use this function in new
- * code. Use blk_end_request() or __blk_end_request() to end partial parts
- * of a request, or end_dequeued_request() and end_queued_request() to
- * completely end IO on a dequeued/queued request.
- *
+ * code. Use blk_end_request() or __blk_end_request() to end a request.
  **/
 void end_request(struct request *req, int uptodate)
 {
-	__end_request(req, uptodate, req->hard_cur_sectors << 9);
+	int error = 0;
+
+	if (uptodate <= 0)
+		error = uptodate ? uptodate : -EIO;
+
+	__blk_end_request(req, error, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
 
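Usage notes follow. Each is a minimal sketch against the 2.6.27-era block API, not code from this patch; driver names are hypothetical.

blk_invoke_request_fn() now returns early when the queue is stopped, so a driver that calls blk_stop_queue() on resource exhaustion will not have its ->request_fn() re-invoked behind its back until it calls blk_start_queue(). A sketch of the pattern this protects; my_dev_can_queue() and my_dev_issue() are hypothetical helpers:

#include <linux/blkdev.h>

static int my_dev_can_queue(void *dev);		/* hypothetical helpers */
static void my_dev_issue(struct request *rq);

/* ->request_fn() is invoked with the queue lock held, so
 * blk_stop_queue() may be called directly here. */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!my_dev_can_queue(q->queuedata)) {
			/* pause dispatch; blk_invoke_request_fn() now
			 * respects this and returns early */
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(rq);
		my_dev_issue(rq);
	}
}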
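The failfast change splits the single REQ_FAILFAST request flag into three classes (device, transport, driver), each mapped from its own bio flag by init_request_from_bio(); read-ahead bios get all three. A sketch of a submitter requesting transport-only failfast; the BIO_RW_FAILFAST_TRANSPORT bit name is an assumption from the same patch series, since this diff shows only the bio_failfast_*() accessors and the REQ_FAILFAST_* request flags:

#include <linux/bio.h>

/* Fail fast on transport errors (e.g. a lost path), while still
 * allowing device- and driver-level retries.  init_request_from_bio()
 * then sets only REQ_FAILFAST_TRANSPORT on the resulting request. */
static void mark_transport_failfast(struct bio *bio)
{
	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);	/* assumed flag name */
}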
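With end_queued_request() and end_dequeued_request() removed, driver completion paths call blk_end_request() or __blk_end_request() directly, converting the old-style uptodate flag to an errno exactly as the removed __end_request() helper did. A sketch of that migration for a hypothetical completion routine:

#include <linux/blkdev.h>

/* Before this patch: end a dequeued request from an uptodate flag
 * (%1 success, %0 or a negative errno for failure). */
static void my_complete_old(struct request *rq, int uptodate)
{
	end_dequeued_request(rq, uptodate);	/* removed by this patch */
}

/* After this patch: convert to an errno and complete explicitly.
 * __blk_end_request() must be called with the queue lock held;
 * use blk_end_request() otherwise. */
static void my_complete_new(struct request *rq, int uptodate)
{
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	__blk_end_request(rq, error, blk_rq_bytes(rq));
}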