X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=block%2Fblk-tag.c;h=c0d419e84ce7f8518e1246e3ec61b07f5c81efe8;hb=b04accc425d52ca59699290661e0dfd09b0feeeb;hp=de64e04299771f08eebb391616d63b72c2fca64a;hpb=be2e88011bd800222bfd7b477c727966f93186a9;p=linux-2.6-omap-h63xx.git

diff --git a/block/blk-tag.c b/block/blk-tag.c
index de64e042997..c0d419e84ce 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -29,7 +29,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
  * __blk_free_tags - release a given set of tag maintenance info
  * @bqt:	the tag map to free
  *
- * Tries to free the specified @bqt@. Returns true if it was
+ * Tries to free the specified @bqt. Returns true if it was
  * actually freed and false if there are still references using it
  */
 static int __blk_free_tags(struct blk_queue_tag *bqt)
@@ -38,7 +38,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
 	retval = atomic_dec_and_test(&bqt->refcnt);
 	if (retval) {
-		BUG_ON(bqt->busy);
+		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+							bqt->max_depth);
 
 		kfree(bqt->tag_index);
 		bqt->tag_index = NULL;
@@ -70,14 +71,14 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
  * blk_free_tags - release a given set of tag maintenance info
  * @bqt:	the tag map to free
  *
- * For externally managed @bqt@ frees the map.  Callers of this
+ * For externally managed @bqt frees the map.  Callers of this
  * function must guarantee to have released all the queues that
  * might have been using this tag map.
  */
@@ -93,12 +94,12 @@ EXPORT_SYMBOL(blk_free_tags);
  * @q:  the request queue for the device
  *
  *  Notes:
- *	This is used to disabled tagged queuing to a device, yet leave
+ *	This is used to disable tagged queuing to a device, yet leave
  *	queue in function.
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -147,7 +148,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 	if (init_tag_map(q, tags, depth))
 		goto fail;
 
-	tags->busy = 0;
 	atomic_set(&tags->refcnt, 1);
 	return tags;
 fail:
@@ -171,6 +171,9 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q:  the request queue for the device
  * @depth:  the maximum queue depth supported
  * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
 **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
 			struct blk_queue_tag *tags)
@@ -197,7 +200,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	queue_flag_set(QUEUE_FLAG_QUEUED, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
@@ -268,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
 * @rq: the request that has completed
 *
 *  Description:
- *    Typically called when end_that_request_first() returns 0, meaning
+ *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
@@ -310,7 +313,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	 * unlock memory barrier semantics.
 	 */
 	clear_bit_unlock(tag, bqt->tag_map);
-	bqt->busy--;
 }
 EXPORT_SYMBOL(blk_queue_end_tag);
 
@@ -335,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
+	unsigned max_depth, offset;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -348,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	/*
 	 * Protect against shared tag maps, as we may not have exclusive
 	 * access to the tag map.
+	 *
+	 * We reserve a few tags just for sync IO, since we don't want
+	 * to starve sync IO on behalf of flooding async IO.
 	 */
+	max_depth = bqt->max_depth;
+	if (rq_is_sync(rq))
+		offset = 0;
+	else
+		offset = max_depth >> 2;
+
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-		if (tag >= bqt->max_depth)
+		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+		if (tag >= max_depth)
 			return 1;
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
@@ -365,7 +377,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
 	list_add(&rq->queuelist, &q->tag_busy_list);
-	bqt->busy++;
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
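
Below is a minimal, stand-alone C sketch of the allocation policy the last two hunks introduce: async requests begin their tag search a quarter of the way into the map, so the low tags are effectively reserved for sync IO, and "is any tag still in use" is derived from the bitmap itself (as the new BUG_ON() does via find_first_bit()) rather than from the removed busy counter. The bool array, the fixed depth of 32, and the helper names are illustrative stand-ins for bqt->tag_map, bqt->max_depth and the locked bit operations; this is not kernel code.

/* User-space sketch of the reserved-offset tag allocation shown above. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 32

static bool tag_map[MAX_DEPTH];		/* stand-in for bqt->tag_map */

/* Mirrors find_first_bit(map, depth) < depth: true if any tag is allocated. */
static bool map_busy(void)
{
	for (unsigned int i = 0; i < MAX_DEPTH; i++)
		if (tag_map[i])
			return true;
	return false;
}

/* Returns an allocated tag, or -1 when the allowed range is exhausted. */
static int start_tag(bool is_sync)
{
	/* async IO may only pick tags from max_depth/4 upward */
	unsigned int offset = is_sync ? 0 : MAX_DEPTH >> 2;

	for (unsigned int tag = offset; tag < MAX_DEPTH; tag++) {
		if (!tag_map[tag]) {
			tag_map[tag] = true;	/* test_and_set_bit_lock() in the kernel */
			return (int)tag;
		}
	}
	return -1;
}

int main(void)
{
	printf("first async tag: %d\n", start_tag(false));	/* prints 8 */
	printf("first sync tag:  %d\n", start_tag(true));	/* prints 0 */
	printf("map busy:        %d\n", map_busy());		/* prints 1 */
	return 0;
}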