X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=block%2Fll_rw_blk.c;h=8b919940b2abdb37119cd9927b6f45dbec66aa31;hb=b334349eb4c8562fd60bc8a8bd5ba6b42f22b8ac;hp=b01dee3ae7f36c1d29014bc91fee09466ed7b922;hpb=0e4bd10c9b7e9ed65132f5935328275deb760384;p=linux-2.6-omap-h63xx.git

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b01dee3ae7f..8b919940b2a 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,7 +39,7 @@
 static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void drive_stat_acct(struct request *rq, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 	retval = atomic_dec_and_test(&bqt->refcnt);
 	if (retval) {
 		BUG_ON(bqt->busy);
-		BUG_ON(!list_empty(&bqt->busy_list));
 
 		kfree(bqt->tag_index);
 		bqt->tag_index = NULL;
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 	if (init_tag_map(q, tags, depth))
 		goto fail;
 
-	INIT_LIST_HEAD(&tags->busy_list);
 	tags->busy = 0;
 	atomic_set(&tags->refcnt, 1);
 	return tags;
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 */
 	q->queue_tags = tags;
 	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
 	kfree(tags);
@@ -1057,18 +1056,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	bqt->tag_index[tag] = NULL;
 
-	/*
-	 * We use test_and_clear_bit's memory ordering properties here.
-	 * The tag_map bit acts as a lock for tag_index[bit], so we need
-	 * a barrer before clearing the bit (precisely: release semantics).
-	 * Could use clear_bit_unlock when it is merged.
-	 */
-	if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
 		       __FUNCTION__, tag);
 		return;
 	}
-
+	/*
+	 * The tag_map bit acts as a lock for tag_index[bit], so we need
+	 * unlock memory barrier semantics.
+	 */
+	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
 
@@ -1114,17 +1111,17 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		if (tag >= bqt->max_depth)
 			return 1;
 
-	} while (test_and_set_bit(tag, bqt->tag_map));
+	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
-	 * We rely on test_and_set_bit providing lock memory ordering semantics
-	 * (could use test_and_set_bit_lock when it is merged).
+	 * We need lock ordering semantics given by test_and_set_bit_lock.
+	 * See blk_queue_end_tag for details.
 	 */
 
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
-	list_add(&rq->queuelist, &bqt->busy_list);
+	list_add(&rq->queuelist, &q->tag_busy_list);
 	bqt->busy++;
 	return 0;
 }
@@ -1145,24 +1142,10 @@
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
-	struct blk_queue_tag *bqt = q->queue_tags;
 	struct list_head *tmp, *n;
-	struct request *rq;
 
-	list_for_each_safe(tmp, n, &bqt->busy_list) {
-		rq = list_entry_rq(tmp);
-
-		if (rq->tag == -1) {
-			printk(KERN_ERR
-			       "%s: bad tag found on list\n", __FUNCTION__);
-			list_del_init(&rq->queuelist);
-			rq->cmd_flags &= ~REQ_QUEUED;
-		} else
-			blk_queue_end_tag(q, rq);
-
-		rq->cmd_flags &= ~REQ_STARTED;
-		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-	}
+	list_for_each_safe(tmp, n, &q->tag_busy_list)
+		blk_requeue_request(q, list_entry_rq(tmp));
 }
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
@@ -1373,7 +1356,7 @@ new_segment:
 	} /* segments in rq */
 
 	if (sg)
-		__sg_mark_end(sg);
+		sg_mark_end(sg);
 
 	return nsegs;
 }
@@ -1638,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
 	struct request_queue *q = bdi->unplug_io_data;
 
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
-
-		q->unplug_fn(q);
-	}
+	blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1670,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
 	kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -1738,6 +1727,7 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->unplug_timer);
+	kblockd_flush_work(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -2341,7 +2331,7 @@
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
 
-	drive_stat_acct(rq, rq->nr_sectors, 1);
+	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
 	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2736,7 +2726,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int new_io)
 {
 	int rw = rq_data_dir(rq);
 
@@ -2758,7 +2748,7 @@
  */
 static inline void add_request(struct request_queue * q, struct request * req)
 {
-	drive_stat_acct(req, req->nr_sectors, 1);
+	drive_stat_acct(req, 1);
 
 	/*
 	 * elevator indicated where it wants this request to be
@@ -3015,7 +3005,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		req->biotail = bio;
 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 		req->ioprio = ioprio_best(req->ioprio, prio);
-		drive_stat_acct(req, nr_sectors, 0);
+		drive_stat_acct(req, 0);
 		if (!attempt_back_merge(q, req))
 			elv_merged_request(q, req, el_ret);
 		goto out;
@@ -3042,7 +3032,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		req->sector = req->hard_sector = bio->bi_sector;
 		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 		req->ioprio = ioprio_best(req->ioprio, prio);
-		drive_stat_acct(req, nr_sectors, 0);
+		drive_stat_acct(req, 0);
 		if (!attempt_front_merge(q, req))
 			elv_merged_request(q, req, el_ret);
 		goto out;
@@ -3224,6 +3214,7 @@ static inline void __generic_make_request(struct bio *bio)
 	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
+	int err = -EIO;
 
 	might_sleep();
 
@@ -3251,7 +3242,7 @@
 				bdevname(bio->bi_bdev, b),
 				(long long) bio->bi_sector);
 end_io:
-			bio_endio(bio, -EIO);
+			bio_endio(bio, err);
 			break;
 		}
 
@@ -3286,6 +3277,10 @@ end_io:
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
+		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+			err = -EOPNOTSUPP;
+			goto end_io;
+		}
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
@@ -4085,23 +4080,7 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(q->max_phys_segments, page);
-}
-
-static ssize_t queue_max_segments_store(struct request_queue *q,
-					const char *page, size_t count)
-{
-	unsigned long segments;
-	ssize_t ret = queue_var_store(&segments, page, count);
-	spin_lock_irq(q->queue_lock);
-	q->max_phys_segments = segments;
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
 
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
@@ -4125,12 +4104,6 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
 	.show = queue_max_hw_sectors_show,
 };
 
-static struct queue_sysfs_entry queue_max_segments_entry = {
-	.attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_max_segments_show,
-	.store = queue_max_segments_store,
-};
-
 static struct queue_sysfs_entry queue_iosched_entry = {
 	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
 	.show = elv_iosched_show,
@@ -4142,7 +4115,6 @@ static struct attribute *default_attrs[] = {
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
-	&queue_max_segments_entry.attr,
 	&queue_iosched_entry.attr,
 	NULL,
 };
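
The tag-map hunks above make two related changes: the per-tag busy list moves from the shared struct blk_queue_tag to the per-queue q->tag_busy_list, so queues that share a tag map no longer share (and corrupt) one list, and the open-coded test_and_set_bit()/test_and_clear_bit() calls become the lock bitops test_and_set_bit_lock()/clear_bit_unlock(), whose acquire/release ordering makes each tag_map bit a real lock protecting the matching tag_index slot. Below is a minimal userspace sketch of that lock-bit idiom, using C11 atomics in place of the kernel bitops; start_tag(), end_tag(), MAX_TAGS and the surrounding scaffolding are illustrative assumptions, not kernel API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TAGS 64

static _Atomic uint64_t tag_map;	/* one bit per tag */
static void *tag_index[MAX_TAGS];	/* bit n "locks" tag_index[n] */

/* Claim the lowest free tag for rq, or return -1 if all are busy. */
static int start_tag(void *rq)
{
	for (int tag = 0; tag < MAX_TAGS; tag++) {
		uint64_t bit = UINT64_C(1) << tag;

		/*
		 * Acquire ordering, like test_and_set_bit_lock(): the
		 * store to tag_index[tag] below cannot be reordered
		 * before the bit is observed to be ours.
		 */
		if (!(atomic_fetch_or_explicit(&tag_map, bit,
					       memory_order_acquire) & bit)) {
			tag_index[tag] = rq;
			return tag;
		}
	}
	return -1;
}

/* Release a tag; release ordering publishes tag_index[tag] = NULL. */
static void end_tag(int tag)
{
	tag_index[tag] = NULL;
	/* like clear_bit_unlock() */
	atomic_fetch_and_explicit(&tag_map, ~(UINT64_C(1) << tag),
				  memory_order_release);
}

int main(void)
{
	int dummy_rq;
	int tag = start_tag(&dummy_rq);

	if (tag >= 0) {
		printf("claimed tag %d\n", tag);
		end_tag(tag);
	}
	return 0;
}

The pairing is the whole point: the acquire on allocation and the release on free bracket every access the owner makes to its slot, so tag_index needs no further locking. That is exactly why blk_queue_end_tag() above can clear tag_index[tag] first and only then drop the bit with clear_bit_unlock().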