X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=block%2Fll_rw_blk.c;h=3795e0708a229b9641d2c70354b81e641dc3853b;hb=9e69fbb5373f7c081acdf2b75d7bac7e95023dd1;hp=fb6789725e1b681fc9cf67225b971baf2c2b10a3;hpb=18ed1c051317ac3a685120cead2adb192b802347;p=linux-2.6-omap-h63xx.git

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index fb6789725e1..3795e0708a2 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -340,6 +340,15 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	if (rq == &q->post_flush_rq)
 		return QUEUE_ORDSEQ_POSTFLUSH;
 
+	/*
+	 * !fs requests don't need to follow barrier ordering. Always
+	 * put them at the front. This fixes the following deadlock.
+	 *
+	 * http://thread.gmane.org/gmane.linux.kernel/537473
+	 */
+	if (!blk_fs_request(rq))
+		return QUEUE_ORDSEQ_DRAIN;
+
 	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
 	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
 		return QUEUE_ORDSEQ_DRAIN;
@@ -518,8 +527,6 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
 {
 	request_queue_t *q = bio->bi_private;
-	struct bio_vec *bvec;
-	int i;
 
 	/*
 	 * This is dry run, restore bio_sector and size. We'll finish
@@ -531,13 +538,6 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
 	if (bio->bi_size)
 		return 1;
 
-	/* Rewind bvec's */
-	bio->bi_idx = 0;
-	bio_for_each_segment(bvec, bio, i) {
-		bvec->bv_len += bvec->bv_offset;
-		bvec->bv_offset = 0;
-	}
-
 	/* Reset bio */
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
 	bio->bi_size = q->bi_size;
@@ -1221,7 +1221,7 @@ void blk_recount_segments(request_queue_t *q, struct bio *bio)
 		 * considered part of another segment, since that might
 		 * change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
 		if (high || highprv)
 			goto new_hw_segment;
 		if (cluster) {
@@ -1264,7 +1264,7 @@ new_hw_segment:
 	bio->bi_hw_segments = nr_hw_segs;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
-
+EXPORT_SYMBOL(blk_recount_segments);
 
 static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
 				   struct bio *nxt)
@@ -1295,9 +1295,9 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
 	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
 		blk_recount_segments(q, nxt);
 	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
+	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
 		return 0;
-	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
 		return 0;
 
 	return 1;
@@ -1704,7 +1704,7 @@ EXPORT_SYMBOL(blk_stop_queue);
  *     on a queue, such as calling the unplug function after a timeout.
  *     A block device may call blk_sync_queue to ensure that any
  *     such activity is cancelled, thus allowing it to release resources
- *     the the callbacks might use. The caller must already have made sure
+ *     that the callbacks might use. The caller must already have made sure
  *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
@@ -1712,7 +1712,6 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->unplug_timer);
-	kblockd_flush();
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -1925,6 +1924,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 
+	q->sg_reserved_size = INT_MAX;
+
 	/*
 	 * all done
 	 */
@@ -2556,6 +2557,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 		bio->bi_rw |= (1 << BIO_RW);
 
 	blk_rq_bio_prep(q, rq, bio);
+	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
@@ -3114,7 +3116,7 @@ static inline int should_fail_request(struct bio *bio)
  * bi_sector for remaps as it sees fit. So the values of these fields
  * should NOT be depended on after the call to generic_make_request.
  */
-void generic_make_request(struct bio *bio)
+static inline void __generic_make_request(struct bio *bio)
 {
 	request_queue_t *q;
 	sector_t maxsector;
@@ -3213,6 +3215,57 @@ end_io:
 	} while (ret);
 }
 
+/*
+ * We only want one ->make_request_fn to be active at a time,
+ * else stack usage with stacked devices could be a problem.
+ * So use current->bio_{list,tail} to keep a list of requests
+ * submited by a make_request_fn function.
+ * current->bio_tail is also used as a flag to say if
+ * generic_make_request is currently active in this task or not.
+ * If it is NULL, then no make_request is active. If it is non-NULL,
+ * then a make_request is active, and new requests should be added
+ * at the tail
+ */
+void generic_make_request(struct bio *bio)
+{
+	if (current->bio_tail) {
+		/* make_request is active */
+		*(current->bio_tail) = bio;
+		bio->bi_next = NULL;
+		current->bio_tail = &bio->bi_next;
+		return;
+	}
+	/* following loop may be a bit non-obvious, and so deserves some
+	 * explanation.
+	 * Before entering the loop, bio->bi_next is NULL (as all callers
+	 * ensure that) so we have a list with a single bio.
+	 * We pretend that we have just taken it off a longer list, so
+	 * we assign bio_list to the next (which is NULL) and bio_tail
+	 * to &bio_list, thus initialising the bio_list of new bios to be
+	 * added. __generic_make_request may indeed add some more bios
+	 * through a recursive call to generic_make_request. If it
+	 * did, we find a non-NULL value in bio_list and re-enter the loop
+	 * from the top. In this case we really did just take the bio
+	 * of the top of the list (no pretending) and so fixup bio_list and
+	 * bio_tail or bi_next, and call into __generic_make_request again.
+	 *
+	 * The loop was structured like this to make only one call to
+	 * __generic_make_request (which is important as it is large and
+	 * inlined) and to keep the structure simple.
+	 */
+	BUG_ON(bio->bi_next);
+	do {
+		current->bio_list = bio->bi_next;
+		if (bio->bi_next == NULL)
+			current->bio_tail = &current->bio_list;
+		else
+			bio->bi_next = NULL;
+		__generic_make_request(bio);
+		bio = current->bio_list;
+	} while (bio);
+	current->bio_tail = NULL; /* deactivate */
+}
+
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -3505,7 +3558,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 	 * If a CPU goes away, splice its entries to the current CPU
 	 * and trigger a run of the softirq
 	 */
-	if (action == CPU_DEAD) {
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		int cpu = (unsigned long) hcpu;
 
 		local_irq_disable();
@@ -3629,11 +3682,11 @@ int kblockd_schedule_work(struct work_struct *work)
 
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-void kblockd_flush(void)
+void kblockd_flush_work(struct work_struct *work)
 {
-	flush_workqueue(kblockd_workqueue);
+	cancel_work_sync(work);
 }
-EXPORT_SYMBOL(kblockd_flush);
+EXPORT_SYMBOL(kblockd_flush_work);
 
 int __init blk_dev_init(void)
 {
@@ -3658,8 +3711,8 @@ int __init blk_dev_init(void)
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
 	register_hotcpu_notifier(&blk_cpu_notifier);
 
-	blk_max_low_pfn = max_low_pfn;
-	blk_max_pfn = max_pfn;
+	blk_max_low_pfn = max_low_pfn - 1;
+	blk_max_pfn = max_pfn - 1;
 
 	return 0;
 }
@@ -3741,6 +3794,7 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		ret->cic_root.rb_node = NULL;
+		ret->ioc_data = NULL;
 		/* make sure set_task_ioprio() sees the settings above */
 		smp_wmb();
 		tsk->io_context = ret;
@@ -3748,7 +3802,6 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 
 	return ret;
 }
-EXPORT_SYMBOL(current_io_context);
 
 /*
  * If the current task has no IO context then create one and initialise it.
@@ -4038,6 +4091,13 @@ int blk_register_queue(struct gendisk *disk)
 		return ret;
 	}
 
+	ret = bsg_register_disk(disk);
+	if (ret) {
+		elv_unregister_queue(q);
+		kobject_unregister(&q->kobj);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -4046,6 +4106,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	request_queue_t *q = disk->queue;
 
 	if (q && q->request_fn) {
+		bsg_unregister_disk(disk);
 		elv_unregister_queue(q);
 
 		kobject_uevent(&q->kobj, KOBJ_REMOVE