X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=block%2Fblk-merge.c;h=651136aae76e45ba821205a830a919a3d6d105c5;hb=0e6859d49ff194e01afc229c996e3aefca1a0539;hp=0f58616bcd7f183514c43eb913f0e0c23c855215;hpb=d55a4528f7f607ca2872fec18574bc8cec060f05;p=linux-2.6-omap-h63xx.git

diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f58616bcd7..651136aae76 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
         if (!rq->bio)
                 return;
 
-        cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
         hw_seg_size = seg_size = 0;
         phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
         rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                    struct bio *nxt)
 {
-        if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+        if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                 return 0;
 
         if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
                                  struct bio *nxt)
 {
-        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+        if (!bio_flagged(bio, BIO_SEG_VALID))
                 blk_recount_segments(q, bio);
-        if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+        if (!bio_flagged(nxt, BIO_SEG_VALID))
                 blk_recount_segments(q, nxt);
         if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
             BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         int nsegs, cluster;
 
         nsegs = 0;
-        cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
         /*
          * for each bio in rq
@@ -220,6 +220,15 @@ new_segment:
                 bvprv = bvec;
         } /* segments in rq */
 
+
+        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+            (rq->data_len & q->dma_pad_mask)) {
+                unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+                sg->length += pad_len;
+                rq->extra_len += pad_len;
+        }
+
         if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                 if (rq->cmd_flags & REQ_RW)
                         memset(q->dma_drain_buffer, 0, q->dma_drain_size);
@@ -303,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
                         q->last_merge = NULL;
                 return 0;
         }
-        if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                 blk_recount_segments(q, req->biotail);
-        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+        if (!bio_flagged(bio, BIO_SEG_VALID))
                 blk_recount_segments(q, bio);
         len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
         if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
@@ -343,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                 return 0;
         }
         len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+        if (!bio_flagged(bio, BIO_SEG_VALID))
                 blk_recount_segments(q, bio);
-        if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                 blk_recount_segments(q, req->bio);
         if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio))
             && !BIOVEC_VIRT_OVERSIZE(len)) {
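
A note on the REQ_COPY_USER hunk at @@ -220,6 +220,15 @@: the expression (q->dma_pad_mask & ~rq->data_len) + 1 is the number of bytes needed to round rq->data_len up to the next (dma_pad_mask + 1) boundary, and the patch accounts for that slack by widening the last scatterlist entry and recording it in rq->extra_len. Below is a minimal user-space sketch of the same arithmetic, assuming the mask has the usual power-of-two-minus-one form; pad_demo and the main() harness are illustrative names, not kernel code.

#include <stdio.h>

/* Same expression as the patch: bytes needed to reach the next
 * (dma_pad_mask + 1) boundary. Only meaningful when data_len is not
 * already aligned, mirroring the (rq->data_len & q->dma_pad_mask)
 * check in the hunk. */
static unsigned int pad_demo(unsigned int data_len, unsigned int dma_pad_mask)
{
        return (dma_pad_mask & ~data_len) + 1;
}

int main(void)
{
        unsigned int mask = 3;   /* pad transfers to a 4-byte boundary */
        unsigned int len = 510;  /* unaligned transfer length */

        if (len & mask) {
                unsigned int pad_len = pad_demo(len, mask);
                printf("len=%u pad_len=%u padded=%u\n",
                       len, pad_len, len + pad_len);
                /* prints: len=510 pad_len=2 padded=512 */
        }
        return 0;
}

For a mask of the form 2^n - 1, (mask & ~len) + 1 equals (mask + 1) - (len & mask) whenever len is unaligned, so the padded length lands exactly on the next boundary rather than overshooting it.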