* considered part of another segment, since that might
* change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+ high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
if (high || highprv)
goto new_hw_segment;
if (cluster) {
bio->bi_hw_segments = nr_hw_segs;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
-
+EXPORT_SYMBOL(blk_recount_segments);
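[editor's note] The comparison fix above pairs with the blk_max_pfn change at
the end of this patch: q->bounce_pfn now names the highest directly
addressable pfn rather than the first unaddressable one, so only pages
strictly above it need bouncing. A minimal compile-standalone sketch of the
corrected predicate (mock types, not the kernel's):

#include <stdbool.h>

typedef unsigned long pfn_t;

/* A page needs a bounce only when it lies strictly above bounce_pfn,
 * which now names the highest pfn the device can address directly. */
static bool page_needs_bounce(pfn_t page_pfn, pfn_t bounce_pfn)
{
        return page_pfn > bounce_pfn;   /* was ">=": an off-by-one */
}
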
static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
struct bio *nxt)
return 1;
}
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
- struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
{
unsigned short max_sectors;
int len;
return ll_new_hw_segment(q, req, bio);
}
+EXPORT_SYMBOL(ll_back_merge_fn);
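[editor's note] Exporting ll_back_merge_fn() lets callers invoke the default
back-merge check directly instead of going through the per-queue
->back_merge_fn pointer this patch removes. A reduced, compile-standalone
mock of the calling pattern, mirroring the __blk_rq_map_user() hunk further
down (the struct layouts and back_merge_ok() are illustrative stand-ins,
not the real kernel types):

#include <stddef.h>
#include <errno.h>

struct mbio { struct mbio *bi_next; unsigned int bi_size; };
struct mrq  { struct mbio *bio, *biotail; unsigned int data_len; };

/* stand-in for ll_back_merge_fn(): approve or reject a tail merge */
static int back_merge_ok(struct mrq *rq, struct mbio *bio)
{
        (void)rq; (void)bio;
        return 1;       /* the real helper checks segment/sector limits */
}

static int append_bio(struct mrq *rq, struct mbio *bio)
{
        if (!rq->bio) {
                rq->bio = rq->biotail = bio;    /* first bio: set up rq */
                rq->data_len = bio->bi_size;
        } else if (!back_merge_ok(rq, bio)) {
                return -EINVAL;                 /* would exceed queue limits */
        } else {
                rq->biotail->bi_next = bio;     /* link at the tail */
                rq->biotail = bio;
                rq->data_len += bio->bi_size;
        }
        return 0;
}
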
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
struct bio *bio)
}
q->request_fn = rfn;
- q->back_merge_fn = ll_back_merge_fn;
- q->front_merge_fn = ll_front_merge_fn;
- q->merge_requests_fn = ll_merge_requests_fn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
- gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
struct io_context *ioc = NULL;
+ const int rw = rw_flags & 0x01;
int may_queue, priv;
- may_queue = elv_may_queue(q, rw);
+ may_queue = elv_may_queue(q, rw_flags);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, priv, gfp_mask);
+ rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
struct bio *bio)
{
+ const int rw = rw_flags & 0x01;
struct request *rq;
- rq = get_request(q, rw, bio, GFP_NOIO);
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
while (!rq) {
DEFINE_WAIT(wait);
struct request_list *rl = &q->rq;
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- rq = get_request(q, rw, bio, GFP_NOIO);
+ rq = get_request(q, rw_flags, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
else
bio = bio_copy_user(q, uaddr, len, reading);
- if (IS_ERR(bio)) {
+ if (IS_ERR(bio))
return PTR_ERR(bio);
- }
orig_bio = bio;
blk_queue_bounce(q, &bio);
+
/*
* We link the bounce buffer in and could have to traverse it
* later so we have to get a ref to prevent it from being freed
*/
bio_get(bio);
- /*
- * for most (all? don't know of any) queues we could
- * skip grabbing the queue lock here. only drivers with
- * funky private ->back_merge_fn() function could be
- * problematic.
- */
- spin_lock_irq(q->queue_lock);
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
- else if (!q->back_merge_fn(q, rq, bio)) {
+ else if (!ll_back_merge_fn(q, rq, bio)) {
ret = -EINVAL;
- spin_unlock_irq(q->queue_lock);
goto unmap_bio;
} else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->nr_sectors += bio_sectors(bio);
- rq->hard_nr_sectors = rq->nr_sectors;
rq->data_len += bio->bi_size;
}
- spin_unlock_irq(q->queue_lock);
return bio->bi_size;
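[editor's note] The bio_get() above is what keeps the error path sane: the
request holds a pointer into the bio list, so the map path takes an extra
reference and the unmap walk drops it later. A toy refcount model of that
pairing (mock type and helpers, not the kernel's bio accounting):

#include <assert.h>

struct mbio { int refcnt; };

static void mbio_get(struct mbio *bio) { bio->refcnt++; }
static int  mbio_put(struct mbio *bio) { return --bio->refcnt == 0; }

int main(void)
{
        struct mbio bio = { .refcnt = 1 };      /* submitter's reference */

        mbio_get(&bio);                 /* map path: rq keeps a pointer */
        assert(!mbio_put(&bio));        /* submitter drops its ref: alive */
        assert(mbio_put(&bio));         /* unmap walk's put: now freed */
        return 0;
}
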
unsigned long len)
{
unsigned long bytes_read = 0;
+ struct bio *bio = NULL;
int ret;
if (len > (q->max_hw_sectors << 9))
ret = __blk_rq_map_user(q, rq, ubuf, map_len);
if (ret < 0)
goto unmap_rq;
+ if (!bio)
+ bio = rq->bio;
bytes_read += ret;
ubuf += ret;
}
rq->buffer = rq->data = NULL;
return 0;
unmap_rq:
- blk_rq_unmap_user(rq);
+ blk_rq_unmap_user(bio);
return ret;
}
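[editor's note] The new local in blk_rq_map_user() exists because rq->bio is
not stable: io completion may advance it, so the error path has to unmap
from the originally mapped list head. A standalone mock of that capture-once
pattern (map_chunk() and unmap_list() are stand-ins, not kernel calls):

#include <stddef.h>

struct mbio { struct mbio *bi_next; };
struct mrq  { struct mbio *bio; };

/* stand-in for __blk_rq_map_user(): maps one chunk, may fail */
static int map_chunk(struct mrq *rq, struct mbio *bio, int fail)
{
        if (fail)
                return -1;
        bio->bi_next = NULL;
        if (!rq->bio)
                rq->bio = bio;          /* first chunk becomes the head */
        return 0;
}

static void unmap_list(struct mbio *bio) { (void)bio; /* mock unmap */ }

int map_all(struct mrq *rq, struct mbio *chunks, int n)
{
        struct mbio *bio = NULL;        /* saved head, stable across i/o */
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = map_chunk(rq, &chunks[i], i == n - 1);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;  /* capture the original head once */
        }
        return 0;
unmap_rq:
        unmap_list(bio);        /* unmap from the saved head, not rq->bio */
        return ret;
}
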
* @rq: request to map data to
* @iov: pointer to the iovec
* @iov_count: number of elements in the iovec
+ * @len: I/O byte count
*
* Description:
* Data will be mapped directly for zero copy io, if possible. Otherwise
/**
* blk_rq_unmap_user - unmap a request with user data
- * @rq: rq to be unmapped
+ * @bio: start of bio list
*
* Description:
- * Unmap a rq previously mapped by blk_rq_map_user().
- * rq->bio must be set to the original head of the request.
+ * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ * supply the original rq->bio from the blk_rq_map_user() return, since
+ * the io completion may have changed rq->bio.
*/
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
{
- struct bio *bio, *mapped_bio;
+ struct bio *mapped_bio;
+ int ret = 0, ret2;
- while ((bio = rq->bio)) {
- if (bio_flagged(bio, BIO_BOUNCED))
+ while (bio) {
+ mapped_bio = bio;
+ if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
mapped_bio = bio->bi_private;
- else
- mapped_bio = bio;
- __blk_rq_unmap_user(mapped_bio);
- rq->bio = bio->bi_next;
- bio_put(bio);
+ ret2 = __blk_rq_unmap_user(mapped_bio);
+ if (ret2 && !ret)
+ ret = ret2;
+
+ mapped_bio = bio;
+ bio = bio->bi_next;
+ bio_put(mapped_bio);
}
- return 0;
+
+ return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
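[editor's note] Three points in the reworked walk are worth calling out:
bounced bios are unmapped through their bi_private original, the first
nonzero error is preserved, and the reference is dropped only after stepping
to bi_next. A compile-standalone mock of the same walk (reduced types, not
the kernel's):

#include <stdbool.h>
#include <stddef.h>

struct mbio {
        struct mbio *bi_next;
        struct mbio *bi_private;        /* original bio when bounced */
        bool bounced;
};

static int  unmap_one(struct mbio *bio) { (void)bio; return 0; /* mock */ }
static void put_bio(struct mbio *bio)   { (void)bio; /* mock ref drop */ }

int unmap_user_list(struct mbio *bio)
{
        int ret = 0, ret2;

        while (bio) {
                struct mbio *mapped = bio->bounced ? bio->bi_private : bio;

                ret2 = unmap_one(mapped);       /* undo the user mapping */
                if (ret2 && !ret)
                        ret = ret2;             /* keep the first failure */

                mapped = bio;
                bio = bio->bi_next;             /* advance before the put */
                put_bio(mapped);                /* now safe to drop the ref */
        }
        return ret;
}
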
* will have updated segment counts, update sector
* counts here.
*/
- if (!q->merge_requests_fn(q, req, next))
+ if (!ll_merge_requests_fn(q, req, next))
return 0;
/*
int el_ret, nr_sectors, barrier, err;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
+ int rw_flags;
nr_sectors = bio_sectors(bio);
case ELEVATOR_BACK_MERGE:
BUG_ON(!rq_mergeable(req));
- if (!q->back_merge_fn(q, req, bio))
+ if (!ll_back_merge_fn(q, req, bio))
break;
blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
case ELEVATOR_FRONT_MERGE:
BUG_ON(!rq_mergeable(req));
- if (!q->front_merge_fn(q, req, bio))
+ if (!ll_front_merge_fn(q, req, bio))
break;
blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}
get_rq:
+ /*
+ * This sync check and mask will be re-done in init_request_from_bio(),
+ * but we need to set it earlier to expose the sync flag to the
+ * rq allocator and io schedulers.
+ */
+ rw_flags = bio_data_dir(bio);
+ if (sync)
+ rw_flags |= REQ_RW_SYNC;
+
/*
* Grab a free request. This might sleep but cannot fail.
* Returns with the queue unlocked.
*/
- req = get_request_wait(q, bio_data_dir(bio), bio);
+ req = get_request_wait(q, rw_flags, bio);
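[editor's note] With the plumbing above, the low bit of rw_flags still
carries the data direction (hence the rw_flags & 0x01 masks in get_request()
and get_request_wait()) while REQ_RW_SYNC rides along in a higher bit. A
tiny standalone sketch of that encoding (the sync bit value here is
illustrative, not the kernel's):

#include <assert.h>

#define MOCK_REQ_RW_SYNC (1 << 4)   /* illustrative bit, not the real one */

static int build_rw_flags(int data_dir, int sync)  /* data_dir: 0 rd, 1 wr */
{
        int rw_flags = data_dir;

        if (sync)
                rw_flags |= MOCK_REQ_RW_SYNC;
        return rw_flags;
}

int main(void)
{
        int flags = build_rw_flags(1, 1);

        assert((flags & 0x01) == 1);            /* direction survives */
        assert(flags & MOCK_REQ_RW_SYNC);       /* sync hint survives */
        return 0;
}
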
/*
* After dropping the lock and possibly sleeping here, our request
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
register_hotcpu_notifier(&blk_cpu_notifier);
- blk_max_low_pfn = max_low_pfn;
- blk_max_pfn = max_pfn;
+ blk_max_low_pfn = max_low_pfn - 1;
+ blk_max_pfn = max_pfn - 1;
return 0;
}
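[editor's note] The -1 here is the other half of the off-by-one fix at the
top of the patch: max_low_pfn and max_pfn are page counts, so the highest
valid pfn is count - 1, and with the ">" comparison the boundary page is no
longer bounced. A tiny standalone check of the boundary arithmetic (values
chosen for illustration):

#include <assert.h>

int main(void)
{
        unsigned long max_pfn     = 0x100000;    /* page count: 4GB / 4K */
        unsigned long blk_max_pfn = max_pfn - 1; /* highest valid pfn */
        unsigned long pfn;

        pfn = blk_max_pfn;              /* the boundary page itself */
        assert(!(pfn > blk_max_pfn));   /* addressable: no bounce */

        pfn = blk_max_pfn + 1;          /* first page past the limit */
        assert(pfn > blk_max_pfn);      /* must be bounced */
        return 0;
}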