#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
/*
* for max sense size
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+static void blk_recalc_rq_segments(struct request *rq);
+static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio);
/*
* For the allocated request tables
EXPORT_SYMBOL(blk_queue_ordered);
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q: the request queue
- * @iff: the function to be called issuing the flush
- *
- * Description:
- * If a driver supports issuing a flush command, the support is notified
- * to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
- q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
/*
* Cache flushing for ordered writes handling
*/
/*
* Okay, sequence complete.
*/
- rq = q->orig_bar_rq;
- uptodate = q->orderr ? q->orderr : 1;
+ uptodate = 1;
+ if (q->orderr)
+ uptodate = q->orderr;
q->ordseq = 0;
+ rq = q->orig_bar_rq;
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
end_that_request_last(rq, uptodate);
static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
- q->bi_size = 0;
q->orderr = 0;
q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED;
rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
- rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+ if (q->ordered & QUEUE_ORDERED_FUA)
+ rq->cmd_flags |= REQ_FUA;
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
init_request_from_bio(rq, q->orig_bar_rq->bio);
* Queue ordered sequence. As we stack them at the head, we
* need to queue in reverse order. Note that we rely on that
* no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
- * request gets inbetween ordered sequence.
+ * request gets in between the ordered sequence. If this request is
+ * an empty barrier, we don't need to do a postflush ever since
+ * there will be no data written between the pre and post flush.
+ * Hence a single flush will suffice.
*/
- if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+ if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
else
q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
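
Note: blk_empty_barrier() is used here but defined outside this hunk. A minimal sketch of the predicate, assuming it sits with the other blk_*_rq() helpers in blkdev.h: a barrier fs request that carries no data.

/* Sketch, assumed definition, not part of this patch. */
#define blk_empty_barrier(rq)	\
	(blk_fs_request(rq) && blk_barrier_rq(rq) && !(rq)->hard_nr_sectors)
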
int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
- int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+ const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
if (!q->ordseq) {
if (!is_barrier)
return 1;
}
-static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
-{
- struct request_queue *q = bio->bi_private;
-
- /*
- * This is dry run, restore bio_sector and size. We'll finish
- * this request again with the original bi_end_io after an
- * error occurs or post flush is complete.
- */
- q->bi_size += bytes;
-
- if (bio->bi_size)
- return 1;
-
- /* Reset bio */
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio->bi_size = q->bi_size;
- bio->bi_sector -= (q->bi_size >> 9);
- q->bi_size = 0;
-
- return 0;
-}
-
-static int ordered_bio_endio(struct request *rq, struct bio *bio,
- unsigned int nbytes, int error)
+static void req_bio_endio(struct request *rq, struct bio *bio,
+ unsigned int nbytes, int error)
{
struct request_queue *q = rq->q;
- bio_end_io_t *endio;
- void *private;
- if (&q->bar_rq != rq)
- return 0;
+ if (&q->bar_rq != rq) {
+ if (error)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ error = -EIO;
- /*
- * Okay, this is the barrier request in progress, dry finish it.
- */
- if (error && !q->orderr)
- q->orderr = error;
-
- endio = bio->bi_end_io;
- private = bio->bi_private;
- bio->bi_end_io = flush_dry_bio_endio;
- bio->bi_private = q;
-
- bio_endio(bio, nbytes, error);
+ if (unlikely(nbytes > bio->bi_size)) {
+ printk("%s: want %u bytes done, only %u left\n",
+ __FUNCTION__, nbytes, bio->bi_size);
+ nbytes = bio->bi_size;
+ }
- bio->bi_end_io = endio;
- bio->bi_private = private;
+ bio->bi_size -= nbytes;
+ bio->bi_sector += (nbytes >> 9);
+ if (bio->bi_size == 0)
+ bio_endio(bio, error);
+ } else {
- return 1;
+ /*
+ * Okay, this is the barrier request in progress, just
+ * record the error.
+ */
+ if (error && !q->orderr)
+ q->orderr = error;
+ }
}
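
Note: req_bio_endio() now completes a bio in place instead of the old dry-run save/restore of bi_end_io. For illustration, a hedged walk-through with hypothetical values, a 4096-byte bio retired in two 2048-byte chunks:

/* Hypothetical values, illustration only. */
bio->bi_sector = 100;			/* 8 sectors pending */
bio->bi_size = 4096;

req_bio_endio(rq, bio, 2048, 0);	/* bi_sector = 104, bi_size = 2048 */
req_bio_endio(rq, bio, 2048, 0);	/* bi_size hits 0, bio_endio(bio, 0) fires */
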
/**
*/
return;
- if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
- printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
- __FUNCTION__, tag);
- return;
- }
-
list_del_init(&rq->queuelist);
rq->cmd_flags &= ~REQ_QUEUED;
rq->tag = -1;
__FUNCTION__, tag);
bqt->tag_index[tag] = NULL;
+
+ /*
+ * We use test_and_clear_bit's memory ordering properties here.
+ * The tag_map bit acts as a lock for tag_index[bit], so we need
+ * a barrier before clearing the bit (precisely: release semantics).
+ * Could use clear_bit_unlock when it is merged.
+ */
+ if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+ printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+ __FUNCTION__, tag);
+ return;
+ }
+
bqt->busy--;
}
return 1;
} while (test_and_set_bit(tag, bqt->tag_map));
+ /*
+ * We rely on test_and_set_bit providing lock memory ordering semantics
+ * (could use test_and_set_bit_lock when it is merged).
+ */
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
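
Note: both comments above lean on the implicit memory ordering of the bit operations. A sketch of the same pattern once the _lock bitops land as test_and_set_bit_lock()/clear_bit_unlock() (assumed API, per the comments):

/* Acquire: take the tag_map bit with lock semantics. */
do {
	tag = find_first_zero_bit(bqt->tag_map, max_depth);
	if (tag >= max_depth)
		return 1;
} while (test_and_set_bit_lock(tag, bqt->tag_map));

/* ... this request now owns tag_index[tag] ... */

/* Release: pairs with the acquire above. */
clear_bit_unlock(tag, bqt->tag_map);
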
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
+ struct request rq;
+ struct bio *nxt = bio->bi_next;
+ rq.q = q;
+ rq.bio = rq.biotail = bio;
+ bio->bi_next = NULL;
+ blk_recalc_rq_segments(&rq);
+ bio->bi_next = nxt;
+ bio->bi_phys_segments = rq.nr_phys_segments;
+ bio->bi_hw_segments = rq.nr_hw_segments;
+ bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static void blk_recalc_rq_segments(struct request *rq)
+{
+ int nr_phys_segs;
+ int nr_hw_segs;
+ unsigned int phys_size;
+ unsigned int hw_size;
struct bio_vec *bv, *bvprv = NULL;
- int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+ int seg_size;
+ int hw_seg_size;
+ int cluster;
+ struct req_iterator iter;
int high, highprv = 1;
+ struct request_queue *q = rq->q;
- if (unlikely(!bio->bi_io_vec))
+ if (!rq->bio)
return;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
- hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
- bio_for_each_segment(bv, bio, i) {
+ hw_seg_size = seg_size = 0;
+ phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+ rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
* considered part of another segment, since that might
}
new_segment:
if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
- !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+ !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
hw_seg_size += bv->bv_len;
- } else {
+ else {
new_hw_segment:
- if (hw_seg_size > bio->bi_hw_front_size)
- bio->bi_hw_front_size = hw_seg_size;
+ if (nr_hw_segs == 1 &&
+ hw_seg_size > rq->bio->bi_hw_front_size)
+ rq->bio->bi_hw_front_size = hw_seg_size;
hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
nr_hw_segs++;
}
seg_size = bv->bv_len;
highprv = high;
}
- if (hw_seg_size > bio->bi_hw_back_size)
- bio->bi_hw_back_size = hw_seg_size;
- if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
- bio->bi_hw_front_size = hw_seg_size;
- bio->bi_phys_segments = nr_phys_segs;
- bio->bi_hw_segments = nr_hw_segs;
- bio->bi_flags |= (1 << BIO_SEG_VALID);
+
+ if (nr_hw_segs == 1 &&
+ hw_seg_size > rq->bio->bi_hw_front_size)
+ rq->bio->bi_hw_front_size = hw_seg_size;
+ if (hw_seg_size > rq->biotail->bi_hw_back_size)
+ rq->biotail->bi_hw_back_size = hw_seg_size;
+ rq->nr_phys_segments = nr_phys_segs;
+ rq->nr_hw_segments = nr_hw_segs;
}
-EXPORT_SYMBOL(blk_recount_segments);
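
Note: rq_for_each_segment() folds the old nested rq_for_each_bio()/bio_for_each_segment() loops into a single iterator. A minimal driver-side sketch of walking every segment of a request:

struct req_iterator iter;
struct bio_vec *bvec;
unsigned int total = 0;

/* Visits each bio_vec of each bio hanging off rq, in order. */
rq_for_each_segment(bvec, rq, iter)
	total += bvec->bv_len;
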
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
* must make sure sg can hold rq->nr_phys_segments entries
*/
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sg)
+ struct scatterlist *sglist)
{
struct bio_vec *bvec, *bvprv;
- struct bio *bio;
- int nsegs, i, cluster;
+ struct req_iterator iter;
+ struct scatterlist *sg;
+ int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
* for each bio in rq
*/
bvprv = NULL;
- rq_for_each_bio(bio, rq) {
- /*
- * for each segment in bio
- */
- bio_for_each_segment(bvec, bio, i) {
- int nbytes = bvec->bv_len;
+ sg = NULL;
+ rq_for_each_segment(bvec, rq, iter) {
+ int nbytes = bvec->bv_len;
- if (bvprv && cluster) {
- if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
- goto new_segment;
+ if (bvprv && cluster) {
+ if (sg->length + nbytes > q->max_segment_size)
+ goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
- goto new_segment;
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ goto new_segment;
- sg[nsegs - 1].length += nbytes;
- } else {
+ sg->length += nbytes;
+ } else {
new_segment:
- memset(&sg[nsegs],0,sizeof(struct scatterlist));
- sg[nsegs].page = bvec->bv_page;
- sg[nsegs].length = nbytes;
- sg[nsegs].offset = bvec->bv_offset;
-
- nsegs++;
- }
- bvprv = bvec;
- } /* segments in bio */
- } /* bios in rq */
+ if (!sg)
+ sg = sglist;
+ else
+ sg = sg_next(sg);
+
+ memset(sg, 0, sizeof(*sg));
+ sg->page = bvec->bv_page;
+ sg->length = nbytes;
+ sg->offset = bvec->bv_offset;
+ nsegs++;
+ }
+ bvprv = bvec;
+ } /* segments in rq */
return nsegs;
}
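
Note: callers still pass a flat table sized for rq->nr_phys_segments; the sg_next() walk merely prepares for chained scatterlists. A hedged sketch of typical driver usage (MY_MAX_SEGMENTS and the DMA step are assumptions):

struct scatterlist sgl[MY_MAX_SEGMENTS];	/* >= rq->nr_phys_segments */
int nents;

nents = blk_rq_map_sg(q, rq, sgl);
/* sgl[0..nents-1] now describe the request; hand them to the DMA
 * engine, e.g. dma_map_sg(dev, sgl, nents, direction). */
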
return 1;
}
-int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
+static int ll_back_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio)
{
unsigned short max_sectors;
int len;
return ll_new_hw_segment(q, req, bio);
}
-EXPORT_SYMBOL(ll_back_merge_fn);
static int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
blk_trace_shutdown(q);
+ bdi_destroy(&q->backing_dev_info);
kmem_cache_free(requestq_cachep, q);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
+ int err;
q = kmem_cache_alloc_node(requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
+ q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+ q->backing_dev_info.unplug_io_data = q;
+ err = bdi_init(&q->backing_dev_info);
+ if (err) {
+ kmem_cache_free(requestq_cachep, q);
+ return NULL;
+ }
+
init_timer(&q->unplug_timer);
- snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+ kobject_set_name(&q->kobj, "%s", "queue");
q->kobj.ktype = &queue_ktype;
kobject_init(&q->kobj);
- q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
- q->backing_dev_info.unplug_io_data = q;
-
mutex_init(&q->sysfs_lock);
return q;
return ret;
}
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ if (!rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+ else if (!ll_back_merge_fn(q, rq, bio))
+ return -EINVAL;
+ else {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+
+ rq->data_len += bio->bi_size;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(blk_rq_append_bio);
+
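
Note: blk_rq_append_bio() exports the prep-or-merge step that __blk_rq_map_user() used to open-code. A sketch of caller-side error handling; on failure the bio is untouched and ownership stays with the caller:

int err = blk_rq_append_bio(q, rq, bio);
if (err) {
	/* -EINVAL: bio would not merge onto rq. */
	bio_put(bio);
	return err;
}
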
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
*/
bio_get(bio);
- if (!rq->bio)
- blk_rq_bio_prep(q, rq, bio);
- else if (!ll_back_merge_fn(q, rq, bio)) {
- ret = -EINVAL;
- goto unmap_bio;
- } else {
- rq->biotail->bi_next = bio;
- rq->biotail = bio;
+ ret = blk_rq_append_bio(q, rq, bio);
+ if (!ret)
+ return bio->bi_size;
- rq->data_len += bio->bi_size;
- }
-
- return bio->bi_size;
-
-unmap_bio:
/* if it was bounced we must call the end io function */
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
return PTR_ERR(bio);
if (bio->bi_size != len) {
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
bio_unmap_user(bio);
return -EINVAL;
}
EXPORT_SYMBOL(blk_execute_rq);
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ complete(bio->bi_private);
+}
+
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
*/
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
+ struct bio *bio;
+ int ret;
if (bdev->bd_disk == NULL)
return -ENXIO;
q = bdev_get_queue(bdev);
if (!q)
return -ENXIO;
- if (!q->issue_flush_fn)
- return -EOPNOTSUPP;
- return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = bio_end_empty_barrier;
+ bio->bi_private = &wait;
+ bio->bi_bdev = bdev;
+ submit_bio(1 << BIO_RW_BARRIER, bio);
+
+ wait_for_completion(&wait);
+
+ /*
+ * The driver must store the error location in ->bi_sector, if
+ * it supports it. For non-stacked drivers, this should be copied
+ * from rq->sector.
+ */
+ if (error_sector)
+ *error_sector = bio->bi_sector;
+
+ ret = 0;
+ if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+
+ bio_put(bio);
+ return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
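
Note: with issue_flush_fn gone, any queue that honors barrier bios supports flushes unmodified. A hedged sketch of a caller, e.g. an fsync path:

sector_t err_sector;
int ret;

ret = blkdev_issue_flush(bdev, &err_sector);
if (ret == -EIO)
	/* err_sector holds the failure location only if the driver
	 * filled in bio->bi_sector; stacked drivers may not. */
	printk(KERN_ERR "flush failed near sector %llu\n",
	       (unsigned long long)err_sector);
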
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
- req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
- req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
- req->nr_phys_segments = bio_phys_segments(req->q, bio);
- req->nr_hw_segments = bio_hw_segments(req->q, bio);
- req->buffer = bio_data(bio); /* see ->buffer comment above */
- req->bio = req->biotail = bio;
req->ioprio = bio_prio(bio);
- req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
+ blk_rq_bio_prep(req->q, req, bio);
}
static int __make_request(struct request_queue *q, struct bio *bio)
return 0;
end_io:
- bio_endio(bio, nr_sectors << 9, err);
+ bio_endio(bio, err);
return 0;
}
{
struct block_device *bdev = bio->bi_bdev;
- if (bdev != bdev->bd_contains) {
+ if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
const int rw = bio_data_dir(bio);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+ sector_t maxsector;
+
+ if (!nr_sectors)
+ return 0;
+
+ /* Test device or partition size, when known. */
+ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+ if (maxsector) {
+ sector_t sector = bio->bi_sector;
+
+ if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+ /*
+ * This may well happen - the kernel calls bread()
+ * without checking the size of the device, e.g., when
+ * mounting a device.
+ */
+ handle_bad_sector(bio);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
/**
* generic_make_request: hand a buffer to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
static inline void __generic_make_request(struct bio *bio)
{
struct request_queue *q;
- sector_t maxsector;
sector_t old_sector;
int ret, nr_sectors = bio_sectors(bio);
dev_t old_dev;
might_sleep();
- /* Test device or partition size, when known. */
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector) {
- sector_t sector = bio->bi_sector;
- if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
- /*
- * This may well happen - the kernel calls bread()
- * without checking the size of the device, e.g., when
- * mounting a device.
- */
- handle_bad_sector(bio);
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
/*
* Resolve the mapping until finished. (drivers are
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
end_io:
- bio_endio(bio, bio->bi_size, -EIO);
+ bio_endio(bio, -EIO);
break;
}
- if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+ if (unlikely(nr_sectors > q->max_hw_sectors)) {
printk("bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
blk_partition_remap(bio);
if (old_sector != -1)
- blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+ blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
old_sector);
blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
old_sector = bio->bi_sector;
old_dev = bio->bi_bdev->bd_dev;
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector) {
- sector_t sector = bio->bi_sector;
-
- if (maxsector < nr_sectors ||
- maxsector - nr_sectors < sector) {
- /*
- * This may well happen - partitions are not
- * checked to make sure they are within the size
- * of the whole device.
- */
- handle_bad_sector(bio);
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
ret = q->make_request_fn(q, bio);
} while (ret);
{
int count = bio_sectors(bio);
- BIO_BUG_ON(!bio->bi_size);
- BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw |= rw;
- if (rw & WRITE) {
- count_vm_events(PGPGOUT, count);
- } else {
- task_io_account_read(bio->bi_size);
- count_vm_events(PGPGIN, count);
- }
-
- if (unlikely(block_dump)) {
- char b[BDEVNAME_SIZE];
- printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
- current->comm, current->pid,
- (rw & WRITE) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_sector,
- bdevname(bio->bi_bdev,b));
- }
- generic_make_request(bio);
-}
-
-EXPORT_SYMBOL(submit_bio);
+ /*
+ * If it's a regular read/write or a barrier with data attached,
+ * go through the normal accounting stuff before submission.
+ */
+ if (!bio_empty_barrier(bio)) {
-static void blk_recalc_rq_segments(struct request *rq)
-{
- struct bio *bio, *prevbio = NULL;
- int nr_phys_segs, nr_hw_segs;
- unsigned int phys_size, hw_size;
- struct request_queue *q = rq->q;
+ BIO_BUG_ON(!bio->bi_size);
+ BIO_BUG_ON(!bio->bi_io_vec);
- if (!rq->bio)
- return;
+ if (rw & WRITE) {
+ count_vm_events(PGPGOUT, count);
+ } else {
+ task_io_account_read(bio->bi_size);
+ count_vm_events(PGPGIN, count);
+ }
- phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
- rq_for_each_bio(bio, rq) {
- /* Force bio hw/phys segs to be recalculated. */
- bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-
- nr_phys_segs += bio_phys_segments(q, bio);
- nr_hw_segs += bio_hw_segments(q, bio);
- if (prevbio) {
- int pseg = phys_size + prevbio->bi_size + bio->bi_size;
- int hseg = hw_size + prevbio->bi_size + bio->bi_size;
-
- if (blk_phys_contig_segment(q, prevbio, bio) &&
- pseg <= q->max_segment_size) {
- nr_phys_segs--;
- phys_size += prevbio->bi_size + bio->bi_size;
- } else
- phys_size = 0;
-
- if (blk_hw_contig_segment(q, prevbio, bio) &&
- hseg <= q->max_segment_size) {
- nr_hw_segs--;
- hw_size += prevbio->bi_size + bio->bi_size;
- } else
- hw_size = 0;
+ if (unlikely(block_dump)) {
+ char b[BDEVNAME_SIZE];
+ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+ current->comm, current->pid,
+ (rw & WRITE) ? "WRITE" : "READ",
+ (unsigned long long)bio->bi_sector,
+ bdevname(bio->bi_bdev,b));
}
- prevbio = bio;
}
- rq->nr_phys_segments = nr_phys_segs;
- rq->nr_hw_segments = nr_hw_segs;
+ generic_make_request(bio);
}
+EXPORT_SYMBOL(submit_bio);
+
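
Note: since empty barriers skip the accounting above, issuing one reduces to submitting a zero-payload bio. A minimal sketch (my_end_io and my_cookie are assumed caller-side names; NULL check omitted for brevity):

struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* no data pages */

bio->bi_bdev = bdev;
bio->bi_end_io = my_end_io;	/* runs once the flush completes */
bio->bi_private = my_cookie;
submit_bio(1 << BIO_RW_BARRIER, bio);
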
static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (blk_fs_request(rq)) {
while ((bio = req->bio) != NULL) {
int nbytes;
+ /*
+ * For an empty barrier request, the low level driver must
+ * store a potential error location in ->sector. We pass
+ * that back up in ->bi_sector.
+ */
+ if (blk_empty_barrier(req))
+ bio->bi_sector = req->sector;
+
if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
- if (!ordered_bio_endio(req, bio, nbytes, error))
- bio_endio(bio, nbytes, error);
+ req_bio_endio(req, bio, nbytes, error);
next_idx = 0;
bio_nbytes = 0;
} else {
* if the request wasn't completed, update state
*/
if (bio_nbytes) {
- if (!ordered_bio_endio(req, bio, bio_nbytes, error))
- bio_endio(bio, bio_nbytes, error);
+ req_bio_endio(req, bio, bio_nbytes, error);
bio->bi_idx += next_idx;
bio_iovec(bio)->bv_offset += nr_bytes;
bio_iovec(bio)->bv_len -= nr_bytes;
}
}
-static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
/*
}
-static struct notifier_block __devinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier __cpuinitdata = {
.notifier_call = blk_cpu_notify,
};
* Description:
* Ends all I/O on a request. It does not handle partial completions,
* unless the driver actually implements this in its completion callback
- * through requeueing. Theh actual completion happens out-of-order,
+ * through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
**/
EXPORT_SYMBOL(end_that_request_last);
-void end_request(struct request *req, int uptodate)
+static inline void __end_request(struct request *rq, int uptodate,
+ unsigned int nr_bytes, int dequeue)
{
- if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
- add_disk_randomness(req->rq_disk);
- blkdev_dequeue_request(req);
- end_that_request_last(req, uptodate);
+ if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+ if (dequeue)
+ blkdev_dequeue_request(rq);
+ add_disk_randomness(rq->rq_disk);
+ end_that_request_last(rq, uptodate);
}
}
+static unsigned int rq_byte_size(struct request *rq)
+{
+ if (blk_fs_request(rq))
+ return rq->hard_nr_sectors << 9;
+
+ return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends all I/O on a request, and removes it from the block layer queues.
+ * Not suitable for normal IO completion, unless the driver still has
+ * the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+ __end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends all I/O on a request. The request must already have been
+ * dequeued using blkdev_dequeue_request(), as is normally the case
+ * for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+ __end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
+
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends I/O on the current segment of a request. If that is the only
+ * remaining segment, the request is also completed and freed.
+ *
+ * This is a remnant of how older block drivers handled IO completions.
+ * Modern drivers typically end IO on the full request in one go, unless
+ * they have a residual value to account for. For that case this function
+ * isn't really useful, unless the residual just happens to be the
+ * full current segment. In other words, don't use this function in new
+ * code. Either use end_queued_request() or end_dequeued_request() to
+ * complete the whole request, or end_that_request_chunk() (along with
+ * end_that_request_last()) for partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+ __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
EXPORT_SYMBOL(end_request);
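
Note: together the three helpers cover the common completion shapes. A hedged sketch of a simple request_fn built on end_dequeued_request() (my_do_io is an assumed driver hook, 0 on success):

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);
		if (my_do_io(rq) == 0)
			end_dequeued_request(rq, 1);	/* all done */
		else
			end_dequeued_request(rq, 0);	/* I/O error */
	}
}
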
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
- struct bio *bio)
+static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio)
{
/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= (bio->bi_rw & 3);
rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
-}
-EXPORT_SYMBOL(blk_rq_bio_prep);
+ if (bio->bi_bdev)
+ rq->rq_disk = bio->bi_bdev->bd_disk;
+}
int kblockd_schedule_work(struct work_struct *work)
{
max_hw_sectors_kb = q->max_hw_sectors >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
- int ra_kb;
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
* values synchronously:
*/
spin_lock_irq(q->queue_lock);
- /*
- * Trim readahead window as well, if necessary:
- */
- ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
- if (ra_kb > max_sectors_kb)
- q->backing_dev_info.ra_pages =
- max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
-
q->max_sectors = max_sectors_kb << 1;
spin_unlock_irq(q->queue_lock);
return queue_var_show(max_hw_sectors_kb, (page));
}
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->max_phys_segments, page);
+}
+
+static ssize_t queue_max_segments_store(struct request_queue *q,
+ const char *page, size_t count)
+{
+ unsigned long segments;
+ ssize_t ret = queue_var_store(&segments, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ q->max_phys_segments = segments;
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
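
Note: like the neighboring attributes, this one should surface as /sys/block/<dev>/queue/max_segments, readable and writable per the mode bits below.
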
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
.show = queue_max_hw_sectors_show,
};
+static struct queue_sysfs_entry queue_max_segments_entry = {
+ .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_max_segments_show,
+ .store = queue_max_segments_store,
+};
+
static struct queue_sysfs_entry queue_iosched_entry = {
.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
.show = elv_iosched_show,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
+ &queue_max_segments_entry.attr,
&queue_iosched_entry.attr,
NULL,
};