int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
struct bio_vec *bvec, *bvprv;
- struct scatterlist *next_sg, *sg;
struct req_iterator iter;
+ struct scatterlist *sg;
int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);

/*
 * for each bio in rq
 */
bvprv = NULL;
- sg = next_sg = &sglist[0];
+ sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;

if (bvprv && cluster) {
	if (sg->length + nbytes > q->max_segment_size)
		goto new_segment;

	if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
		goto new_segment;
	if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
		goto new_segment;

	sg->length += nbytes;
} else {
new_segment:
- sg = next_sg;
- next_sg = sg_next(sg);
+ if (!sg)
+ sg = sglist;
+ else
+ sg = sg_next(sg);
+ memset(sg, 0, sizeof(*sg));
sg->page = bvec->bv_page;
sg->length = nbytes;
sg->offset = bvec->bv_offset;
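
Taken together, the hunks above rework blk_rq_map_sg() for chained scatterlists: instead of keeping a pre-advanced next_sg pointer, the cursor starts out NULL, moves to the first sglist entry only when the first segment is emitted, and advances with sg_next() after that; each entry is zeroed before it is filled, so the function never reads or writes an entry beyond the ones it actually maps, and stale contents from an earlier, longer mapping cannot leak through. (The rest of the loop, which bumps nsegs and records bvprv, is unchanged and not shown here.) Below is a self-contained user-space sketch of the same lazy-advance walk over a chained table; struct sg_ent, toy_sg_next() and the rest are invented for illustration, not the kernel API.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for struct scatterlist: a table chunk may end in a
 * "chain" entry whose chain field points at the next chunk. */
struct sg_ent {
	int is_chain;		/* entry is a link, not a data segment */
	struct sg_ent *chain;	/* next chunk, when is_chain is set */
	unsigned int length;	/* segment length, for data entries */
};

/* Analogue of sg_next(): step to the following entry, hopping
 * transparently across a chain link into the next chunk. */
static struct sg_ent *toy_sg_next(struct sg_ent *ent)
{
	ent++;
	if (ent->is_chain)
		ent = ent->chain;
	return ent;
}

int main(void)
{
	struct sg_ent chunk2[2] = { { 0 } };
	struct sg_ent chunk1[3] = {
		{ 0 }, { 0 },
		{ .is_chain = 1, .chain = chunk2 },	/* link entry */
	};
	unsigned int seg_len[4] = { 512, 1024, 2048, 4096 };
	struct sg_ent *cursor = NULL;	/* lazy cursor, as in the patch */
	int i;

	for (i = 0; i < 4; i++) {
		/* Advance only when a segment is really emitted, so no
		 * entry past the mapped ones is ever touched. */
		cursor = cursor ? toy_sg_next(cursor) : chunk1;
		memset(cursor, 0, sizeof(*cursor));
		cursor->length = seg_len[i];
		printf("segment %d -> %u bytes\n", i, cursor->length);
	}
	return 0;
}
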
blk_trace_shutdown(q);
+ bdi_destroy(&q->backing_dev_info);
kmem_cache_free(requestq_cachep, q);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
+ int err;
q = kmem_cache_alloc_node(requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
+ q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+ q->backing_dev_info.unplug_io_data = q;
+ err = bdi_init(&q->backing_dev_info);
+ if (err) {
+ kmem_cache_free(requestq_cachep, q);
+ return NULL;
+ }
+
init_timer(&q->unplug_timer);
kobject_set_name(&q->kobj, "%s", "queue");
q->kobj.ktype = &queue_ktype;
kobject_init(&q->kobj);
- q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
- q->backing_dev_info.unplug_io_data = q;
-
mutex_init(&q->sysfs_lock);
return q;
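
This hunk gives blk_alloc_queue_node() a real failure path: the unplug callback fields are filled in first, bdi_init() runs, and if it fails the half-built queue goes straight back to the slab cache. It mirrors the bdi_destroy() added to the queue-release path above; every path that frees the queue must first undo whatever bdi_init() set up. Here is a minimal user-space sketch of that construct-or-unwind pairing; the toy_* names are invented, and this models the pattern rather than the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Embedded sub-object with its own init/destroy pair, standing in
 * for backing_dev_info and bdi_init()/bdi_destroy(). */
struct toy_bdi {
	long *stats;	/* stands in for state that init allocates */
};

static int toy_bdi_init(struct toy_bdi *bdi)
{
	bdi->stats = calloc(16, sizeof(*bdi->stats));
	return bdi->stats ? 0 : -1;	/* would be -ENOMEM in the kernel */
}

static void toy_bdi_destroy(struct toy_bdi *bdi)
{
	free(bdi->stats);
}

struct toy_queue {
	struct toy_bdi bdi;
	/* ... other queue state ... */
};

/* Construct-or-unwind: if the embedded init fails, free the shell and
 * return NULL, so the caller never sees a half-built object. */
static struct toy_queue *toy_alloc_queue(void)
{
	struct toy_queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	if (toy_bdi_init(&q->bdi)) {
		free(q);
		return NULL;
	}
	return q;
}

/* Teardown mirrors setup: destroy the embedded state before freeing
 * the memory that contains it. */
static void toy_release_queue(struct toy_queue *q)
{
	toy_bdi_destroy(&q->bdi);
	free(q);
}

int main(void)
{
	struct toy_queue *q = toy_alloc_queue();

	if (!q)
		return 1;
	toy_release_queue(q);
	puts("alloc and release paired cleanly");
	return 0;
}
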
unsigned long max_sectors_kb,
		max_hw_sectors_kb = q->max_hw_sectors >> 1,
		page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
- int ra_kb;
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
/*
 * Take the queue lock to update the readahead and max_sectors
 * values synchronously:
 */
spin_lock_irq(q->queue_lock);
- /*
- * Trim readahead window as well, if necessary:
- */
- ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
- if (ra_kb > max_sectors_kb)
- q->backing_dev_info.ra_pages =
- max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
-
q->max_sectors = max_sectors_kb << 1;
spin_unlock_irq(q->queue_lock);
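
The last two hunks simplify queue_max_sectors_store(): the deleted block used to trim the readahead window down to the new max_sectors_kb, and dropping it decouples readahead from the request-size limit, leaving only the max_sectors update under the queue lock. The surrounding arithmetic is pure unit conversion: a sector is 512 bytes, so kilobytes become sectors with << 1 (and max_hw_sectors >> 1 goes the other way), while ra_pages counts pages and converts to kilobytes with << (PAGE_CACHE_SHIFT - 10). A few sanity checks on those conversions, assuming 4 KB pages, i.e. PAGE_CACHE_SHIFT == 12:

#include <assert.h>
#include <stdio.h>

#define KB_TO_SECTORS_SHIFT 1		/* 1 KB = 2 x 512-byte sectors */
#define PAGE_TO_KB_SHIFT (12 - 10)	/* PAGE_CACHE_SHIFT - 10 for 4 KB pages */

int main(void)
{
	unsigned long max_sectors_kb = 512;
	unsigned long ra_pages = 32;

	/* KB -> sectors, as in q->max_sectors = max_sectors_kb << 1 */
	assert((max_sectors_kb << KB_TO_SECTORS_SHIFT) == 1024);

	/* pages -> KB, as in ra_pages << (PAGE_CACHE_SHIFT - 10) */
	assert((ra_pages << PAGE_TO_KB_SHIFT) == 128);

	/* KB -> pages, as in the removed readahead trim */
	assert((128UL >> PAGE_TO_KB_SHIFT) == 32);

	printf("512 KB = %lu sectors, 32 pages = %lu KB\n",
	       max_sectors_kb << KB_TO_SECTORS_SHIFT,
	       ra_pages << PAGE_TO_KB_SHIFT);
	return 0;
}
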