X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-table.c;h=47818d8249cbf16f125328d39d2be54c0241130a;hb=0f94e87cdeaaac9f0f9a28a5dd2a5070b87cd3e8;hp=2fc199b0016bd5c819b42572b97dbc69dc03844c;hpb=eedab661a51966c454e38c17266a531aa58b4a98;p=linux-2.6-omap-h63xx.git diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2fc199b0016..47818d8249c 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -99,9 +99,14 @@ static void combine_restrictions_low(struct io_restrictions *lhs, lhs->max_segment_size = min_not_zero(lhs->max_segment_size, rhs->max_segment_size); + lhs->max_hw_sectors = + min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); + lhs->seg_boundary_mask = min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); + lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn); + lhs->no_cluster |= rhs->no_cluster; } @@ -187,8 +192,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num) /* * Allocate both the target array and offset array at once. + * Append an empty entry to catch sectors beyond the end of + * the device. */ - n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) + + n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + sizeof(sector_t)); if (!n_highs) return -ENOMEM; @@ -213,12 +220,11 @@ static int alloc_targets(struct dm_table *t, unsigned int num) int dm_table_create(struct dm_table **result, int mode, unsigned num_targets, struct mapped_device *md) { - struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL); + struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) return -ENOMEM; - memset(t, 0, sizeof(*t)); INIT_LIST_HEAD(&t->devices); atomic_set(&t->holders, 1); @@ -526,7 +532,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) { - request_queue_t *q = bdev_get_queue(bdev); + struct request_queue *q = bdev_get_queue(bdev); struct io_restrictions *rs = &ti->limits; /* @@ -563,10 +569,15 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) rs->max_segment_size = min_not_zero(rs->max_segment_size, q->max_segment_size); + rs->max_hw_sectors = + min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); + rs->seg_boundary_mask = min_not_zero(rs->seg_boundary_mask, q->seg_boundary_mask); + rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); + rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); } EXPORT_SYMBOL_GPL(dm_set_device_limits); @@ -698,6 +709,8 @@ static void check_for_valid_limits(struct io_restrictions *rs) { if (!rs->max_sectors) rs->max_sectors = SAFE_MAX_SECTORS; + if (!rs->max_hw_sectors) + rs->max_hw_sectors = SAFE_MAX_SECTORS; if (!rs->max_phys_segments) rs->max_phys_segments = MAX_PHYS_SEGMENTS; if (!rs->max_hw_segments) @@ -708,6 +721,8 @@ static void check_for_valid_limits(struct io_restrictions *rs) rs->max_segment_size = MAX_SEGMENT_SIZE; if (!rs->seg_boundary_mask) rs->seg_boundary_mask = -1; + if (!rs->bounce_pfn) + rs->bounce_pfn = -1; } int dm_table_add_target(struct dm_table *t, const char *type, @@ -862,6 +877,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) /* * Search the btree for the correct target. + * + * Caller should check returned pointer with dm_target_is_valid() + * to trap I/O beyond end of device. 
*/ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) { @@ -891,7 +909,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) q->max_hw_segments = t->limits.max_hw_segments; q->hardsect_size = t->limits.hardsect_size; q->max_segment_size = t->limits.max_segment_size; + q->max_hw_sectors = t->limits.max_hw_sectors; q->seg_boundary_mask = t->limits.seg_boundary_mask; + q->bounce_pfn = t->limits.bounce_pfn; if (t->limits.no_cluster) q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER); else @@ -979,7 +999,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) devices = dm_table_get_devices(t); for (d = devices->next; d != devices; d = d->next) { struct dm_dev *dd = list_entry(d, struct dm_dev, list); - request_queue_t *q = bdev_get_queue(dd->bdev); + struct request_queue *q = bdev_get_queue(dd->bdev); r |= bdi_congested(&q->backing_dev_info, bdi_bits); } @@ -992,40 +1012,12 @@ void dm_table_unplug_all(struct dm_table *t) for (d = devices->next; d != devices; d = d->next) { struct dm_dev *dd = list_entry(d, struct dm_dev, list); - request_queue_t *q = bdev_get_queue(dd->bdev); + struct request_queue *q = bdev_get_queue(dd->bdev); - if (q->unplug_fn) - q->unplug_fn(q); + blk_unplug(q); } } -int dm_table_flush_all(struct dm_table *t) -{ - struct list_head *d, *devices = dm_table_get_devices(t); - int ret = 0; - unsigned i; - - for (i = 0; i < t->num_targets; i++) - if (t->targets[i].type->flush) - t->targets[i].type->flush(&t->targets[i]); - - for (d = devices->next; d != devices; d = d->next) { - struct dm_dev *dd = list_entry(d, struct dm_dev, list); - request_queue_t *q = bdev_get_queue(dd->bdev); - int err; - - if (!q->issue_flush_fn) - err = -EOPNOTSUPP; - else - err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL); - - if (!ret) - ret = err; - } - - return ret; -} - struct mapped_device *dm_table_get_md(struct dm_table *t) { dm_get(t->md); @@ -1043,4 +1035,3 @@ EXPORT_SYMBOL(dm_table_get_md); EXPORT_SYMBOL(dm_table_put); EXPORT_SYMBOL(dm_table_get); EXPORT_SYMBOL(dm_table_unplug_all); -EXPORT_SYMBOL(dm_table_flush_all);
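
For context, a minimal sketch (not part of this diff) of the caller pattern the new comment above dm_table_find_target() asks for: alloc_targets() now allocates num + 1 entries, and the extra entry is zeroed, so a lookup for a sector beyond the end of the device returns that sentinel rather than walking off the table. The caller is expected to reject it with dm_target_is_valid(); the helper shown here assumes dm_target_is_valid() simply tests ti->type, and map_sector() is a hypothetical caller, not a function from this patch.

/* Illustrative sketch only -- names and layout are assumptions, not code
 * from this diff.  The sentinel entry appended by alloc_targets(num + 1, ...)
 * is zero-filled, so its ->type is NULL and the check below rejects it. */
static inline int dm_target_is_valid(struct dm_target *ti)
{
	return ti->type != NULL;	/* assumed definition from dm.h */
}

static int map_sector(struct dm_table *table, struct bio *bio)
{
	struct dm_target *ti = dm_table_find_target(table, bio->bi_sector);

	if (!dm_target_is_valid(ti))
		return -EIO;	/* I/O beyond the end of the device */

	/* ... hand the bio to ti->type->map() as usual ... */
	return 0;
}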
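A note on the merge semantics used for the new max_hw_sectors and bounce_pfn fields: a zero limit means "not set", so it must not win the comparison, which is why combine_restrictions_low() and dm_set_device_limits() use min_not_zero() rather than plain min(). The macro below is a sketch of that behaviour, written out with a plain conditional; the exact definition lives in dm-table.c and may differ in form.

/* Illustrative only: treat 0 as "no limit", otherwise take the smaller value. */
#define min_not_zero(l, r) \
	((l) == 0 ? (r) : ((r) == 0 ? (l) : ((l) < (r) ? (l) : (r))))

/* Example: one underlying queue leaves bounce_pfn unset (0), the other
 * restricts it; the combined table inherits the stricter, non-zero value,
 * e.g. min_not_zero(0UL, 4096UL) evaluates to 4096. */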