diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b0016bd5c819b42572b97dbc69dc03844c..e75b1437b58b67ad8ecac097d6b3c50e9bf0b2ba 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -99,9 +99,14 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
        lhs->max_segment_size =
                min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
 
+       lhs->max_hw_sectors =
+               min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
+
        lhs->seg_boundary_mask =
                min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
+       lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
+
        lhs->no_cluster |= rhs->no_cluster;
 }
 
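Note: combine_restrictions_low() now also folds max_hw_sectors and
bounce_pfn into the stacked limits. For reference, min_not_zero() is
defined earlier in dm-table.c as "the minimum that is _not_ zero,
unless both are zero":

	#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

so an unset (zero) limit on either side never masks a real restriction
from the other.
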
@@ -187,8 +192,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 
        /*
         * Allocate both the target array and offset array at once.
+        * Append an empty entry to catch sectors beyond the end of
+        * the device.
         */
-       n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+       n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;
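
Note: with num + 1 entries, the final slot of both the 'highs' array and
the target array is zero-filled (dm_vcalloc() clears its allocation),
giving the btree lookup a sentinel to land on for out-of-range sectors.
An illustrative layout sketch, not the kernel code (userspace calloc()
stands in for dm_vcalloc()):

	size_t n = num + 1;
	sector_t *highs = calloc(n, sizeof(sector_t) + sizeof(struct dm_target));
	struct dm_target *targets = (struct dm_target *)(highs + n);
	/* highs[num] == 0 and targets[num] is all-zero: the sentinel */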
@@ -213,12 +220,11 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 int dm_table_create(struct dm_table **result, int mode,
                    unsigned num_targets, struct mapped_device *md)
 {
-       struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+       struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
 
        if (!t)
                return -ENOMEM;
 
-       memset(t, 0, sizeof(*t));
        INIT_LIST_HEAD(&t->devices);
        atomic_set(&t->holders, 1);
 
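Note: kzalloc() is the idiomatic replacement for the kmalloc()+memset()
pair this hunk removes; the two forms are equivalent:

	/* kzalloc(size, flags) behaves like: */
	void *p = kmalloc(size, flags);
	if (p)
		memset(p, 0, size);
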
@@ -281,9 +287,8 @@ static void free_devices(struct list_head *devices)
 {
        struct list_head *tmp, *next;
 
-       for (tmp = devices->next; tmp != devices; tmp = next) {
+       list_for_each_safe(tmp, next, devices) {
                struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
-               next = tmp->next;
                kfree(dd);
        }
 }
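
Note: list_for_each_safe() caches the next pointer before the loop body
runs, which is what makes the kfree(dd) safe. From include/linux/list.h:

	#define list_for_each_safe(pos, n, head) \
		for (pos = (head)->next, n = pos->next; pos != (head); \
			pos = n, n = pos->next)

This is exactly the open-coded pattern being deleted, so the conversion
is behaviour-preserving.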
@@ -356,7 +361,7 @@ static int lookup_device(const char *path, dev_t *dev)
        if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
                return r;
 
-       inode = nd.dentry->d_inode;
+       inode = nd.path.dentry->d_inode;
        if (!inode) {
                r = -ENOENT;
                goto out;
@@ -370,7 +375,7 @@ static int lookup_device(const char *path, dev_t *dev)
        *dev = inode->i_rdev;
 
  out:
-       path_release(&nd);
+       path_put(&nd.path);
        return r;
 }
 
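Note: this hunk and the previous one track the VFS change (merged around
2.6.25) that embedded a struct path in struct nameidata and replaced
path_release(&nd) with path_put(&nd.path):

	/* include/linux/path.h */
	struct path {
		struct vfsmount *mnt;
		struct dentry *dentry;
	};

path_put() drops the dentry and vfsmount references that path_lookup()
acquired.
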
@@ -470,7 +475,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                              int mode, struct dm_dev **result)
 {
        int r;
-       dev_t dev;
+       dev_t uninitialized_var(dev);
        struct dm_dev *dd;
        unsigned int major, minor;
 
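Note: dev is assigned on every path that reaches its use, but older gcc
versions cannot prove that and emit a "may be used uninitialized"
warning. uninitialized_var() suppresses the false positive; on gcc it
expands to a self-assignment that generates no code:

	#define uninitialized_var(x) x = x
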
@@ -526,7 +531,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-       request_queue_t *q = bdev_get_queue(bdev);
+       struct request_queue *q = bdev_get_queue(bdev);
        struct io_restrictions *rs = &ti->limits;
 
        /*
@@ -563,10 +568,15 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
        rs->max_segment_size =
                min_not_zero(rs->max_segment_size, q->max_segment_size);
 
+       rs->max_hw_sectors =
+               min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+
        rs->seg_boundary_mask =
                min_not_zero(rs->seg_boundary_mask,
                             q->seg_boundary_mask);
 
+       rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+
        rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
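
Note: request_queue_t was a deprecated typedef on its way out of the
tree, hence the switch to the struct name:

	/* include/linux/blkdev.h (since removed) */
	typedef struct request_queue request_queue_t;

The two added limits mirror the combine_restrictions_low() hunk above:
max_hw_sectors and bounce_pfn are inherited from the underlying queue so
a stacked dm device never advertises more than its most restrictive
member allows.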
@@ -698,6 +708,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 {
        if (!rs->max_sectors)
                rs->max_sectors = SAFE_MAX_SECTORS;
+       if (!rs->max_hw_sectors)
+               rs->max_hw_sectors = SAFE_MAX_SECTORS;
        if (!rs->max_phys_segments)
                rs->max_phys_segments = MAX_PHYS_SEGMENTS;
        if (!rs->max_hw_segments)
@@ -708,6 +720,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
                rs->max_segment_size = MAX_SEGMENT_SIZE;
        if (!rs->seg_boundary_mask)
                rs->seg_boundary_mask = -1;
+       if (!rs->bounce_pfn)
+               rs->bounce_pfn = -1;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
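
Note: like the existing seg_boundary_mask default, assigning -1 to the
unsigned bounce_pfn field wraps to the maximum value, meaning "no bounce
restriction":

	unsigned long pfn = -1;	/* == ULONG_MAX: no page needs bouncing */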
@@ -790,7 +804,7 @@ static int setup_indexes(struct dm_table *t)
                return -ENOMEM;
 
        /* set up internal nodes, bottom-up */
-       for (i = t->depth - 2, total = 0; i >= 0; i--) {
+       for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
@@ -862,6 +876,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
@@ -891,7 +908,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
        q->max_hw_segments = t->limits.max_hw_segments;
        q->hardsect_size = t->limits.hardsect_size;
        q->max_segment_size = t->limits.max_segment_size;
+       q->max_hw_sectors = t->limits.max_hw_sectors;
        q->seg_boundary_mask = t->limits.seg_boundary_mask;
+       q->bounce_pfn = t->limits.bounce_pfn;
        if (t->limits.no_cluster)
                q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
        else
@@ -973,13 +992,12 @@ int dm_table_resume_targets(struct dm_table *t)
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
-       struct list_head *d, *devices;
+       struct dm_dev *dd;
+       struct list_head *devices = dm_table_get_devices(t);
        int r = 0;
 
-       devices = dm_table_get_devices(t);
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
+       list_for_each_entry(dd, devices, list) {
+               struct request_queue *q = bdev_get_queue(dd->bdev);
                r |= bdi_congested(&q->backing_dev_info, bdi_bits);
        }
 
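Note: list_for_each_entry() hides the list_entry() boilerplate the old
loop carried. Simplified definition (the in-tree version of this era
also prefetches the next node):

	#define list_for_each_entry(pos, head, member) \
		for (pos = list_entry((head)->next, typeof(*pos), member); \
		     &pos->member != (head); \
		     pos = list_entry(pos->member.next, typeof(*pos), member))
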
@@ -988,42 +1006,14 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 
 void dm_table_unplug_all(struct dm_table *t)
 {
-       struct list_head *d, *devices = dm_table_get_devices(t);
-
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
+       struct dm_dev *dd;
+       struct list_head *devices = dm_table_get_devices(t);
 
-               if (q->unplug_fn)
-                       q->unplug_fn(q);
-       }
-}
+       list_for_each_entry(dd, devices, list) {
+               struct request_queue *q = bdev_get_queue(dd->bdev);
 
-int dm_table_flush_all(struct dm_table *t)
-{
-       struct list_head *d, *devices = dm_table_get_devices(t);
-       int ret = 0;
-       unsigned i;
-
-       for (i = 0; i < t->num_targets; i++)
-               if (t->targets[i].type->flush)
-                       t->targets[i].type->flush(&t->targets[i]);
-
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
-               int err;
-
-               if (!q->issue_flush_fn)
-                       err = -EOPNOTSUPP;
-               else
-                       err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
-               if (!ret)
-                       ret = err;
+               blk_unplug(q);
        }
-
-       return ret;
 }
 
 struct mapped_device *dm_table_get_md(struct dm_table *t)
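
Note: two things happen in the hunk above. dm_table_unplug_all() is
converted to list_for_each_entry() and to blk_unplug(), which folds in
the NULL check on ->unplug_fn that was open-coded here; simplified shape
(the real function also emits a blktrace event):

	void blk_unplug(struct request_queue *q)
	{
		if (q->unplug_fn)
			q->unplug_fn(q);
	}

And dm_table_flush_all() is deleted outright: the ->issue_flush_fn hook
it relied on was removed from the block layer when flushes were rebuilt
on top of empty barriers (blkdev_issue_flush()), leaving it with no
callers (the matching EXPORT_SYMBOL goes away in the final hunk).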
@@ -1043,4 +1033,3 @@ EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);