X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmd%2Fdm.c;h=d1d0cd0f5750a2693f29cdf7bf5e5d99cbbfa2c0;hb=326528a54f61e38fc16bf2e8ac028c6a33b615ed;hp=ace998ce59f6af2616da4a24eca66de8b59c69ba;hpb=0afe2db21394820d32646a695eccf3fbfe6ab5c7;p=linux-2.6-omap-h63xx.git

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ace998ce59f..d1d0cd0f575 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -76,7 +76,6 @@ union map_info *dm_get_mapinfo(struct bio *bio)
  */
 struct dm_wq_req {
 	enum {
-		DM_WQ_FLUSH_ALL,
 		DM_WQ_FLUSH_DEFERRED,
 	} type;
 	struct work_struct work;
@@ -151,40 +150,40 @@ static struct kmem_cache *_tio_cache;
 
 static int __init local_init(void)
 {
-	int r;
+	int r = -ENOMEM;
 
 	/* allocate a slab for the dm_ios */
 	_io_cache = KMEM_CACHE(dm_io, 0);
 	if (!_io_cache)
-		return -ENOMEM;
+		return r;
 
 	/* allocate a slab for the target ios */
 	_tio_cache = KMEM_CACHE(dm_target_io, 0);
-	if (!_tio_cache) {
-		kmem_cache_destroy(_io_cache);
-		return -ENOMEM;
-	}
+	if (!_tio_cache)
+		goto out_free_io_cache;
 
 	r = dm_uevent_init();
-	if (r) {
-		kmem_cache_destroy(_tio_cache);
-		kmem_cache_destroy(_io_cache);
-		return r;
-	}
+	if (r)
+		goto out_free_tio_cache;
 
 	_major = major;
 	r = register_blkdev(_major, _name);
-	if (r < 0) {
-		kmem_cache_destroy(_tio_cache);
-		kmem_cache_destroy(_io_cache);
-		dm_uevent_exit();
-		return r;
-	}
+	if (r < 0)
+		goto out_uevent_exit;
 
 	if (!_major)
 		_major = r;
 
 	return 0;
+
+out_uevent_exit:
+	dm_uevent_exit();
+out_free_tio_cache:
+	kmem_cache_destroy(_tio_cache);
+out_free_io_cache:
+	kmem_cache_destroy(_io_cache);
+
+	return r;
 }
 
 static void local_exit(void)
@@ -377,13 +376,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 static void start_io_acct(struct dm_io *io)
 {
 	struct mapped_device *md = io->md;
+	int cpu;
 
 	io->start_time = jiffies;
 
-	preempt_disable();
-	disk_round_stats(dm_disk(md));
-	preempt_enable();
-	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+	cpu = part_stat_lock();
+	part_round_stats(cpu, &dm_disk(md)->part0);
+	part_stat_unlock();
+	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
 static int end_io_acct(struct dm_io *io)
@@ -391,15 +391,16 @@ static int end_io_acct(struct dm_io *io)
 	struct mapped_device *md = io->md;
 	struct bio *bio = io->bio;
 	unsigned long duration = jiffies - io->start_time;
-	int pending;
+	int pending, cpu;
 	int rw = bio_data_dir(bio);
 
-	preempt_disable();
-	disk_round_stats(dm_disk(md));
-	preempt_enable();
-	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+	cpu = part_stat_lock();
+	part_round_stats(cpu, &dm_disk(md)->part0);
+	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+	part_stat_unlock();
 
-	disk_stat_add(dm_disk(md), ticks[rw], duration);
+	dm_disk(md)->part0.in_flight = pending =
+		atomic_dec_return(&md->pending);
 
 	return !pending;
 }
@@ -667,6 +668,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
 	clone->bi_io_vec->bv_len = clone->bi_size;
+	clone->bi_flags |= 1 << BIO_CLONED;
 
 	return clone;
 }
@@ -885,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 	int r = -EIO;
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
+	int cpu;
 
 	/*
 	 * There is no use in forwarding any barrier request since we can't
@@ -897,8 +900,10 @@
 
 	down_read(&md->io_lock);
 
-	disk_stat_inc(dm_disk(md), ios[rw]);
-	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+	part_stat_unlock();
 
 	/*
 	 * If we're suspended we have to queue
@@ -1146,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md);
 
 static void free_dev(struct mapped_device *md)
 {
-	int minor = md->disk->first_minor;
+	int minor = MINOR(disk_devt(md->disk));
 
 	if (md->suspended_bdev) {
 		unlock_fs(md);
@@ -1182,7 +1187,7 @@ static void event_callback(void *context)
 	list_splice_init(&md->uevent_list, &uevents);
 	spin_unlock_irqrestore(&md->uevent_lock, flags);
 
-	dm_send_uevents(&uevents, &md->disk->dev.kobj);
+	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
 	atomic_inc(&md->event_nr);
 	wake_up(&md->eventq);
@@ -1267,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
 
 	md = idr_find(&_minor_idr, minor);
 	if (md && (md == MINOR_ALLOCED ||
-		   (dm_disk(md)->first_minor != minor) ||
+		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
 		   test_bit(DMF_FREEING, &md->flags))) {
 		md = NULL;
 		goto out;
@@ -1318,7 +1323,8 @@ void dm_put(struct mapped_device *md)
 
 	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
 		map = dm_get_table(md);
-		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
+		idr_replace(&_minor_idr, MINOR_ALLOCED,
+			    MINOR(disk_devt(dm_disk(md))));
 		set_bit(DMF_FREEING, &md->flags);
 		spin_unlock(&_minor_lock);
 		if (!dm_suspended(md)) {
@@ -1388,9 +1394,6 @@ static void dm_wq_work(struct work_struct *work)
 
 	down_write(&md->io_lock);
 	switch (req->type) {
-	case DM_WQ_FLUSH_ALL:
-		__merge_pushback_list(md);
-		/* pass through */
 	case DM_WQ_FLUSH_DEFERRED:
 		__flush_deferred_io(md);
 		break;
@@ -1520,7 +1523,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 		if (!md->suspended_bdev) {
 			DMWARN("bdget failed in dm_suspend");
 			r = -ENOMEM;
-			goto flush_and_out;
+			goto out;
 		}
 
 		/*
@@ -1571,14 +1574,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 
 	set_bit(DMF_SUSPENDED, &md->flags);
 
-flush_and_out:
-	if (r && noflush)
-		/*
-		 * Because there may be already I/Os in the pushback list,
-		 * flush them before return.
-		 */
-		dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
-
 out:
	if (r && md->suspended_bdev) {
 		bdput(md->suspended_bdev);
@@ -1638,7 +1633,7 @@ out:
  *---------------------------------------------------------------*/
 void dm_kobject_uevent(struct mapped_device *md)
 {
-	kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
 }
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
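
For reference, a minimal sketch of the per-CPU statistics pattern the hunks above convert dm.c to, assuming the post-2.6.27 genhd API; account_bio() is a hypothetical helper for illustration, not part of the patch:

#include <linux/bio.h>
#include <linux/genhd.h>

/*
 * Hypothetical helper (illustration only): part_stat_lock() takes the
 * RCU read lock and pins the current CPU, so the per-CPU counters of a
 * struct hd_struct can be updated without atomic ops; part0 is the
 * whole-disk "partition" that replaces the old disk-wide disk_stat_*
 * counters.
 */
static void account_bio(struct gendisk *disk, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	part_round_stats(cpu, &disk->part0);	/* fold in elapsed in-flight time */
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();
}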