X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fdma%2Fdmaengine.c;h=29965231b9127e6e9e658997f26b72ffb69c4d3c;hb=0bb67f181834044db6e9b15c7d5cc3cce0489bfd;hp=82489923af09a6ae048a50c1bf471e43f9260492;hpb=c2dc1ad582196208a2f990eb0230eb922046c684;p=linux-2.6-omap-h63xx.git diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 82489923af0..29965231b91 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -41,12 +41,12 @@ * the definition of dma_event_callback in dmaengine.h. * * Each device has a kref, which is initialized to 1 when the device is - * registered. A kref_get is done for each class_device registered. When the - * class_device is released, the coresponding kref_put is done in the release + * registered. A kref_get is done for each device registered. When the + * device is released, the coresponding kref_put is done in the release * method. Every time one of the device's channels is allocated to a client, * a kref_get occurs. When the channel is freed, the coresponding kref_put * happens. The device's release function does a completion, so - * unregister_device does a remove event, class_device_unregister, a kref_put + * unregister_device does a remove event, device_unregister, a kref_put * for the first reference, then waits on the completion for all other * references to finish. * @@ -77,9 +77,9 @@ static LIST_HEAD(dma_client_list); /* --- sysfs implementation --- */ -static ssize_t show_memcpy_count(struct class_device *cd, char *buf) +static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) { - struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev); + struct dma_chan *chan = to_dma_chan(dev); unsigned long count = 0; int i; @@ -89,9 +89,10 @@ static ssize_t show_memcpy_count(struct class_device *cd, char *buf) return sprintf(buf, "%lu\n", count); } -static ssize_t show_bytes_transferred(struct class_device *cd, char *buf) +static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, + char *buf) { - struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev); + struct dma_chan *chan = to_dma_chan(dev); unsigned long count = 0; int i; @@ -101,9 +102,9 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf) return sprintf(buf, "%lu\n", count); } -static ssize_t show_in_use(struct class_device *cd, char *buf) +static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) { - struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev); + struct dma_chan *chan = to_dma_chan(dev); int in_use = 0; if (unlikely(chan->slow_ref) && @@ -119,7 +120,7 @@ static ssize_t show_in_use(struct class_device *cd, char *buf) return sprintf(buf, "%d\n", in_use); } -static struct class_device_attribute dma_class_attrs[] = { +static struct device_attribute dma_attrs[] = { __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL), __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL), __ATTR(in_use, S_IRUGO, show_in_use, NULL), @@ -128,16 +129,16 @@ static struct class_device_attribute dma_class_attrs[] = { static void dma_async_device_cleanup(struct kref *kref); -static void dma_class_dev_release(struct class_device *cd) +static void dma_dev_release(struct device *dev) { - struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev); + struct dma_chan *chan = to_dma_chan(dev); kref_put(&chan->device->refcount, dma_async_device_cleanup); } static struct class dma_devclass = { - .name = 
"dma", - .class_dev_attrs = dma_class_attrs, - .release = dma_class_dev_release, + .name = "dma", + .dev_attrs = dma_attrs, + .dev_release = dma_dev_release, }; /* --- client and device registration --- */ @@ -182,10 +183,9 @@ static void dma_client_chan_alloc(struct dma_client *client) /* we are done once this client rejects * an available resource */ - if (ack == DMA_ACK) { + if (ack == DMA_ACK) dma_chan_get(chan); - kref_get(&device->refcount); - } else if (ack == DMA_NAK) + else if (ack == DMA_NAK) return; } } @@ -272,11 +272,8 @@ static void dma_clients_notify_removed(struct dma_chan *chan) /* client was holding resources for this channel so * free it */ - if (ack == DMA_ACK) { + if (ack == DMA_ACK) dma_chan_put(chan); - kref_put(&chan->device->refcount, - dma_async_device_cleanup); - } } mutex_unlock(&dma_list_mutex); @@ -316,11 +313,8 @@ void dma_async_client_unregister(struct dma_client *client) ack = client->event_callback(client, chan, DMA_RESOURCE_REMOVED); - if (ack == DMA_ACK) { + if (ack == DMA_ACK) dma_chan_put(chan); - kref_put(&chan->device->refcount, - dma_async_device_cleanup); - } } list_del(&client->global_node); @@ -384,12 +378,12 @@ int dma_async_device_register(struct dma_device *device) continue; chan->chan_id = chancnt++; - chan->class_dev.class = &dma_devclass; - chan->class_dev.dev = NULL; - snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d", + chan->dev.class = &dma_devclass; + chan->dev.parent = NULL; + snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", device->dev_id, chan->chan_id); - rc = class_device_register(&chan->class_dev); + rc = device_register(&chan->dev); if (rc) { chancnt--; free_percpu(chan->local); @@ -397,6 +391,8 @@ int dma_async_device_register(struct dma_device *device) goto err_out; } + /* One for the channel, one of the class device */ + kref_get(&device->refcount); kref_get(&device->refcount); kref_init(&chan->refcount); chan->slow_ref = 0; @@ -416,7 +412,7 @@ err_out: if (chan->local == NULL) continue; kref_put(&device->refcount, dma_async_device_cleanup); - class_device_unregister(&chan->class_dev); + device_unregister(&chan->dev); chancnt--; free_percpu(chan->local); } @@ -450,7 +446,7 @@ void dma_async_device_unregister(struct dma_device *device) list_for_each_entry(chan, &device->channels, device_node) { dma_clients_notify_removed(chan); - class_device_unregister(&chan->class_dev); + device_unregister(&chan->dev); dma_chan_release(chan); } @@ -477,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx; - dma_addr_t addr; + dma_addr_t dma_dest, dma_src; dma_cookie_t cookie; int cpu; - tx = dev->device_prep_dma_memcpy(chan, len, 0); - if (!tx) + dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); + dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); + tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0); + + if (!tx) { + dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); + dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); return -ENOMEM; + } tx->ack = 1; tx->callback = NULL; - addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); - tx->tx_set_src(addr, tx, 0); - addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); - tx->tx_set_dest(addr, tx, 0); cookie = tx->tx_submit(tx); cpu = get_cpu(); @@ -521,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx; - 
dma_addr_t addr; + dma_addr_t dma_dest, dma_src; dma_cookie_t cookie; int cpu; - tx = dev->device_prep_dma_memcpy(chan, len, 0); - if (!tx) + dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); + dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); + tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0); + + if (!tx) { + dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); + dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); return -ENOMEM; + } tx->ack = 1; tx->callback = NULL; - addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); - tx->tx_set_src(addr, tx, 0); - addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); - tx->tx_set_dest(addr, tx, 0); cookie = tx->tx_submit(tx); cpu = get_cpu(); @@ -567,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx; - dma_addr_t addr; + dma_addr_t dma_dest, dma_src; dma_cookie_t cookie; int cpu; - tx = dev->device_prep_dma_memcpy(chan, len, 0); - if (!tx) + dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); + dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, + DMA_FROM_DEVICE); + tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0); + + if (!tx) { + dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); + dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); return -ENOMEM; + } tx->ack = 1; tx->callback = NULL; - addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); - tx->tx_set_src(addr, tx, 0); - addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE); - tx->tx_set_dest(addr, tx, 0); cookie = tx->tx_submit(tx); cpu = get_cpu();
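
Note on the pattern above (not part of the patch): after this change every dma_async_memcpy_* helper maps the source and destination itself, passes the resulting bus addresses straight to device_prep_dma_memcpy() with its new (chan, dma_dest, dma_src, len, flags) signature, and unmaps again if the driver returns no descriptor; the old tx_set_src/tx_set_dest hooks are gone. Below is a minimal sketch of that flow for the buf-to-buf case. The function name sketch_memcpy is hypothetical and the per-CPU statistics bookkeeping done by the real helpers is reduced to a comment.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative sketch of the post-patch memcpy offload flow:
 * map both buffers, ask the driver for a descriptor, and on
 * failure undo the mappings before reporting -ENOMEM.
 */
static dma_cookie_t sketch_memcpy(struct dma_chan *chan, void *dest,
				  void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;

	dma_src  = dma_map_single(dev->dev, src,  len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
	if (!tx) {
		/* No descriptor available: release the mappings. */
		dma_unmap_single(dev->dev, dma_src,  len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->ack = 1;
	tx->callback = NULL;

	/* The real helpers also bump the channel's per-CPU
	 * bytes_transferred and memcpy_count counters here. */
	return tx->tx_submit(tx);
}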