diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 404cc7b6e70530066b5a91970d791c8ae8f978c0..29965231b9127e6e9e658997f26b72ffb69c4d3c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered; it's just set up by the driver.
  *
- * Each client has a channels list, it's only modified under the client->lock
- * and in an RCU callback, so it's safe to read under rcu_read_lock().
+ * Each client is responsible for keeping track of the channels it uses.  See
+ * the definition of dma_event_callback in dmaengine.h.
  *
  * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_put is done for each class_device registered.  When the
- * class_device is released, the coresponding kref_put is done in the release
+ * registered. A kref_get is done for each device registered.  When the
+ * device is released, the corresponding kref_put is done in the release
  * method. Every time one of the device's channels is allocated to a client,
  * a kref_get occurs.  When the channel is freed, the corresponding kref_put
  * happens. The device's release function does a completion, so
- * unregister_device does a remove event, class_device_unregister, a kref_put
+ * unregister_device does a remove event, device_unregister, a kref_put
  * for the first reference, then waits on the completion for all other
  * references to finish.
  *
  * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t.  A single reference is set when on an
- * ADDED event, and removed with a REMOVE event.  Net DMA client takes an
- * extra reference per outstanding transaction.  The relase function does a
- * kref_put on the device. -ChrisL
+ * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
+ * signals that it wants to use a channel, and dma_chan_put is called when
+ * a channel is removed or a client using it is unregistered.  A client can
+ * take extra references per outstanding transaction, as is the case with
+ * the NET DMA client.  The release function does a kref_put on the device.
+ *     -ChrisL, DanW
  */
 
 #include <linux/init.h>
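
To make the new model concrete: a client under this scheme supplies a capability
mask and an event callback, and answers each channel offer with DMA_ACK, DMA_DUP,
or DMA_NAK. The sketch below assumes the dma_client, dma_event_callback, and
enum dma_state definitions from dmaengine.h in this series; the example_* names
are hypothetical, not part of the patch.

	/* Hypothetical client: grab the first channel offered, then decline. */
	static struct dma_chan *example_chan;

	static enum dma_state_client
	example_event(struct dma_client *client, struct dma_chan *chan,
		      enum dma_state state)
	{
		switch (state) {
		case DMA_RESOURCE_AVAILABLE:
			if (example_chan)
				return DMA_NAK;	/* have one; stop further offers */
			example_chan = chan;
			return DMA_ACK;		/* core does dma_chan_get() for us */
		case DMA_RESOURCE_REMOVED:
			if (chan != example_chan)
				return DMA_DUP;	/* not ours; no reference to drop */
			example_chan = NULL;
			return DMA_ACK;		/* core does dma_chan_put() for us */
		default:
			return DMA_DUP;
		}
	}

	static struct dma_client example_client = {
		.event_callback	= example_event,
	};
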
@@ -75,9 +77,9 @@ static LIST_HEAD(dma_client_list);
 
 /* --- sysfs implementation --- */
 
-static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
+static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+       struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;
 
@@ -87,9 +89,10 @@ static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
        return sprintf(buf, "%lu\n", count);
 }
 
-static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
+static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
+                                     char *buf)
 {
-       struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+       struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;
 
@@ -99,14 +102,25 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
        return sprintf(buf, "%lu\n", count);
 }
 
-static ssize_t show_in_use(struct class_device *cd, char *buf)
+static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+       struct dma_chan *chan = to_dma_chan(dev);
+       int in_use = 0;
+
+       if (unlikely(chan->slow_ref) &&
+               atomic_read(&chan->refcount.refcount) > 1)
+               in_use = 1;
+       else {
+               if (local_read(&(per_cpu_ptr(chan->local,
+                       get_cpu())->refcount)) > 0)
+                       in_use = 1;
+               put_cpu();
+       }
 
-       return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
+       return sprintf(buf, "%d\n", in_use);
 }
 
-static struct class_device_attribute dma_class_attrs[] = {
+static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
@@ -115,56 +129,66 @@ static struct class_device_attribute dma_class_attrs[] = {
 
 static void dma_async_device_cleanup(struct kref *kref);
 
-static void dma_class_dev_release(struct class_device *cd)
+static void dma_dev_release(struct device *dev)
 {
-       struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+       struct dma_chan *chan = to_dma_chan(dev);
        kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 
 static struct class dma_devclass = {
-       .name            = "dma",
-       .class_dev_attrs = dma_class_attrs,
-       .release = dma_class_dev_release,
+       .name           = "dma",
+       .dev_attrs      = dma_attrs,
+       .dev_release    = dma_dev_release,
 };
 
 /* --- client and device registration --- */
 
+#define dma_chan_satisfies_mask(chan, mask) \
+       __dma_chan_satisfies_mask((chan), &(mask))
+static int
+__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+{
+       dma_cap_mask_t has;
+
+       bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+               DMA_TX_TYPE_END);
+       return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
+}
+
 /**
- * dma_client_chan_alloc - try to allocate a channel to a client
+ * dma_client_chan_alloc - try to allocate channels to a client
  * @client: &dma_client
  *
  * Called with dma_list_mutex held.
  */
-static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
+static void dma_client_chan_alloc(struct dma_client *client)
 {
        struct dma_device *device;
        struct dma_chan *chan;
-       unsigned long flags;
        int desc;       /* allocated descriptor count */
+       enum dma_state_client ack;
 
-       /* Find a channel, any DMA engine will do */
-       list_for_each_entry(device, &dma_device_list, global_node) {
+       /* Find a channel */
+       list_for_each_entry(device, &dma_device_list, global_node)
                list_for_each_entry(chan, &device->channels, device_node) {
-                       if (chan->client)
+                       if (!dma_chan_satisfies_mask(chan, client->cap_mask))
                                continue;
 
                        desc = chan->device->device_alloc_chan_resources(chan);
                        if (desc >= 0) {
-                               kref_get(&device->refcount);
-                               kref_init(&chan->refcount);
-                               chan->slow_ref = 0;
-                               INIT_RCU_HEAD(&chan->rcu);
-                               chan->client = client;
-                               spin_lock_irqsave(&client->lock, flags);
-                               list_add_tail_rcu(&chan->client_node,
-                                                 &client->channels);
-                               spin_unlock_irqrestore(&client->lock, flags);
-                               return chan;
+                               ack = client->event_callback(client,
+                                               chan,
+                                               DMA_RESOURCE_AVAILABLE);
+
+                               /* we are done once this client rejects
+                                * an available resource
+                                */
+                               if (ack == DMA_ACK)
+                                       dma_chan_get(chan);
+                               else if (ack == DMA_NAK)
+                                       return;
                        }
                }
-       }
-
-       return NULL;
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
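
The test in __dma_chan_satisfies_mask() above is a straight bitmap check: a
channel qualifies only if every capability the client wants is advertised in
its device's cap_mask. A worked example, assuming the dma_cap_zero()/
dma_cap_set() helpers from dmaengine.h (variable names illustrative):

	dma_cap_mask_t want;

	dma_cap_zero(want);
	dma_cap_set(DMA_MEMCPY, want);

	/*
	 * Channel caps DMA_MEMCPY | DMA_XOR:  want & caps == want  -> match
	 * Channel caps DMA_XOR only:          want & caps == 0     -> no match
	 */
	if (dma_chan_satisfies_mask(chan, want))
		desc = chan->device->device_alloc_chan_resources(chan);
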
@@ -193,7 +217,6 @@ void dma_chan_cleanup(struct kref *kref)
 {
        struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
        chan->device->device_free_chan_resources(chan);
-       chan->client = NULL;
        kref_put(&chan->device->refcount, dma_async_device_cleanup);
 }
 EXPORT_SYMBOL(dma_chan_cleanup);
@@ -209,7 +232,7 @@ static void dma_chan_free_rcu(struct rcu_head *rcu)
        kref_put(&chan->refcount, dma_chan_cleanup);
 }
 
-static void dma_client_chan_free(struct dma_chan *chan)
+static void dma_chan_release(struct dma_chan *chan)
 {
        atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
        chan->slow_ref = 1;
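
The rename to dma_chan_release() reflects what this path now does: it retires
the channel's "bigref". The 0x7FFFFFFF bias keeps the kref from reaching zero
while an RCU grace period lets fast-path users finish; dma_chan_free_rcu()
then does the final put. For context, the helpers clients call look roughly
like this (a sketch of the dmaengine.h side under this scheme, not code from
this patch):

	static inline void dma_chan_get(struct dma_chan *chan)
	{
		if (unlikely(chan->slow_ref))
			kref_get(&chan->refcount);
		else {
			local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
			put_cpu();
		}
	}

	static inline void dma_chan_put(struct dma_chan *chan)
	{
		if (unlikely(chan->slow_ref))
			kref_put(&chan->refcount, dma_chan_cleanup);
		else {
			local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
			put_cpu();
		}
	}
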
@@ -217,70 +240,54 @@ static void dma_client_chan_free(struct dma_chan *chan)
 }
 
 /**
- * dma_chans_rebalance - reallocate channels to clients
- *
- * When the number of DMA channel in the system changes,
- * channels need to be rebalanced among clients.
+ * dma_clients_notify_available - broadcast available channels to the clients
  */
-static void dma_chans_rebalance(void)
+static void dma_clients_notify_available(void)
 {
        struct dma_client *client;
-       struct dma_chan *chan;
-       unsigned long flags;
 
        mutex_lock(&dma_list_mutex);
 
-       list_for_each_entry(client, &dma_client_list, global_node) {
-               while (client->chans_desired > client->chan_count) {
-                       chan = dma_client_chan_alloc(client);
-                       if (!chan)
-                               break;
-                       client->chan_count++;
-                       client->event_callback(client,
-                                              chan,
-                                              DMA_RESOURCE_ADDED);
-               }
-               while (client->chans_desired < client->chan_count) {
-                       spin_lock_irqsave(&client->lock, flags);
-                       chan = list_entry(client->channels.next,
-                                         struct dma_chan,
-                                         client_node);
-                       list_del_rcu(&chan->client_node);
-                       spin_unlock_irqrestore(&client->lock, flags);
-                       client->chan_count--;
-                       client->event_callback(client,
-                                              chan,
-                                              DMA_RESOURCE_REMOVED);
-                       dma_client_chan_free(chan);
-               }
-       }
+       list_for_each_entry(client, &dma_client_list, global_node)
+               dma_client_chan_alloc(client);
 
        mutex_unlock(&dma_list_mutex);
 }
 
 /**
- * dma_async_client_register - allocate and register a &dma_client
- * @event_callback: callback for notification of channel addition/removal
+ * dma_clients_notify_removed - tell the clients that a channel is going away
+ * @chan: channel on its way out
  */
-struct dma_client *dma_async_client_register(dma_event_callback event_callback)
+static void dma_clients_notify_removed(struct dma_chan *chan)
 {
        struct dma_client *client;
+       enum dma_state_client ack;
 
-       client = kzalloc(sizeof(*client), GFP_KERNEL);
-       if (!client)
-               return NULL;
+       mutex_lock(&dma_list_mutex);
 
-       INIT_LIST_HEAD(&client->channels);
-       spin_lock_init(&client->lock);
-       client->chans_desired = 0;
-       client->chan_count = 0;
-       client->event_callback = event_callback;
+       list_for_each_entry(client, &dma_client_list, global_node) {
+               ack = client->event_callback(client, chan,
+                               DMA_RESOURCE_REMOVED);
+
+               /* client was holding resources for this channel so
+                * free it
+                */
+               if (ack == DMA_ACK)
+                       dma_chan_put(chan);
+       }
 
+       mutex_unlock(&dma_list_mutex);
+}
+
+/**
+ * dma_async_client_register - register a &dma_client
+ * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ */
+void dma_async_client_register(struct dma_client *client)
+{
        mutex_lock(&dma_list_mutex);
        list_add_tail(&client->global_node, &dma_client_list);
        mutex_unlock(&dma_list_mutex);
-
-       return client;
 }
 EXPORT_SYMBOL(dma_async_client_register);
 
@@ -292,40 +299,39 @@ EXPORT_SYMBOL(dma_async_client_register);
  */
 void dma_async_client_unregister(struct dma_client *client)
 {
+       struct dma_device *device;
        struct dma_chan *chan;
+       enum dma_state_client ack;
 
        if (!client)
                return;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(chan, &client->channels, client_node)
-               dma_client_chan_free(chan);
-       rcu_read_unlock();
-
        mutex_lock(&dma_list_mutex);
+       /* free all channels the client is holding */
+       list_for_each_entry(device, &dma_device_list, global_node)
+               list_for_each_entry(chan, &device->channels, device_node) {
+                       ack = client->event_callback(client, chan,
+                               DMA_RESOURCE_REMOVED);
+
+                       if (ack == DMA_ACK)
+                               dma_chan_put(chan);
+               }
+
        list_del(&client->global_node);
        mutex_unlock(&dma_list_mutex);
-
-       kfree(client);
-       dma_chans_rebalance();
 }
 EXPORT_SYMBOL(dma_async_client_unregister);
 
 /**
- * dma_async_client_chan_request - request DMA channels
- * @client: &dma_client
- * @number: count of DMA channels requested
- *
- * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
- * The resulting allocations/frees are indicated to the client via the
- * event callback.
+ * dma_async_client_chan_request - announce to the client all available
+ * channels that satisfy its capability mask
+ * @client: requester
  */
-void dma_async_client_chan_request(struct dma_client *client,
-                       unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client)
 {
-       client->chans_desired = number;
-       dma_chans_rebalance();
+       mutex_lock(&dma_list_mutex);
+       dma_client_chan_alloc(client);
+       mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL(dma_async_client_chan_request);
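
Taken together with the callback sketched earlier, a client's lifetime under
the new API reduces to roughly the following (using the hypothetical
example_client from above):

	dma_cap_zero(example_client.cap_mask);
	dma_cap_set(DMA_MEMCPY, example_client.cap_mask);

	dma_async_client_register(&example_client);
	/* offers every currently available channel matching cap_mask */
	dma_async_client_chan_request(&example_client);

	/* ... use example_chan ... */

	/* replays DMA_RESOURCE_REMOVED per channel, dropping our references */
	dma_async_client_unregister(&example_client);
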
 
@@ -372,12 +378,12 @@ int dma_async_device_register(struct dma_device *device)
                        continue;
 
                chan->chan_id = chancnt++;
-               chan->class_dev.class = &dma_devclass;
-               chan->class_dev.dev = NULL;
-               snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
+               chan->dev.class = &dma_devclass;
+               chan->dev.parent = NULL;
+               snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
                         device->dev_id, chan->chan_id);
 
-               rc = class_device_register(&chan->class_dev);
+               rc = device_register(&chan->dev);
                if (rc) {
                        chancnt--;
                        free_percpu(chan->local);
@@ -385,14 +391,19 @@ int dma_async_device_register(struct dma_device *device)
                        goto err_out;
                }
 
+               /* One for the channel, one for the class device */
+               kref_get(&device->refcount);
                kref_get(&device->refcount);
+               kref_init(&chan->refcount);
+               chan->slow_ref = 0;
+               INIT_RCU_HEAD(&chan->rcu);
        }
 
        mutex_lock(&dma_list_mutex);
        list_add_tail(&device->global_node, &dma_device_list);
        mutex_unlock(&dma_list_mutex);
 
-       dma_chans_rebalance();
+       dma_clients_notify_available();
 
        return 0;
 
@@ -401,7 +412,7 @@ err_out:
                if (chan->local == NULL)
                        continue;
                kref_put(&device->refcount, dma_async_device_cleanup);
-               class_device_unregister(&chan->class_dev);
+               device_unregister(&chan->dev);
                chancnt--;
                free_percpu(chan->local);
        }
@@ -428,26 +439,16 @@ static void dma_async_device_cleanup(struct kref *kref)
 void dma_async_device_unregister(struct dma_device *device)
 {
        struct dma_chan *chan;
-       unsigned long flags;
 
        mutex_lock(&dma_list_mutex);
        list_del(&device->global_node);
        mutex_unlock(&dma_list_mutex);
 
        list_for_each_entry(chan, &device->channels, device_node) {
-               if (chan->client) {
-                       spin_lock_irqsave(&chan->client->lock, flags);
-                       list_del(&chan->client_node);
-                       chan->client->chan_count--;
-                       spin_unlock_irqrestore(&chan->client->lock, flags);
-                       chan->client->event_callback(chan->client,
-                                                    chan,
-                                                    DMA_RESOURCE_REMOVED);
-                       dma_client_chan_free(chan);
-               }
-               class_device_unregister(&chan->class_dev);
+               dma_clients_notify_removed(chan);
+               device_unregister(&chan->dev);
+               dma_chan_release(chan);
        }
-       dma_chans_rebalance();
 
        kref_put(&device->refcount, dma_async_device_cleanup);
        wait_for_completion(&device->done);
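
The teardown here leans on the accounting set up in the register path above; a
sketch of who drops what, derived from this patch's header comment rather than
quoted from the tree:

	/*
	 * device->refcount: starts at 1, +2 per channel at register time
	 * (one for the channel itself, one for its class device).
	 *   - device_unregister() -> dma_dev_release() puts the
	 *     class-device reference
	 *   - dma_chan_cleanup() puts the channel reference once the
	 *     channel's own kref dies
	 *   - the final kref_put() here drops the initial reference;
	 *     wait_for_completion() blocks until the release method
	 *     completes device->done
	 *
	 * chan->refcount (the bigref): one reference per client that
	 * answered DMA_ACK.
	 *   - dma_clients_notify_removed() asks clients to drop theirs
	 *   - dma_chan_release() biases the kref and, after an RCU grace
	 *     period, dma_chan_free_rcu() does the final put, invoking
	 *     dma_chan_cleanup()
	 */
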
@@ -472,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 {
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
-       dma_addr_t addr;
+       dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;
 
-       tx = dev->device_prep_dma_memcpy(chan, len, 0);
-       if (!tx)
+       dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+       dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+       if (!tx) {
+               dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+               dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
+       }
 
        tx->ack = 1;
        tx->callback = NULL;
-       addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-       tx->tx_set_src(addr, tx, 0);
-       addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-       tx->tx_set_dest(addr, tx, 0);
        cookie = tx->tx_submit(tx);
 
        cpu = get_cpu();
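
With the mapping folded into the helper, callers now pass plain kernel virtual
addresses and can poll for completion with dma_sync_wait() from earlier in this
file. Roughly (a sketch assuming the DMA_SUCCESS status value from dmaengine.h;
error handling abbreviated):

	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0)
		return -ENOMEM;		/* no descriptor was available */

	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
		return -EIO;		/* transfer failed or timed out */
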
@@ -516,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 {
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
-       dma_addr_t addr;
+       dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;
 
-       tx = dev->device_prep_dma_memcpy(chan, len, 0);
-       if (!tx)
+       dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+       dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+       if (!tx) {
+               dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+               dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
+       }
 
        tx->ack = 1;
        tx->callback = NULL;
-       addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-       tx->tx_set_src(addr, tx, 0);
-       addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-       tx->tx_set_dest(addr, tx, 0);
        cookie = tx->tx_submit(tx);
 
        cpu = get_cpu();
@@ -562,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
-       dma_addr_t addr;
+       dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;
 
-       tx = dev->device_prep_dma_memcpy(chan, len, 0);
-       if (!tx)
+       dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+       dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+                               DMA_FROM_DEVICE);
+       tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+       if (!tx) {
+               dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+               dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
+       }
 
        tx->ack = 1;
        tx->callback = NULL;
-       addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-       tx->tx_set_src(addr, tx, 0);
-       addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
-       tx->tx_set_dest(addr, tx, 0);
        cookie = tx->tx_submit(tx);
 
        cpu = get_cpu();