/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state);

static struct dma_client async_tx_dma = {
	.event_callback = dma_channel_add_remove,
	/* .cap_mask == 0 defaults to all channels */
};

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * chan_ref_percpu - tracks channel allocations per core/operation
 */
struct chan_ref_percpu {
	struct dma_chan_ref *ref;
};

static int channel_table_initialized;
static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];

/**
 * async_tx_lock - protect modification of async_tx_master_list and serialize
 *	rebalance operations
 */
static spinlock_t async_tx_lock;

static LIST_HEAD(async_tx_master_list);

/* async_tx_issue_pending_all - start all transactions on all channels */
void async_tx_issue_pending_all(void)
{
	struct dma_chan_ref *ref;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		ref->chan->device->device_issue_pending(ref->chan);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
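
/*
 * Illustrative usage (a sketch, not part of this file's API): a client such
 * as a raid offload path queues one or more asynchronous operations and then
 * kicks all channels once.  async_memcpy() is the front-end exported by
 * crypto/async_tx/async_memcpy.c; the dest/src pages and len below are
 * hypothetical caller state:
 *
 *	tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK,
 *			  NULL, NULL, NULL);
 *	...
 *	async_tx_issue_pending_all();
 */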

static void
free_dma_chan_ref(struct rcu_head *rcu)
{
	struct dma_chan_ref *ref;
	ref = container_of(rcu, struct dma_chan_ref, rcu);
	kfree(ref);
}

static void
init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
{
	INIT_LIST_HEAD(&ref->node);
	INIT_RCU_HEAD(&ref->rcu);
	ref->chan = chan;
	atomic_set(&ref->count, 0);
}

/**
 * get_chan_ref_by_cap - returns the nth channel of the given capability
 *	defaults to returning the channel with the desired capability and the
 *	lowest reference count if the index can not be satisfied
 * @cap: capability to match
 * @index: nth channel desired, passing -1 has the effect of forcing the
 *	default return value
 */
static struct dma_chan_ref *
get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
{
	struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
			if (!min_ref)
				min_ref = ref;
			else if (atomic_read(&ref->count) <
				atomic_read(&min_ref->count))
				min_ref = ref;
			if (index-- == 0) {
				ret_ref = ref;
				break;
			}
		}
	rcu_read_unlock();

	if (!ret_ref)
		ret_ref = min_ref;
	if (ret_ref)
		atomic_inc(&ret_ref->count);

	return ret_ref;
}

/**
 * async_tx_rebalance - redistribute the available channels, optimize
 * for cpu isolation in the SMP case, and operation isolation in the
 * uniprocessor case
 */
static void async_tx_rebalance(void)
{
	int cpu, cap, cpu_idx = 0;
	unsigned long flags;

	if (!channel_table_initialized)
		return;

	spin_lock_irqsave(&async_tx_lock, flags);

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu) {
			struct dma_chan_ref *ref =
				per_cpu_ptr(channel_table[cap], cpu)->ref;
			if (ref) {
				atomic_set(&ref->count, 0);
				per_cpu_ptr(channel_table[cap], cpu)->ref =
									NULL;
			}
		}

	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			struct dma_chan_ref *new;
			if (NR_CPUS > 1)
				new = get_chan_ref_by_cap(cap, cpu_idx++);
			else
				new = get_chan_ref_by_cap(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
		}

	spin_unlock_irqrestore(&async_tx_lock, flags);
}

static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state)
{
	unsigned long found, flags;
	struct dma_chan_ref *master_ref, *ref;
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		found = 0;
		rcu_read_lock();
		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				found = 1;
				break;
			}
		rcu_read_unlock();
		pr_debug("async_tx: dma resource available [%s]\n",
			found ? "old" : "new");
		if (!found)
			ack = DMA_ACK;
		else
			break;

		/* add the channel to the generic management list */
		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
		if (master_ref) {
			/* keep a reference until async_tx is unloaded */
			dma_chan_get(chan);
			init_dma_chan_ref(master_ref, chan);
			spin_lock_irqsave(&async_tx_lock, flags);
			list_add_tail_rcu(&master_ref->node,
				&async_tx_master_list);
			spin_unlock_irqrestore(&async_tx_lock,
				flags);
		} else {
			printk(KERN_WARNING "async_tx: unable to create"
				" new master entry in response to"
				" a DMA_RESOURCE_ADDED event"
				" (-ENOMEM)\n");
			return 0;
		}

		async_tx_rebalance();
		break;
	case DMA_RESOURCE_REMOVED:
		found = 0;
		spin_lock_irqsave(&async_tx_lock, flags);
		list_for_each_entry(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				/* permit backing devices to go away */
				dma_chan_put(ref->chan);
				list_del_rcu(&ref->node);
				call_rcu(&ref->rcu, free_dma_chan_ref);
				found = 1;
				break;
			}
		spin_unlock_irqrestore(&async_tx_lock, flags);
		pr_debug("async_tx: dma resource removed [%s]\n",
			found ? "ours" : "not ours");
		if (found)
			ack = DMA_ACK;
		else
			break;

		async_tx_rebalance();
		break;
	case DMA_RESOURCE_SUSPEND:
	case DMA_RESOURCE_RESUME:
		printk(KERN_WARNING "async_tx: does not support dma channel"
			" suspend/resume\n");
		break;
	default:
		BUG();
	}

	return ack;
}

static int __init async_tx_init(void)
{
	enum dma_transaction_type cap;

	spin_lock_init(&async_tx_lock);
	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* an interrupt will never be an explicit operation type.
	 * clearing this bit prevents allocation to a slot in 'channel_table'
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
		if (!channel_table[cap])
			goto err;
	}

	channel_table_initialized = 1;
	dma_async_client_register(&async_tx_dma);
	dma_async_client_chan_request(&async_tx_dma);

	printk(KERN_INFO "async_tx: api initialized (async)\n");
	return 0;
err:
	printk(KERN_ERR "async_tx: initialization failure\n");
	while (--cap >= 0)
		free_percpu(channel_table[cap]);
	return 1;
}

static void __exit async_tx_exit(void)
{
	enum dma_transaction_type cap;

	channel_table_initialized = 0;

	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		if (channel_table[cap])
			free_percpu(channel_table[cap]);

	dma_async_client_unregister(&async_tx_dma);
}

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @depend_tx: transaction dependency
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
	enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	else if (likely(channel_table_initialized)) {
		struct dma_chan_ref *ref;
		int cpu = get_cpu();
		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
		put_cpu();
		return ref ? ref->chan : NULL;
	} else
		return NULL;
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
	printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
	return 0;
}

static void __exit async_tx_exit(void)
{
	do { } while (0);
}
#endif

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}

/**
 * submit_disposition - while holding depend_tx->lock we must avoid submitting
 *	new operations to prevent a circular locking dependency with
 *	drivers that already hold a channel lock when calling
 *	async_tx_run_dependencies.
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};

void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	tx->callback = cb_fn;
	tx->callback_param = cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
		       tx->parent);

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			/* we have a parent so we can not submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				tx->parent = depend_tx;
				depend_tx->next = tx;
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		spin_unlock_bh(&depend_tx->lock);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			tx->parent = NULL;
			tx->tx_submit(tx);
			break;
		}
	} else {
		tx->parent = NULL;
		tx->tx_submit(tx);
	}

	if (flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
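
/*
 * Illustrative usage (a condensed sketch loosely modeled on the
 * async_memcpy() front-end in crypto/async_tx/async_memcpy.c, not a copy of
 * it): an async_* operation picks a channel, asks the driver to prepare a
 * descriptor, and hands it to async_tx_submit().  dma_dest, dma_src, len,
 * flags, cb_fn and cb_param below are hypothetical caller state:
 *
 *	struct dma_chan *chan;
 *	struct dma_device *device;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
 *	device = chan ? chan->device : NULL;
 *	tx = device ? device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 *						      len, 0) : NULL;
 *	if (tx)
 *		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 *	else
 *		... fall back to a synchronous copy on the cpu ...
 */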

/**
 * async_trigger_callback - schedules the callback function to be run after
 *	any dependent operations have been completed.
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: 'callback' requires the completion of this transaction
 * @cb_fn: function to call after depend_tx completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
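
/*
 * Illustrative usage (sketch): run a completion callback once a prior async
 * operation has finished.  Only async_memcpy(), async_trigger_callback() and
 * async_tx_issue_pending_all() are real exported interfaces here; 'done',
 * 'work_done' (a struct completion) and the page/len variables are
 * hypothetical caller state:
 *
 *	static void done(void *ctx)
 *	{
 *		complete(ctx);
 *	}
 *
 *	tx = async_memcpy(dest, src, 0, 0, len, 0, NULL, NULL, NULL);
 *	async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, tx,
 *			       done, &work_done);
 *	async_tx_issue_pending_all();
 *	wait_for_completion(&work_done);
 */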

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx - transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
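
/*
 * Illustrative usage (sketch): a synchronous fallback path quiesces its
 * dependency before touching the buffers on the cpu, mirroring the sync
 * branch of async_trigger_callback() above.  dest_buf, src_buf, len, cb_fn
 * and cb_param are hypothetical caller state:
 *
 *	if (!tx) {
 *		async_tx_quiesce(&depend_tx);
 *		memcpy(dest_buf, src_buf, len);
 *		async_tx_sync_epilog(cb_fn, cb_param);
 *	}
 */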

module_init(async_tx_init);
module_exit(async_tx_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");