pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
author	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 3 Apr 2009 19:13:45 +0000 (12:13 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 3 Apr 2009 19:13:45 +0000 (12:13 -0700)
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  dma: Add SoF and EoF debugging to ipu_idmac.c, minor cleanup
  dw_dmac: add cyclic API to DW DMA driver
  dmaengine: Add privatecnt to revert DMA_PRIVATE property
  dmatest: add dma interrupts and callbacks
  dmatest: add xor test
  dmaengine: allow dma support for async_tx to be toggled
  async_tx: provide __async_inline for HAS_DMA=n archs
  dmaengine: kill some unused headers
  dmaengine: initialize tx_list in dma_async_tx_descriptor_init
  dma: i.MX31 IPU DMA robustness improvements
  dma: improve section assignment in i.MX31 IPU DMA driver
  dma: ipu_idmac driver cosmetic clean-up
  dmaengine: fail device registration if channel registration fails
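
The headline change in this merge is the cyclic (circular-buffer) API added to dw_dmac. A minimal usage sketch follows, assuming a slave driver that has already obtained a DesignWare channel via dma_request_channel() with chan->private pointing at its struct dw_dma_slave; start_cyclic_tx(), stop_cyclic_tx(), period_done(), buf, buf_len and period_len are illustrative names, only the dw_dma_cyclic_*() calls come from this merge:

#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
#include <linux/err.h>

static void period_done(void *arg)
{
	/* Invoked once per completed period, from the DMAC tasklet. */
}

static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = period_done;
	cdesc->period_callback_param = chan;

	/* Per the kerneldoc, start/stop must run with softirqs disabled. */
	return dw_dma_cyclic_start(chan);
}

static void stop_cyclic_tx(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}

Note that dw_dma_cyclic_prep() rejects periods larger than DWC_MAX_COUNT elements, periods or buffers not aligned to the slave's register width, and more periods than NR_DESCS_PER_CHANNEL, so period_len has to respect the slave's reg_width.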

drivers/dma/dw_dmac.c
drivers/dma/ipu/ipu_idmac.c

diff --combined drivers/dma/dw_dmac.c
index 20ad3d26bec2d5330ac40bd8b437475009e0d8f0,0b8aada08aa8c750d76d4ea67b0025117d61dc14..98c9a847bf51c27a8671cd3947690458e0896746
@@@ -363,6 -363,82 +363,82 @@@ static void dwc_handle_error(struct dw_
        dwc_descriptor_complete(dwc, bad_desc);
  }
  
+ /* --------------------- Cyclic DMA API extensions -------------------- */
+
+ inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+ {
+       struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+       return channel_readl(dwc, SAR);
+ }
+ EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+ inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+ {
+       struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+       return channel_readl(dwc, DAR);
+ }
+ EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+ /* called with dwc->lock held and all DMAC interrupts disabled */
+ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+               u32 status_block, u32 status_err, u32 status_xfer)
+ {
+       if (status_block & dwc->mask) {
+               void (*callback)(void *param);
+               void *callback_param;
+               dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+                               channel_readl(dwc, LLP));
+               dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+               callback = dwc->cdesc->period_callback;
+               callback_param = dwc->cdesc->period_callback_param;
+               if (callback) {
+                       spin_unlock(&dwc->lock);
+                       callback(callback_param);
+                       spin_lock(&dwc->lock);
+               }
+       }
+       /*
+        * Error and transfer complete are highly unlikely, and will most
+        * likely be due to a configuration error by the user.
+        */
+       if (unlikely(status_err & dwc->mask) ||
+                       unlikely(status_xfer & dwc->mask)) {
+               int i;
+               dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+                               "interrupt, stopping DMA transfer\n",
+                               status_xfer ? "xfer" : "error");
+               dev_err(chan2dev(&dwc->chan),
+                       "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+                       channel_readl(dwc, SAR),
+                       channel_readl(dwc, DAR),
+                       channel_readl(dwc, LLP),
+                       channel_readl(dwc, CTL_HI),
+                       channel_readl(dwc, CTL_LO));
+               channel_clear_bit(dw, CH_EN, dwc->mask);
+               while (dma_readl(dw, CH_EN) & dwc->mask)
+                       cpu_relax();
+               /* make sure DMA does not restart by loading a new list */
+               channel_writel(dwc, LLP, 0);
+               channel_writel(dwc, CTL_LO, 0);
+               channel_writel(dwc, CTL_HI, 0);
+               dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+               dma_writel(dw, CLEAR.ERROR, dwc->mask);
+               dma_writel(dw, CLEAR.XFER, dwc->mask);
+               for (i = 0; i < dwc->cdesc->periods; i++)
+                       dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+       }
+ }
+
+ /* ------------------------------------------------------------------------- */
+
  static void dw_dma_tasklet(unsigned long data)
  {
        struct dw_dma *dw = (struct dw_dma *)data;
        struct dw_dma_chan *dwc;
        u32 status_block;
        u32 status_xfer;
        u32 status_err;
        int i;

        status_block = dma_readl(dw, RAW.BLOCK);
        status_xfer = dma_readl(dw, RAW.XFER);
        status_err = dma_readl(dw, RAW.ERROR);

        for (i = 0; i < dw->dma.chancnt; i++) {
                dwc = &dw->chan[i];
                spin_lock(&dwc->lock);
-               if (status_err & (1 << i))
+               if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+                       dwc_handle_cyclic(dw, dwc, status_block, status_err,
+                                       status_xfer);
+               else if (status_err & (1 << i))
                        dwc_handle_error(dw, dwc);
                else if ((status_block | status_xfer) & (1 << i))
                        dwc_scan_descriptors(dw, dwc);
@@@ -826,7 -905,6 +905,6 @@@ static int dwc_alloc_chan_resources(str
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = dwc_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
-               INIT_LIST_HEAD(&desc->txd.tx_list);
                desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
                                sizeof(desc->lli), DMA_TO_DEVICE);
                dwc_desc_put(dwc, desc);
@@@ -884,6 -962,257 +962,257 @@@ static void dwc_free_chan_resources(str
        dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
  }
  
+ /* --------------------- Cyclic DMA API extensions -------------------- */
+
+ /**
+  * dw_dma_cyclic_start - start the cyclic DMA transfer
+  * @chan: the DMA channel to start
+  *
+  * Must be called with soft interrupts disabled. Returns zero on success or
+  * -errno on failure.
+  */
+ int dw_dma_cyclic_start(struct dma_chan *chan)
+ {
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+       if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+               dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+               return -ENODEV;
+       }
+       spin_lock(&dwc->lock);
+       /* assert channel is idle */
+       if (dma_readl(dw, CH_EN) & dwc->mask) {
+               dev_err(chan2dev(&dwc->chan),
+                       "BUG: Attempted to start non-idle channel\n");
+               dev_err(chan2dev(&dwc->chan),
+                       "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+                       channel_readl(dwc, SAR),
+                       channel_readl(dwc, DAR),
+                       channel_readl(dwc, LLP),
+                       channel_readl(dwc, CTL_HI),
+                       channel_readl(dwc, CTL_LO));
+               spin_unlock(&dwc->lock);
+               return -EBUSY;
+       }
+       dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+       dma_writel(dw, CLEAR.ERROR, dwc->mask);
+       dma_writel(dw, CLEAR.XFER, dwc->mask);
+       /* setup DMAC channel registers */
+       channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+       channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+       channel_writel(dwc, CTL_HI, 0);
+       channel_set_bit(dw, CH_EN, dwc->mask);
+       spin_unlock(&dwc->lock);
+       return 0;
+ }
+ EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+ /**
+  * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+  * @chan: the DMA channel to stop
+  *
+  * Must be called with soft interrupts disabled.
+  */
+ void dw_dma_cyclic_stop(struct dma_chan *chan)
+ {
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+       spin_lock(&dwc->lock);
+       channel_clear_bit(dw, CH_EN, dwc->mask);
+       while (dma_readl(dw, CH_EN) & dwc->mask)
+               cpu_relax();
+       spin_unlock(&dwc->lock);
+ }
+ EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+ /**
+  * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+  * @chan: the DMA channel to prepare
+  * @buf_addr: physical DMA address where the buffer starts
+  * @buf_len: total number of bytes for the entire buffer
+  * @period_len: number of bytes for each period
+  * @direction: transfer direction, to or from device
+  *
+  * Must be called before trying to start the transfer. Returns a valid struct
+  * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+  */
+ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+               dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+               enum dma_data_direction direction)
+ {
+       struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
+       struct dw_cyclic_desc           *cdesc;
+       struct dw_cyclic_desc           *retval = NULL;
+       struct dw_desc                  *desc;
+       struct dw_desc                  *last = NULL;
+       struct dw_dma_slave             *dws = chan->private;
+       unsigned long                   was_cyclic;
+       unsigned int                    reg_width;
+       unsigned int                    periods;
+       unsigned int                    i;
+       spin_lock_bh(&dwc->lock);
+       if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+               spin_unlock_bh(&dwc->lock);
+               dev_dbg(chan2dev(&dwc->chan),
+                               "queue and/or active list are not empty\n");
+               return ERR_PTR(-EBUSY);
+       }
+       was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+       spin_unlock_bh(&dwc->lock);
+       if (was_cyclic) {
+               dev_dbg(chan2dev(&dwc->chan),
+                               "channel already prepared for cyclic DMA\n");
+               return ERR_PTR(-EBUSY);
+       }
+       retval = ERR_PTR(-EINVAL);
+       reg_width = dws->reg_width;
+       periods = buf_len / period_len;
+       /* Check for too big/unaligned periods and unaligned DMA buffer. */
+       if (period_len > (DWC_MAX_COUNT << reg_width))
+               goto out_err;
+       if (unlikely(period_len & ((1 << reg_width) - 1)))
+               goto out_err;
+       if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+               goto out_err;
+       if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+               goto out_err;
+       retval = ERR_PTR(-ENOMEM);
+       if (periods > NR_DESCS_PER_CHANNEL)
+               goto out_err;
+       cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+       if (!cdesc)
+               goto out_err;
+       cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+       if (!cdesc->desc)
+               goto out_err_alloc;
+       for (i = 0; i < periods; i++) {
+               desc = dwc_desc_get(dwc);
+               if (!desc)
+                       goto out_err_desc_get;
+               switch (direction) {
+               case DMA_TO_DEVICE:
+                       desc->lli.dar = dws->tx_reg;
+                       desc->lli.sar = buf_addr + (period_len * i);
+                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+                                       | DWC_CTLL_DST_WIDTH(reg_width)
+                                       | DWC_CTLL_SRC_WIDTH(reg_width)
+                                       | DWC_CTLL_DST_FIX
+                                       | DWC_CTLL_SRC_INC
+                                       | DWC_CTLL_FC_M2P
+                                       | DWC_CTLL_INT_EN);
+                       break;
+               case DMA_FROM_DEVICE:
+                       desc->lli.dar = buf_addr + (period_len * i);
+                       desc->lli.sar = dws->rx_reg;
+                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+                                       | DWC_CTLL_SRC_WIDTH(reg_width)
+                                       | DWC_CTLL_DST_WIDTH(reg_width)
+                                       | DWC_CTLL_DST_INC
+                                       | DWC_CTLL_SRC_FIX
+                                       | DWC_CTLL_FC_P2M
+                                       | DWC_CTLL_INT_EN);
+                       break;
+               default:
+                       break;
+               }
+               desc->lli.ctlhi = (period_len >> reg_width);
+               cdesc->desc[i] = desc;
+               if (last) {
+                       last->lli.llp = desc->txd.phys;
+                       dma_sync_single_for_device(chan2parent(chan),
+                                       last->txd.phys, sizeof(last->lli),
+                                       DMA_TO_DEVICE);
+               }
+               last = desc;
+       }
+       /* let's make a cyclic list */
+       last->lli.llp = cdesc->desc[0]->txd.phys;
+       dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+                       sizeof(last->lli), DMA_TO_DEVICE);
+       dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+                       "period %zu periods %d\n", buf_addr, buf_len,
+                       period_len, periods);
+       cdesc->periods = periods;
+       dwc->cdesc = cdesc;
+       return cdesc;
+ out_err_desc_get:
+       while (i--)
+               dwc_desc_put(dwc, cdesc->desc[i]);
+ out_err_alloc:
+       kfree(cdesc);
+ out_err:
+       clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+       return (struct dw_cyclic_desc *)retval;
+ }
+ EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+ /**
+  * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+  * @chan: the DMA channel to free
+  */
+ void dw_dma_cyclic_free(struct dma_chan *chan)
+ {
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+       struct dw_cyclic_desc   *cdesc = dwc->cdesc;
+       int                     i;
+       dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+       if (!cdesc)
+               return;
+       spin_lock_bh(&dwc->lock);
+       channel_clear_bit(dw, CH_EN, dwc->mask);
+       while (dma_readl(dw, CH_EN) & dwc->mask)
+               cpu_relax();
+       dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+       dma_writel(dw, CLEAR.ERROR, dwc->mask);
+       dma_writel(dw, CLEAR.XFER, dwc->mask);
+       spin_unlock_bh(&dwc->lock);
+       for (i = 0; i < cdesc->periods; i++)
+               dwc_desc_put(dwc, cdesc->desc[i]);
+       kfree(cdesc->desc);
+       kfree(cdesc);
+       clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ }
+ EXPORT_SYMBOL(dw_dma_cyclic_free);
+
  /*----------------------------------------------------------------------*/
  
  static void dw_dma_off(struct dw_dma *dw)
@@@ -1011,7 -1340,7 +1340,7 @@@ static int __init dw_probe(struct platf
        dma_writel(dw, CFG, DW_CFG_DMA_EN);
  
        printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
 -                      pdev->dev.bus_id, dw->dma.chancnt);
 +                      dev_name(&pdev->dev), dw->dma.chancnt);
  
        dma_async_device_register(&dw->dma);
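
As an aside on the two address helpers exported earlier in this file: dw_dma_get_src_addr() and dw_dma_get_dst_addr() let a client poll the current transfer position, e.g. for an audio pointer callback. A hedged sketch, where buf is the dma_addr_t that was handed to dw_dma_cyclic_prep() and cyclic_tx_offset() is an illustrative name:

#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>

/*
 * For a DMA_TO_DEVICE cyclic transfer the source side walks the memory
 * buffer, so SAR gives the current read position inside it.
 */
static size_t cyclic_tx_offset(struct dma_chan *chan, dma_addr_t buf)
{
	return dw_dma_get_src_addr(chan) - buf;
}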
  
index da781d1078951f9164dcf5c1da1706f376b1a21d,90773844cc89ac28d9f83a469355852a1138a562..e202a6ce55735bf3d6fdb456f718fade1301ba32
@@@ -28,6 -28,9 +28,9 @@@
  #define FS_VF_IN_VALID        0x00000002
  #define FS_ENC_IN_VALID       0x00000001
  
+ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
+                              bool wait_for_stop);
+
  /*
   * There can be only one, we could allocate it dynamically, but then we'd have
   * to add an extra parameter to some functions, and use something as ugly as
@@@ -107,7 -110,7 +110,7 @@@ static uint32_t bytes_per_pixel(enum pi
        }
  }
  
- /* Enable / disable direct write to memory by the Camera Sensor Interface */
+ /* Enable direct write to memory by the Camera Sensor Interface */
  static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
  {
        uint32_t ic_conf, mask;
        idmac_write_icreg(ipu, ic_conf, IC_CONF);
  }
  
+ /* Called under spin_lock_irqsave(&ipu_data.lock) */
  static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
  {
        uint32_t ic_conf, mask;
@@@ -422,7 -426,7 +426,7 @@@ static void ipu_ch_param_set_size(unio
                break;
        default:
                dev_err(ipu_data.dev,
-                       "mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
+                       "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
                break;
        }
  
@@@ -433,20 -437,20 +437,20 @@@ static void ipu_ch_param_set_burst_size
                                        uint16_t burst_pixels)
  {
        params->pp.npb = burst_pixels - 1;
- };
+ }
  
  static void ipu_ch_param_set_buffer(union chan_param_mem *params,
                                    dma_addr_t buf0, dma_addr_t buf1)
  {
        params->pp.eba0 = buf0;
        params->pp.eba1 = buf1;
- };
+ }
  
  static void ipu_ch_param_set_rotation(union chan_param_mem *params,
                                      enum ipu_rotate_mode rotate)
  {
        params->pp.bam = rotate;
- };
+ }
  
  static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
                                uint32_t num_words)
@@@ -571,7 -575,7 +575,7 @@@ static uint32_t dma_param_addr(uint32_
  {
        /* Channel Parameter Memory */
        return 0x10000 | (dma_ch << 4);
- };
+ }
  
  static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
                                     bool prio)
@@@ -611,7 -615,8 +615,8 @@@ static uint32_t ipu_channel_conf_mask(e
  
  /**
   * ipu_enable_channel() - enable an IPU channel.
-  * @channel:  channel ID.
+  * @idmac:    IPU DMAC context.
+  * @ichan:    IDMAC channel.
   * @return:   0 on success or negative error code on failure.
   */
  static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
  
  /**
   * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
-  * @channel:  channel ID.
+  * @ichan:    IDMAC channel.
   * @pixel_fmt:        pixel format of buffer. Pixel format is a FOURCC ASCII code.
   * @width:    width of buffer in pixels.
   * @height:   height of buffer in pixels.
@@@ -687,7 -692,7 +692,7 @@@ static int ipu_init_channel_buffer(stru
        }
  
        /* IC channel's stride must be a multiple of 8 pixels */
-       if ((channel <= 13) && (stride % 8)) {
+       if ((channel <= IDMAC_IC_13) && (stride % 8)) {
                dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
                return -EINVAL;
        }
@@@ -752,7 -757,7 +757,7 @@@ static void ipu_select_buffer(enum ipu_
  
  /**
   * ipu_update_channel_buffer() - update physical address of a channel buffer.
-  * @channel:  channel ID.
+  * @ichan:    IDMAC channel.
   * @buffer_n: buffer number to update.
   *            0 or 1 are the only valid values.
   * @phyaddr:  buffer physical address.
   * @return:   Returns 0 on success or negative error code on failure. This
   *              function will fail if the buffer is set to ready.
   */
  /* Called under spin_lock(_irqsave)(&ichan->lock) */
- static int ipu_update_channel_buffer(enum ipu_channel channel,
+ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
                                     int buffer_n, dma_addr_t phyaddr)
  {
+       enum ipu_channel channel = ichan->dma_chan.chan_id;
        uint32_t reg;
        unsigned long flags;
  
        if (buffer_n == 0) {
                reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
                if (reg & (1UL << channel)) {
-                       spin_unlock_irqrestore(&ipu_data.lock, flags);
-                       return -EACCES;
+                       ipu_ic_disable_task(&ipu_data, channel);
+                       ichan->status = IPU_CHANNEL_READY;
                }
  
                /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
        } else {
                reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
                if (reg & (1UL << channel)) {
-                       spin_unlock_irqrestore(&ipu_data.lock, flags);
-                       return -EACCES;
+                       ipu_ic_disable_task(&ipu_data, channel);
+                       ichan->status = IPU_CHANNEL_READY;
                }
  
                /* Check if double-buffering is already enabled */
        return 0;
  }
  
+ /* Called under spin_lock_irqsave(&ichan->lock) */
+ static int ipu_submit_buffer(struct idmac_channel *ichan,
+       struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
+ {
+       unsigned int chan_id = ichan->dma_chan.chan_id;
+       struct device *dev = &ichan->dma_chan.dev->device;
+       int ret;
+       if (async_tx_test_ack(&desc->txd))
+               return -EINTR;
+       /*
+        * On first invocation this shouldn't be necessary, the call to
+        * ipu_init_channel_buffer() above will set addresses for us, so we
+        * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
+        * doing it again shouldn't hurt either.
+        */
+       ret = ipu_update_channel_buffer(ichan, buf_idx,
+                                       sg_dma_address(sg));
+       if (ret < 0) {
+               dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
+                       sg, chan_id, buf_idx);
+               return ret;
+       }
+       ipu_select_buffer(chan_id, buf_idx);
+       dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
+               sg, chan_id, buf_idx);
+       return 0;
+ }
+
  /* Called under spin_lock_irqsave(&ichan->lock) */
  static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
                                      struct idmac_tx_desc *desc)
                if (!ichan->sg[i]) {
                        ichan->sg[i] = sg;
  
-                       /*
-                        * On first invocation this shouldn't be necessary, the
-                        * call to ipu_init_channel_buffer() above will set
-                        * addresses for us, so we could make it conditional
-                        * on status >= IPU_CHANNEL_ENABLED, but doing it again
-                        * shouldn't hurt either.
-                        */
-                       ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
-                                                       sg_dma_address(sg));
+                       ret = ipu_submit_buffer(ichan, desc, sg, i);
                        if (ret < 0)
                                return ret;
  
-                       ipu_select_buffer(ichan->dma_chan.chan_id, i);
                        sg = sg_next(sg);
                }
        }
@@@ -842,19 -871,22 +871,22 @@@ static dma_cookie_t idmac_tx_submit(str
        struct idmac_channel *ichan = to_idmac_chan(tx->chan);
        struct idmac *idmac = to_idmac(tx->chan->device);
        struct ipu *ipu = to_ipu(idmac);
+       struct device *dev = &ichan->dma_chan.dev->device;
        dma_cookie_t cookie;
        unsigned long flags;
+       int ret;
  
        /* Sanity check */
        if (!list_empty(&desc->list)) {
                /* The descriptor doesn't belong to client */
-               dev_err(&ichan->dma_chan.dev->device,
-                       "Descriptor %p not prepared!\n", tx);
+               dev_err(dev, "Descriptor %p not prepared!\n", tx);
                return -EBUSY;
        }
  
        mutex_lock(&ichan->chan_mutex);
  
+       async_tx_clear_ack(tx);
        if (ichan->status < IPU_CHANNEL_READY) {
                struct idmac_video_param *video = &ichan->params.video;
                /*
                        goto out;
        }
  
-       /* ipu->lock can be taken under ichan->lock, but not v.v. */
-       spin_lock_irqsave(&ichan->lock, flags);
-       /* submit_buffers() atomically verifies and fills empty sg slots */
-       cookie = ipu_submit_channel_buffers(ichan, desc);
-       spin_unlock_irqrestore(&ichan->lock, flags);
-       if (cookie < 0)
-               goto out;
+       dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
  
        cookie = ichan->dma_chan.cookie;
  
        /* from dmaengine.h: "last cookie value returned to client" */
        ichan->dma_chan.cookie = cookie;
        tx->cookie = cookie;
+       /* ipu->lock can be taken under ichan->lock, but not v.v. */
        spin_lock_irqsave(&ichan->lock, flags);
        list_add_tail(&desc->list, &ichan->queue);
+       /* submit_buffers() atomically verifies and fills empty sg slots */
+       ret = ipu_submit_channel_buffers(ichan, desc);
        spin_unlock_irqrestore(&ichan->lock, flags);
  
+       if (ret < 0) {
+               cookie = ret;
+               goto dequeue;
+       }
        if (ichan->status < IPU_CHANNEL_ENABLED) {
-               int ret = ipu_enable_channel(idmac, ichan);
+               ret = ipu_enable_channel(idmac, ichan);
                if (ret < 0) {
                        cookie = ret;
-                       spin_lock_irqsave(&ichan->lock, flags);
-                       list_del_init(&desc->list);
-                       spin_unlock_irqrestore(&ichan->lock, flags);
-                       tx->cookie = cookie;
-                       ichan->dma_chan.cookie = cookie;
+                       goto dequeue;
                }
        }
  
        dump_idmac_reg(ipu);
  
+ dequeue:
+       if (cookie < 0) {
+               spin_lock_irqsave(&ichan->lock, flags);
+               list_del_init(&desc->list);
+               spin_unlock_irqrestore(&ichan->lock, flags);
+               tx->cookie = cookie;
+               ichan->dma_chan.cookie = cookie;
+       }
  out:
        mutex_unlock(&ichan->chan_mutex);
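
For orientation, the idmac_tx_submit() rework above is driven through the standard dmaengine slave calls of this era. A hedged sketch of the client side; submit_one() and done() are illustrative names, and chan and sg are assumed to be set up elsewhere (e.g. by a capture driver):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * The channel must be one this driver accepts: IDMAC_SDC_0, IDMAC_SDC_1
 * or IDMAC_IC_7, per idmac_prep_slave_sg() below.
 */
static dma_cookie_t submit_one(struct dma_chan *chan, struct scatterlist *sg,
			       unsigned int sg_len, dma_async_tx_callback done,
			       void *arg)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_slave_sg(chan, sg, sg_len,
						 DMA_FROM_DEVICE, 0);
	if (!txd)
		return -EIO;

	txd->callback = done;
	txd->callback_param = arg;

	cookie = txd->tx_submit(txd);	/* lands in idmac_tx_submit() */
	if (cookie >= 0)
		chan->device->device_issue_pending(chan);

	return cookie;
}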
  
@@@ -944,8 -983,6 +983,6 @@@ static int idmac_desc_alloc(struct idma
                memset(txd, 0, sizeof(*txd));
                dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
                txd->tx_submit          = idmac_tx_submit;
-               txd->chan               = &ichan->dma_chan;
-               INIT_LIST_HEAD(&txd->tx_list);
  
                list_add(&desc->list, &ichan->free_list);
  
@@@ -1161,6 -1198,24 +1198,24 @@@ static int ipu_disable_channel(struct i
        return 0;
  }
  
+ static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
+       struct idmac_tx_desc **desc, struct scatterlist *sg)
+ {
+       struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
+       if (sgnew)
+               /* next sg-element in this list */
+               return sgnew;
+       if ((*desc)->list.next == &ichan->queue)
+               /* No more descriptors on the queue */
+               return NULL;
+       /* Fetch next descriptor */
+       *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
+       return (*desc)->sg;
+ }
+
  /*
   * We have several possibilities here:
   * current BUF                next BUF
  static irqreturn_t idmac_interrupt(int irq, void *dev_id)
  {
        struct idmac_channel *ichan = dev_id;
+       struct device *dev = &ichan->dma_chan.dev->device;
        unsigned int chan_id = ichan->dma_chan.chan_id;
        struct scatterlist **sg, *sgnext, *sgnew = NULL;
        /* Next transfer descriptor */
-       struct idmac_tx_desc *desc = NULL, *descnew;
+       struct idmac_tx_desc *desc, *descnew;
        dma_async_tx_callback callback;
        void *callback_param;
        bool done = false;
-       u32     ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY),
-               ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY),
-               curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+       u32 ready0, ready1, curbuf, err;
+       unsigned long flags;
  
        /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
  
-       pr_debug("IDMAC irq %d\n", irq);
+       dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
+       spin_lock_irqsave(&ipu_data.lock, flags);
+       ready0  = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
+       ready1  = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
+       curbuf  = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+       err     = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
+       if (err & (1 << chan_id)) {
+               idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
+               spin_unlock_irqrestore(&ipu_data.lock, flags);
+               /*
+                * Doing this
+                * ichan->sg[0] = ichan->sg[1] = NULL;
+                * you can force channel re-enable on the next tx_submit(), but
+                * this is dirty - think about descriptors with multiple
+                * sg elements.
+                */
+               dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
+                        chan_id, ready0, ready1, curbuf);
+               return IRQ_HANDLED;
+       }
+       spin_unlock_irqrestore(&ipu_data.lock, flags);
        /* Other interrupts do not interfere with this channel */
        spin_lock(&ichan->lock);
        if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
                     ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
                int i = 100;
  
                if (!i) {
                        spin_unlock(&ichan->lock);
-                       dev_dbg(ichan->dma_chan.device->dev,
+                       dev_dbg(dev,
                                "IRQ on active buffer on channel %x, active "
                                "%d, ready %x, %x, current %x!\n", chan_id,
                                ichan->active_buffer, ready0, ready1, curbuf);
                        return IRQ_NONE;
-               }
+               } else
+                       dev_dbg(dev,
+                               "Buffer deactivated on channel %x, active "
+                               "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
+                               ichan->active_buffer, ready0, ready1, curbuf, i);
        }
  
        if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
                     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
                     )) {
                spin_unlock(&ichan->lock);
-               dev_dbg(ichan->dma_chan.device->dev,
+               dev_dbg(dev,
                        "IRQ with active buffer still ready on channel %x, "
                        "active %d, ready %x, %x!\n", chan_id,
                        ichan->active_buffer, ready0, ready1);
        }
  
        if (unlikely(list_empty(&ichan->queue))) {
+               ichan->sg[ichan->active_buffer] = NULL;
                spin_unlock(&ichan->lock);
-               dev_err(ichan->dma_chan.device->dev,
+               dev_err(dev,
                        "IRQ without queued buffers on channel %x, active %d, "
                        "ready %x, %x!\n", chan_id,
                        ichan->active_buffer, ready0, ready1);
        sg = &ichan->sg[ichan->active_buffer];
        sgnext = ichan->sg[!ichan->active_buffer];
  
+       if (!*sg) {
+               spin_unlock(&ichan->lock);
+               return IRQ_HANDLED;
+       }
+       desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
+       descnew = desc;
+       dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
+               irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
+       /* Find the descriptor of sgnext */
+       sgnew = idmac_sg_next(ichan, &descnew, *sg);
+       if (sgnext != sgnew)
+               dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
        /*
         * if sgnext == NULL sg must be the last element in a scatterlist and
         * queue must be empty
         */
        if (unlikely(!sgnext)) {
-               if (unlikely(sg_next(*sg))) {
-                       dev_err(ichan->dma_chan.device->dev,
-                               "Broken buffer-update locking on channel %x!\n",
-                               chan_id);
-                       /* We'll let the user catch up */
+               if (!WARN_ON(sg_next(*sg)))
+                       dev_dbg(dev, "Underrun on channel %x\n", chan_id);
+               ichan->sg[!ichan->active_buffer] = sgnew;
+               if (unlikely(sgnew)) {
+                       ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
                } else {
-                       /* Underrun */
+                       spin_lock_irqsave(&ipu_data.lock, flags);
                        ipu_ic_disable_task(&ipu_data, chan_id);
-                       dev_dbg(ichan->dma_chan.device->dev,
-                               "Underrun on channel %x\n", chan_id);
+                       spin_unlock_irqrestore(&ipu_data.lock, flags);
                        ichan->status = IPU_CHANNEL_READY;
                        /* Continue to check for complete descriptor */
                }
        }
  
-       desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
-       /* First calculate and submit the next sg element */
-       if (likely(sgnext))
-               sgnew = sg_next(sgnext);
-       if (unlikely(!sgnew)) {
-               /* Start a new scatterlist, if any queued */
-               if (likely(desc->list.next != &ichan->queue)) {
-                       descnew = list_entry(desc->list.next,
-                                            struct idmac_tx_desc, list);
-                       sgnew = &descnew->sg[0];
-               }
-       }
+       /* Calculate and submit the next sg element */
+       sgnew = idmac_sg_next(ichan, &descnew, sgnew);
  
        if (unlikely(!sg_next(*sg)) || !sgnext) {
                /*
  
        *sg = sgnew;
  
-       if (likely(sgnew)) {
-               int ret;
-               ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
-                                               sg_dma_address(*sg));
-               if (ret < 0)
-                       dev_err(ichan->dma_chan.device->dev,
-                               "Failed to update buffer on channel %x buffer %d!\n",
-                               chan_id, ichan->active_buffer);
-               else
-                       ipu_select_buffer(chan_id, ichan->active_buffer);
+       if (likely(sgnew) &&
+           ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
+               callback = desc->txd.callback;
+               callback_param = desc->txd.callback_param;
+               spin_unlock(&ichan->lock);
+               callback(callback_param);
+               spin_lock(&ichan->lock);
        }
  
        /* Flip the active buffer - even if update above failed */
@@@ -1327,13 -1410,20 +1410,20 @@@ static void ipu_gc_tasklet(unsigned lon
                struct idmac_channel *ichan = ipu->channel + i;
                struct idmac_tx_desc *desc;
                unsigned long flags;
-               int j;
+               struct scatterlist *sg;
+               int j, k;
  
                for (j = 0; j < ichan->n_tx_desc; j++) {
                        desc = ichan->desc + j;
                        spin_lock_irqsave(&ichan->lock, flags);
                        if (async_tx_test_ack(&desc->txd)) {
                                list_move(&desc->list, &ichan->free_list);
+                               for_each_sg(desc->sg, sg, desc->sg_len, k) {
+                                       if (ichan->sg[0] == sg)
+                                               ichan->sg[0] = NULL;
+                                       else if (ichan->sg[1] == sg)
+                                               ichan->sg[1] = NULL;
+                               }
                                async_tx_clear_ack(&desc->txd);
                        }
                        spin_unlock_irqrestore(&ichan->lock, flags);
        }
  }
  
- /*
-  * At the time .device_alloc_chan_resources() method is called, we cannot know,
-  * whether the client will accept the channel. Thus we must only check, if we
-  * can satisfy client's request but the only real criterion to verify, whether
-  * the client has accepted our offer is the client_count. That's why we have to
-  * perform the rest of our allocation tasks on the first call to this function.
-  */
+ /* Allocate and initialise a transfer descriptor. */
  static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
                struct scatterlist *sgl, unsigned int sg_len,
                enum dma_data_direction direction, unsigned long tx_flags)
        unsigned long flags;
  
        /* We only can handle these three channels so far */
-       if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 &&
-           ichan->dma_chan.chan_id != IDMAC_IC_7)
+       if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
+           chan->chan_id != IDMAC_IC_7)
                return NULL;
  
        if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
@@@ -1400,7 -1484,7 +1484,7 @@@ static void idmac_issue_pending(struct 
  
        /* This is not always needed, but doesn't hurt either */
        spin_lock_irqsave(&ipu->lock, flags);
-       ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer);
+       ipu_select_buffer(chan->chan_id, ichan->active_buffer);
        spin_unlock_irqrestore(&ipu->lock, flags);
  
        /*
@@@ -1432,8 -1516,7 +1516,7 @@@ static void __idmac_terminate_all(struc
                        struct idmac_tx_desc *desc = ichan->desc + i;
                        if (list_empty(&desc->list))
                                /* Descriptor was prepared, but not submitted */
-                               list_add(&desc->list,
-                                        &ichan->free_list);
+                               list_add(&desc->list, &ichan->free_list);
  
                        async_tx_clear_ack(&desc->txd);
                }
@@@ -1458,6 -1541,28 +1541,28 @@@ static void idmac_terminate_all(struct 
        mutex_unlock(&ichan->chan_mutex);
  }
  
+ #ifdef DEBUG
+ static irqreturn_t ic_sof_irq(int irq, void *dev_id)
+ {
+       struct idmac_channel *ichan = dev_id;
+       printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
+              irq, ichan->dma_chan.chan_id);
+       disable_irq(irq);
+       return IRQ_HANDLED;
+ }
+
+ static irqreturn_t ic_eof_irq(int irq, void *dev_id)
+ {
+       struct idmac_channel *ichan = dev_id;
+       printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
+              irq, ichan->dma_chan.chan_id);
+       disable_irq(irq);
+       return IRQ_HANDLED;
+ }
+
+ static int ic_sof = -EINVAL, ic_eof = -EINVAL;
+ #endif
+
  static int idmac_alloc_chan_resources(struct dma_chan *chan)
  {
        struct idmac_channel *ichan = to_idmac_chan(chan);
        chan->cookie            = 1;
        ichan->completed        = -ENXIO;
  
-       ret = ipu_irq_map(ichan->dma_chan.chan_id);
+       ret = ipu_irq_map(chan->chan_id);
        if (ret < 0)
                goto eimap;
  
        ichan->eof_irq = ret;
+       /*
+        * Important to first disable the channel, because maybe someone
+        * used it before us, e.g., the bootloader
+        */
+       ipu_disable_channel(idmac, ichan, true);
+       ret = ipu_init_channel(idmac, ichan);
+       if (ret < 0)
+               goto eichan;
        ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
                          ichan->eof_name, ichan);
        if (ret < 0)
                goto erirq;
  
-       ret = ipu_init_channel(idmac, ichan);
-       if (ret < 0)
-               goto eichan;
+ #ifdef DEBUG
+       if (chan->chan_id == IDMAC_IC_7) {
+               ic_sof = ipu_irq_map(69);
+               if (ic_sof > 0)
+                       request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+               ic_eof = ipu_irq_map(70);
+               if (ic_eof > 0)
+                       request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+       }
+ #endif
  
        ichan->status = IPU_CHANNEL_INITIALIZED;
  
-       dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
-               ichan->dma_chan.chan_id, ichan->eof_irq);
+       dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
+               chan->chan_id, ichan->eof_irq);
  
        return ret;
  
- eichan:
-       free_irq(ichan->eof_irq, ichan);
  erirq:
-       ipu_irq_unmap(ichan->dma_chan.chan_id);
+       ipu_uninit_channel(idmac, ichan);
+ eichan:
+       ipu_irq_unmap(chan->chan_id);
  eimap:
        return ret;
  }
@@@ -1510,8 -1633,22 +1633,22 @@@ static void idmac_free_chan_resources(s
        __idmac_terminate_all(chan);
  
        if (ichan->status > IPU_CHANNEL_FREE) {
+ #ifdef DEBUG
+               if (chan->chan_id == IDMAC_IC_7) {
+                       if (ic_sof > 0) {
+                               free_irq(ic_sof, ichan);
+                               ipu_irq_unmap(69);
+                               ic_sof = -EINVAL;
+                       }
+                       if (ic_eof > 0) {
+                               free_irq(ic_eof, ichan);
+                               ipu_irq_unmap(70);
+                               ic_eof = -EINVAL;
+                       }
+               }
+ #endif
                free_irq(ichan->eof_irq, ichan);
-               ipu_irq_unmap(ichan->dma_chan.chan_id);
+               ipu_irq_unmap(chan->chan_id);
        }
  
        ichan->status = IPU_CHANNEL_FREE;
@@@ -1573,7 -1710,7 +1710,7 @@@ static int __init ipu_idmac_init(struc
                dma_chan->device        = &idmac->dma;
                dma_chan->cookie        = 1;
                dma_chan->chan_id       = i;
-               list_add_tail(&ichan->dma_chan.device_node, &dma->channels);
+               list_add_tail(&dma_chan->device_node, &dma->channels);
        }
  
        idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
        return dma_async_device_register(&idmac->dma);
  }
  
- static void ipu_idmac_exit(struct ipu *ipu)
+ static void __exit ipu_idmac_exit(struct ipu *ipu)
  {
        int i;
        struct idmac *idmac = &ipu->idmac;
   * IPU common probe / remove
   */
  
- static int ipu_probe(struct platform_device *pdev)
+ static int __init ipu_probe(struct platform_device *pdev)
  {
        struct ipu_platform_data *pdata = pdev->dev.platform_data;
        struct resource *mem_ipu, *mem_ic;
        }
  
        /* Get IPU clock */
 -      ipu_data.ipu_clk = clk_get(&pdev->dev, "ipu_clk");
 +      ipu_data.ipu_clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(ipu_data.ipu_clk)) {
                ret = PTR_ERR(ipu_data.ipu_clk);
                goto err_clk_get;
@@@ -1700,7 -1837,7 +1837,7 @@@ err_noirq
        return ret;
  }
  
- static int ipu_remove(struct platform_device *pdev)
+ static int __exit ipu_remove(struct platform_device *pdev)
  {
        struct ipu *ipu = platform_get_drvdata(pdev);
  
@@@ -1725,7 -1862,7 +1862,7 @@@ static struct platform_driver ipu_platf
                .name   = "ipu-core",
                .owner  = THIS_MODULE,
        },
-       .remove         = ipu_remove,
+       .remove         = __exit_p(ipu_remove),
  };
  
  static int __init ipu_init(void)