DMAENGINE: generic slave control v2
author    Linus Walleij <linus.walleij@stericsson.com>
          Fri, 26 Mar 2010 23:44:01 +0000 (16:44 -0700)
committer Dan Williams <dan.j.williams@intel.com>
          Fri, 26 Mar 2010 23:44:01 +0000 (16:44 -0700)
Convert the device_terminate_all() operation on the
DMA engine to a generic device_control() operation,
which can now optionally also support pausing and
resuming DMA on a given channel. This is implemented
for the COH 901 318 DMAC as an example.
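
As a usage illustration (a minimal sketch, not part of the patch itself): a
slave client that previously called device_terminate_all() now issues commands
through device_control() and can probe for the optional pause/resume support.
The helper name below is a placeholder, and it assumes a channel already
obtained, e.g. via dma_request_channel(); only the COH 901 318 driver in this
series implements DMA_PAUSE/DMA_RESUME, so other controllers take the fallback
path.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch only: pause a slave channel if the DMAC supports
 * it, otherwise fall back to terminating the transfer. Drivers that only
 * implement DMA_TERMINATE_ALL return an error (-ENXIO in most of the
 * drivers converted here) for the other commands.
 */
static void example_quiesce_channel(struct dma_chan *chan)
{
	int ret;

	ret = chan->device->device_control(chan, DMA_PAUSE);
	if (ret < 0) {
		/* Pause not supported: tear everything down instead. */
		chan->device->device_control(chan, DMA_TERMINATE_ALL);
		return;
	}

	/* ... channel is paused; inspect or reconfigure it here ... */

	/* Resume the paused transfer. */
	chan->device->device_control(chan, DMA_RESUME);
}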

[dan.j.williams@intel.com: update for timberdale]
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Acked-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Cc: Maciej Sosnowski <maciej.sosnowski@intel.com>
Cc: Nicolas Ferre <nicolas.ferre@atmel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Li Yang <leoli@freescale.com>
Cc: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Cc: Magnus Damm <damm@opensource.se>
Cc: Liam Girdwood <lrg@slimlogic.co.uk>
Cc: Joe Perches <joe@perches.com>
Cc: Roland Dreier <rdreier@cisco.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
15 files changed:
arch/arm/mach-u300/include/mach/coh901318.h
drivers/dma/at_hdmac.c
drivers/dma/coh901318.c
drivers/dma/dmaengine.c
drivers/dma/dw_dmac.c
drivers/dma/fsldma.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/shdma.c
drivers/dma/timb_dma.c
drivers/dma/txx9dmac.c
drivers/mmc/host/atmel-mci.c
drivers/serial/sh-sci.c
drivers/video/mx3fb.c
include/linux/dmaengine.h
sound/soc/txx9/txx9aclc.c

diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index b8155b4e5ffa149cb065529e74e9ad636ad31f1b..43ec040e765bef354dc5bd841040b121680e0330 100644 (file)
@@ -109,20 +109,6 @@ struct coh901318_platform {
  */
 u32 coh901318_get_bytes_left(struct dma_chan *chan);
 
-/**
- * coh901318_stop() - Stops dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_stop(struct dma_chan *chan);
-
-/**
- * coh901318_continue() - Resumes a stopped dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_continue(struct dma_chan *chan);
-
 /**
  * coh901318_filter_id() - DMA channel filter function
  * @chan: dma channel handle
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index efc1a61ca2310bec0b517d20d7675c3f800f0717..f9143cf9e50afb64a5f5fd7d44649c475f3dba9d 100644 (file)
@@ -759,13 +759,17 @@ err_desc_get:
        return NULL;
 }
 
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma           *atdma = to_at_dma(chan->device);
        struct at_desc          *desc, *_desc;
        LIST_HEAD(list);
 
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
        /*
         * This is only called when something went wrong elsewhere, so
         * we don't really care about the data. Just disable the
@@ -789,6 +793,8 @@ static void atc_terminate_all(struct dma_chan *chan)
        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);
+
+       return 0;
 }
 
 /**
@@ -1091,7 +1097,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
        if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
                atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-               atdma->dma_common.device_terminate_all = atc_terminate_all;
+               atdma->dma_common.device_control = atc_control;
        }
 
        dma_writel(atdma, EN, AT_DMA_ENABLE);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f636c4a87c7f3ba780ea7869112c436b5b8524b6..53c54e034aa37eec865c7244d9dbf2c3921c4cca 100644 (file)
@@ -506,10 +506,11 @@ u32 coh901318_get_bytes_left(struct dma_chan *chan)
 EXPORT_SYMBOL(coh901318_get_bytes_left);
 
 
-/* Stops a transfer without losing data. Enables power save.
-   Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
 {
        u32 val;
        unsigned long flags;
@@ -550,12 +551,11 @@ void coh901318_stop(struct dma_chan *chan)
 
        spin_unlock_irqrestore(&cohc->lock, flags);
 }
-EXPORT_SYMBOL(coh901318_stop);
 
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been stopped via 300_dma_stop(..).
    Power save is handled.
 */
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
 {
        u32 val;
        unsigned long flags;
@@ -581,7 +581,6 @@ void coh901318_continue(struct dma_chan *chan)
 
        spin_unlock_irqrestore(&cohc->lock, flags);
 }
-EXPORT_SYMBOL(coh901318_continue);
 
 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
 {
@@ -945,7 +944,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 
        spin_unlock_irqrestore(&cohc->lock, flags);
 
-       chan->device->device_terminate_all(chan);
+       chan->device->device_control(chan, DMA_TERMINATE_ALL);
 }
 
 
@@ -1179,16 +1178,29 @@ coh901318_issue_pending(struct dma_chan *chan)
        spin_unlock_irqrestore(&cohc->lock, flags);
 }
 
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        unsigned long flags;
        struct coh901318_chan *cohc = to_coh901318_chan(chan);
        struct coh901318_desc *cohd;
        void __iomem *virtbase = cohc->base->virtbase;
 
-       coh901318_stop(chan);
+       if (cmd == DMA_PAUSE) {
+               coh901318_pause(chan);
+               return 0;
+       }
+
+       if (cmd == DMA_RESUME) {
+               coh901318_resume(chan);
+               return 0;
+       }
+
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
 
+       /* The remainder of this function terminates the transfer */
+       coh901318_pause(chan);
        spin_lock_irqsave(&cohc->lock, flags);
 
        /* Clear any pending BE or TC interrupt */
@@ -1227,6 +1239,8 @@ coh901318_terminate_all(struct dma_chan *chan)
        cohc->busy = 0;
 
        spin_unlock_irqrestore(&cohc->lock, flags);
+
+       return 0;
 }
 void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
                         struct coh901318_base *base)
@@ -1344,7 +1358,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
        base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
        base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
        base->dma_slave.device_issue_pending = coh901318_issue_pending;
-       base->dma_slave.device_terminate_all = coh901318_terminate_all;
+       base->dma_slave.device_control = coh901318_control;
        base->dma_slave.dev = &pdev->dev;
 
        err = dma_async_device_register(&base->dma_slave);
@@ -1364,7 +1378,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
        base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
        base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
        base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
-       base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+       base->dma_memcpy.device_control = coh901318_control;
        base->dma_memcpy.dev = &pdev->dev;
        /*
         * This controller can only access address at even 32bit boundaries,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87399cafce37f3036e228a916126a266d0b501b2..ffc4ee9c5e2152d107b3036d6e1fa62ca8f16463 100644 (file)
@@ -694,7 +694,7 @@ int dma_async_device_register(struct dma_device *device)
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-               !device->device_terminate_all);
+               !device->device_control);
 
        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd2ba8a2da690b235c0318a89719a82..8a6b85f61176dd457a9ff549ef1c1be71b2a88af 100644 (file)
@@ -781,13 +781,17 @@ err_desc_get:
        return NULL;
 }
 
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc, *_desc;
        LIST_HEAD(list);
 
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
        /*
         * This is only called when something went wrong elsewhere, so
         * we don't really care about the data. Just disable the
@@ -810,6 +814,8 @@ static void dwc_terminate_all(struct dma_chan *chan)
        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                dwc_descriptor_complete(dwc, desc);
+
+       return 0;
 }
 
 static enum dma_status
@@ -1338,7 +1344,7 @@ static int __init dw_probe(struct platform_device *pdev)
        dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
 
        dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-       dw->dma.device_terminate_all = dwc_terminate_all;
+       dw->dma.device_control = dwc_control;
 
        dw->dma.device_is_tx_complete = dwc_is_tx_complete;
        dw->dma.device_issue_pending = dwc_issue_pending;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index bbb4be5a3ff493be3bf35ac07474e43002efd71d..714fc46e76954bca56d225d47ef0cd2e6aa41169 100644 (file)
@@ -774,13 +774,18 @@ fail:
        return NULL;
 }
 
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+                                 enum dma_ctrl_cmd cmd)
 {
        struct fsldma_chan *chan;
        unsigned long flags;
 
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
        if (!dchan)
-               return;
+               return -EINVAL;
 
        chan = to_fsl_chan(dchan);
 
@@ -794,6 +799,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
        fsldma_free_desc_list(chan, &chan->ld_running);
 
        spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+       return 0;
 }
 
 /**
@@ -1332,7 +1339,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
        fdev->common.device_is_tx_complete = fsl_dma_is_complete;
        fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
        fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
-       fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+       fdev->common.device_control = fsl_dma_device_control;
        fdev->common.dev = &op->dev;
 
        dev_set_drvdata(&op->dev, fdev);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c8847aa836c203d6586b672cf3b3024f..39e7fb2a90e369ada79052f45604bc858cd3d8b2 100644 (file)
@@ -1472,13 +1472,17 @@ static void idmac_issue_pending(struct dma_chan *chan)
         */
 }
 
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct idmac_channel *ichan = to_idmac_chan(chan);
        struct idmac *idmac = to_idmac(chan->device);
        unsigned long flags;
        int i;
 
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
        ipu_disable_channel(idmac, ichan,
                            ichan->status >= IPU_CHANNEL_ENABLED);
 
@@ -1505,17 +1509,22 @@ static void __idmac_terminate_all(struct dma_chan *chan)
        tasklet_enable(&to_ipu(idmac)->tasklet);
 
        ichan->status = IPU_CHANNEL_INITIALIZED;
+
+       return 0;
 }
 
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct idmac_channel *ichan = to_idmac_chan(chan);
+       int ret;
 
        mutex_lock(&ichan->chan_mutex);
 
-       __idmac_terminate_all(chan);
+       ret = __idmac_control(chan, cmd);
 
        mutex_unlock(&ichan->chan_mutex);
+
+       return ret;
 }
 
 #ifdef DEBUG
@@ -1607,7 +1616,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 
        mutex_lock(&ichan->chan_mutex);
 
-       __idmac_terminate_all(chan);
+       __idmac_control(chan, DMA_TERMINATE_ALL);
 
        if (ichan->status > IPU_CHANNEL_FREE) {
 #ifdef DEBUG
@@ -1669,7 +1678,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 
        /* Compulsory for DMA_SLAVE fields */
        dma->device_prep_slave_sg               = idmac_prep_slave_sg;
-       dma->device_terminate_all               = idmac_terminate_all;
+       dma->device_control                     = idmac_control;
 
        INIT_LIST_HEAD(&dma->channels);
        for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1712,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
        for (i = 0; i < IPU_CHANNELS_NUM; i++) {
                struct idmac_channel *ichan = ipu->channel + i;
 
-               idmac_terminate_all(&ichan->dma_chan);
+               idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL);
                idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
        }
 
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 5d17e09cb625412beaf0e8275f39142b7006a4c1..ce28c1e22825fc8338d274a801c346d91586c973 100644 (file)
@@ -580,12 +580,16 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
                               direction, flags);
 }
 
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
        if (!chan)
-               return;
+               return -EINVAL;
 
        dmae_halt(sh_chan);
 
@@ -601,6 +605,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
        spin_unlock_bh(&sh_chan->desc_lock);
 
        sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+       return 0;
 }
 
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -1029,7 +1035,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
        /* Compulsory for DMA_SLAVE fields */
        shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
-       shdev->common.device_terminate_all = sh_dmae_terminate_all;
+       shdev->common.device_control = sh_dmae_control;
 
        shdev->common.dev = &pdev->dev;
        /* Default transfer size of 32 bytes requires 32-byte alignment */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 145f1c23408f09d6962dd1f6b8672166f0ca8caa..7c06471ef863d26f75fdaa822a1429b3bd93563a 100644 (file)
@@ -613,7 +613,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
        return &td_desc->txd;
 }
 
-static void td_terminate_all(struct dma_chan *chan)
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
@@ -621,6 +621,9 @@ static void td_terminate_all(struct dma_chan *chan)
 
        dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
 
+       if (cmd != DMA_TERMINATE_ALL)
+               return -ENXIO;
+
        /* first the easy part, put the queue into the free list */
        spin_lock_bh(&td_chan->lock);
        list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -630,6 +633,8 @@ static void td_terminate_all(struct dma_chan *chan)
        /* now tear down the runnning */
        __td_finish(td_chan);
        spin_unlock_bh(&td_chan->lock);
+
+       return 0;
 }
 
 static void td_tasklet(unsigned long data)
@@ -743,7 +748,7 @@ static int __devinit td_probe(struct platform_device *pdev)
        dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
        td->dma.device_prep_slave_sg = td_prep_slave_sg;
-       td->dma.device_terminate_all = td_terminate_all;
+       td->dma.device_control = td_control;
 
        td->dma.dev = &pdev->dev;
 
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 3ebc61067e548d407af798687db075ae15003385..e528e15f44abfae687749b71ca7fcaa24c326aeb 100644 (file)
@@ -938,12 +938,16 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        return &first->txd;
 }
 
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
        struct txx9dmac_desc *desc, *_desc;
        LIST_HEAD(list);
 
+       /* Only supports DMA_TERMINATE_ALL */
+       if (cmd != DMA_TERMINATE_ALL)
+               return -EINVAL;
+
        dev_vdbg(chan2dev(chan), "terminate_all\n");
        spin_lock_bh(&dc->lock);
 
@@ -958,6 +962,8 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                txx9dmac_descriptor_complete(dc, desc);
+
+       return 0;
 }
 
 static enum dma_status
@@ -1153,7 +1159,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
        dc->dma.dev = &pdev->dev;
        dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
        dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-       dc->dma.device_terminate_all = txx9dmac_terminate_all;
+       dc->dma.device_control = txx9dmac_control;
        dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
        dc->dma.device_issue_pending = txx9dmac_issue_pending;
        if (pdata && pdata->memcpy_chan == ch) {
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 8072128e933b80a8ff6f68f2ed160a964ed04bc2..ae6d24ba4f081cd7a77cc3dc04f8df9016a8e19b 100644 (file)
@@ -578,7 +578,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
        struct dma_chan *chan = host->data_chan;
 
        if (chan) {
-               chan->device->device_terminate_all(chan);
+               chan->device->device_control(chan, DMA_TERMINATE_ALL);
                atmci_dma_cleanup(host);
        } else {
                /* Data transfer was stopped by the interrupt handler */
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index f7b9aff88f4a28836ef502b59ff37e77c32faf08..690988237971d7a01fbddb6c22b64dc60deaefda 100644 (file)
@@ -1087,7 +1087,7 @@ static void work_fn_rx(struct work_struct *work)
                unsigned long flags;
                int count;
 
-               chan->device->device_terminate_all(chan);
+               chan->device->device_control(chan, DMA_TERMINATE_ALL);
                dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
                        sh_desc->partial, sh_desc->cookie);
 
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 772ba3f45e6f511d78bf2035046dd7829244277b..3aa50bc276ebad7c30ab9710bce103af812a0d7a 100644 (file)
@@ -387,7 +387,8 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
 
        spin_unlock_irqrestore(&mx3fb->lock, flags);
 
-       mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
+       mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan,
+                                                  DMA_TERMINATE_ALL);
        mx3_fbi->txd = NULL;
        mx3_fbi->cookie = -EINVAL;
 }
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 20ea12c86fd007ce9cd66d084399621b4b323f3a..0731802f876f77ac88521fdf0ff3b48343b8fe15 100644 (file)
@@ -106,6 +106,19 @@ enum dma_ctrl_flags {
        DMA_PREP_FENCE = (1 << 9),
 };
 
+/**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume paused transfer
+ */
+enum dma_ctrl_cmd {
+       DMA_TERMINATE_ALL,
+       DMA_PAUSE,
+       DMA_RESUME,
+};
+
 /**
  * enum sum_check_bits - bit position of pq_check_flags
  */
@@ -261,7 +274,8 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
+ * @device_control: manipulate all pending operations on a channel, returns
+ *     zero or error code
  * @device_is_tx_complete: poll for transaction completion
  * @device_issue_pending: push pending transactions to hardware
  */
@@ -313,7 +327,7 @@ struct dma_device {
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flags);
-       void (*device_terminate_all)(struct dma_chan *chan);
+       int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd);
 
        enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
                        dma_cookie_t cookie, dma_cookie_t *last,
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index efed64b8b026c18117b6b58a49d8cfb1dd467955..b35d00706c0eb21bb40afd6857447d783e8b7557 100644 (file)
@@ -159,7 +159,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
                void __iomem *base = drvdata->base;
 
                spin_unlock_irqrestore(&dmadata->dma_lock, flags);
-               chan->device->device_terminate_all(chan);
+               chan->device->device_control(chan, DMA_TERMINATE_ALL);
                /* first time */
                for (i = 0; i < NR_DMA_CHAIN; i++) {
                        desc = txx9aclc_dma_submit(dmadata,
@@ -267,7 +267,7 @@ static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
        struct dma_chan *chan = dmadata->dma_chan;
 
        dmadata->frag_count = -1;
-       chan->device->device_terminate_all(chan);
+       chan->device->device_control(chan, DMA_TERMINATE_ALL);
        return 0;
 }
 
@@ -396,7 +396,7 @@ static int txx9aclc_pcm_remove(struct platform_device *pdev)
                struct dma_chan *chan = dmadata->dma_chan;
                if (chan) {
                        dmadata->frag_count = -1;
-                       chan->device->device_terminate_all(chan);
+                       chan->device->device_control(chan, DMA_TERMINATE_ALL);
                        dma_release_channel(chan);
                }
                dev->dmadata[i].dma_chan = NULL;