/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller.
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
				|ATC_DIF(1))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);

/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be cleared
 *       and control will be given to client at usage time (during
 *       preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&atchan->lock);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&atchan->lock);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&atchan->lock);
			atchan->descs_allocated++;
			spin_unlock_bh(&atchan->lock);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;

		spin_lock_bh(&atchan->lock);
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->txd.tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_bh(&atchan->lock);
	}
}

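/*
 * Note: atc_desc_get()/atc_desc_put() form the channel-local allocation
 * pair. Descriptors are recycled through atchan->free_list; only when no
 * ACKed descriptor is found there does atc_desc_get() fall back to the
 * DMA pool via atc_alloc_descriptor().
 */
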
/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

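/*
 * Cookies handed out by atc_assign_cookie() increase monotonically and
 * wrap back to 1 on signed overflow, keeping negative values free for
 * error codes such as the -EBUSY placeholder used in prep functions.
 */
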
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	/* move children to free_list */
	list_splice_init(&txd->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(chan2parent(&atchan->chan_common),
					desc->lli.daddr,
					desc->len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(chan2parent(&atchan->chan_common),
					desc->lli.daddr,
					desc->len, DMA_FROM_DEVICE);
	}
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(chan2parent(&atchan->chan_common),
					desc->lli.saddr,
					desc->len, DMA_TO_DEVICE);
		else
			dma_unmap_page(chan2parent(&atchan->chan_common),
					desc->lli.saddr,
					desc->len, DMA_TO_DEVICE);
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	/* Channel cannot be enabled here */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: channel enabled in tasklet\n");
		return;
	}

	spin_lock(&atchan->lock);
	if (test_and_clear_bit(0, &atchan->error_status))
		atc_handle_error(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock(&atchan->lock);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR, atchan->mask);
					/* Give information to tasklet */
					set_bit(0, &atchan->error_status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}
	} while (pending);

	return ret;
}

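/*
 * The handler above re-reads EBCISR until no enabled source is pending,
 * so status raised while a previous pass was running is not lost; it
 * returns IRQ_NONE when nothing was pending, which keeps the line usable
 * as a shared interrupt.
 */
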
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&atchan->lock);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_bh(&atchan->lock);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

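	/*
	 * Worked example (illustrative): for a word-aligned 4 KiB copy the
	 * branch above picks src_width = dst_width = 2, so each lli moves
	 * (len - offset) >> 2 = 1024 word transfers per descriptor, capped
	 * by ATC_BTSIZE_MAX in the loop below.
	 */
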
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		async_tx_ack(&desc->txd);

		if (!first) {
			first = desc;
		} else {
			/* inform the HW lli about chaining */
			prev->lli.dscr = desc->txd.phys;
			/* insert the link descriptor to the LD ring */
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

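/*
 * Client-side sketch (illustrative only, error handling elided): this is
 * the call sequence the dmaengine core performs around
 * atc_prep_dma_memcpy(); dst_phys/src_phys/len are assumed to be
 * DMA-mapped already by the caller.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
 *						   len, DMA_CTRL_ACK);
 *	if (tx) {
 *		cookie = tx->tx_submit(tx);	// atc_tx_submit()
 *		chan->device->device_issue_pending(chan);
 *	}
 */
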
/**
 * atc_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 *
 * If @done and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	enum dma_status		ret;

	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
			cookie, done ? *done : 0, used ? *used : 0);

	spin_lock_bh(&atchan->lock);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_bh(&atchan->lock);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

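/*
 * Completion is normally polled through the dmaengine wrappers rather
 * than by calling this directly; an illustrative busy-wait:
 *
 *	while (dma_async_memcpy_complete(chan, cookie, NULL, NULL)
 *			!= DMA_SUCCESS)
 *		cpu_relax();
 */
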
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	if (!atc_chan_is_enabled(atchan)) {
		spin_lock_bh(&atchan->lock);
		atc_advance_work(atchan);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	int			i;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	/* have we already been set up? */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&atchan->lock);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&atchan->lock);

	/* channel parameters */
	channel_writel(atchan, CFG, ATC_DEFAULT_CFG);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = io->end - io->start + 1;
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		atchan->chan_common.chan_id = i;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  atdma->dma_common.chancnt);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, io->end - io->start + 1);

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
	return 0;
}

static int at_dma_resume_early(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);
	return 0;
}

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.suspend_late	= at_dma_suspend_late,
	.resume_early	= at_dma_resume_early,
	.driver = {
		.name	= "at_hdmac",
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
module_init(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");