/* linux/arch/arm/mach-s3c64xx/dma.c
 *
 * Copyright 2009 Openmoko, Inc.
 * Copyright 2009 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C64XX DMA core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

#include <mach/dma.h>
#include <mach/map.h>
#include <mach/irqs.h>

#include <mach/regs-sys.h>

#include <asm/hardware/pl080.h>

/* dma channel state information */

struct s3c64xx_dmac {
	struct sys_device	 sysdev;
	struct clk		*clk;
	void __iomem		*regs;
	struct s3c2410_dma_chan	*channels;
	enum dma_ch		 chanbase;
};

/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;

/* Debug configuration and code */

static unsigned char debug_show_buffs = 0;

static void dbg_showchan(struct s3c2410_dma_chan *chan)
{
	pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
		 chan->number,
		 readl(chan->regs + PL080_CH_SRC_ADDR),
		 readl(chan->regs + PL080_CH_DST_ADDR),
		 readl(chan->regs + PL080_CH_LLI),
		 readl(chan->regs + PL080_CH_CONTROL),
		 readl(chan->regs + PL080S_CH_CONTROL2),
		 readl(chan->regs + PL080S_CH_CONFIG));
}

static void show_lli(struct pl080s_lli *lli)
{
	pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
		 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
		 lli->control0, lli->control1);
}

static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *ptr;
	struct s3c64xx_dma_buff *end;

	pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
		 chan->number, chan->next, chan->curr, chan->end);

	ptr = chan->next;
	end = chan->end;

	if (debug_show_buffs) {
		for (; ptr != NULL; ptr = ptr->next) {
			pr_debug("DMA%d: %08x ",
				 chan->number, ptr->lli_dma);
			show_lli(ptr->lli);
		}
	}
}

/* End of Debug */

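/* s3c64xx_dma_map_channel
 *
 * find a free hardware channel for the requested virtual channel. Requests
 * below DMACH_PCM1_TX are served from the first controller (channels 0-7),
 * the rest from the second (channels 8-15); the chosen channel is recorded
 * in s3c_dma_chan_map so later lookups return the same mapping.
 */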
static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
{
	struct s3c2410_dma_chan *chan;
	unsigned int start, offs;

	start = 0;

	if (channel >= DMACH_PCM1_TX)
		start = 8;

	for (offs = 0; offs < 8; offs++) {
		chan = &s3c2410_chans[start + offs];
		if (!chan->in_use)
			goto found;
	}

	return NULL;

found:
	s3c_dma_chan_map[channel] = chan;
	return chan;
}

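/* s3c2410_dma_config
 *
 * set the channel's transfer unit: 1, 2 or 4 bytes per transfer, stored
 * as the PL080 width encoding (0, 1 or 2) for later use in the LLIs.
 */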
int s3c2410_dma_config(unsigned int channel, int xferunit)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	switch (xferunit) {
	case 1:
		chan->hw_width = 0;
		break;
	case 2:
		chan->hw_width = 1;
		break;
	case 4:
		chan->hw_width = 2;
		break;
	default:
		printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_config);

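/* s3c64xx_dma_fill_lli
 *
 * fill a PL080S linked-list item describing a transfer of 'size' bytes at
 * 'data', using the channel's device address and configured width. The
 * transfer direction decides which side of the transfer increments.
 */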
static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
				 struct pl080s_lli *lli,
				 dma_addr_t data, int size)
{
	dma_addr_t src, dst;
	u32 control0, control1;

	switch (chan->source) {
	case S3C2410_DMASRC_HW:
		src = chan->dev_addr;
		dst = data;
		control0 = PL080_CONTROL_SRC_AHB2;
		control0 |= PL080_CONTROL_DST_INCR;
		break;

	case S3C2410_DMASRC_MEM:
		src = data;
		dst = chan->dev_addr;
		control0 = PL080_CONTROL_DST_AHB2;
		control0 |= PL080_CONTROL_SRC_INCR;
		break;
	default:
		BUG();
	}

	/* note, we do not currently setup any of the burst controls */

	control1 = size >> chan->hw_width;	/* size in no of xfers */
	control0 |= PL080_CONTROL_PROT_SYS;	/* always in priv. mode */
	control0 |= PL080_CONTROL_TC_IRQ_EN;	/* always fire IRQ */
	control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
	control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;

	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;
	lli->control0 = control0;
	lli->control1 = control1;
}

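/* s3c64xx_lli_to_regs
 *
 * copy the given LLI into the channel's hardware registers (source,
 * destination, next-LLI and control), ready for the channel to be started.
 */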
static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
				struct pl080s_lli *lli)
{
	void __iomem *regs = chan->regs;

	pr_debug("%s: LLI %p => regs\n", __func__, lli);
	show_lli(lli);

	writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
	writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
	writel(lli->next_lli, regs + PL080_CH_LLI);
	writel(lli->control0, regs + PL080_CH_CONTROL);
	writel(lli->control1, regs + PL080S_CH_CONTROL2);
}

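/* s3c64xx_dma_start
 *
 * clear any pending terminal-count and error interrupts for the channel,
 * then set the enable bit in its configuration register to begin the
 * transfer already programmed into the channel registers.
 */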
static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dmac *dmac = chan->dmac;
	u32 config;
	u32 bit = chan->bit;

	dbg_showchan(chan);

	pr_debug("%s: clearing interrupts\n", __func__);

	/* clear interrupts */
	writel(bit, dmac->regs + PL080_TC_CLEAR);
	writel(bit, dmac->regs + PL080_ERR_CLEAR);

	pr_debug("%s: starting channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_ENABLE;

	pr_debug("%s: writing config %08x\n", __func__, config);
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

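/* s3c64xx_dma_stop
 *
 * halt the channel and poll (up to 1000 times, 10us apart) for the active
 * bit to clear before disabling it; returns -EFAULT if the channel refuses
 * to stop.
 */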
static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
{
	u32 config;
	int timeout;

	pr_debug("%s: stopping channel\n", __func__);

	dbg_showchan(chan);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_HALT;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	timeout = 1000;
	do {
		config = readl(chan->regs + PL080S_CH_CONFIG);
		pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
		if (config & PL080_CONFIG_ACTIVE)
			udelay(10);
		else
			break;
	} while (--timeout > 0);

	if (config & PL080_CONFIG_ACTIVE) {
		printk(KERN_ERR "%s: channel still active\n", __func__);
		return -EFAULT;
	}

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
					 struct s3c64xx_dma_buff *buf,
					 enum s3c2410_dma_buffresult result)
{
	if (chan->callback_fn != NULL)
		(chan->callback_fn)(chan, buf->pw, 0, result);
}

static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
	dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
	kfree(buff);
}

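/* s3c64xx_dma_flush
 *
 * disable the channel and abort every queued buffer, completing each one
 * with S3C2410_RES_ABORT and returning its LLI to the pool.
 */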
static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *buff, *next;
	u32 config;

	dbg_showchan(chan);

	pr_debug("%s: flushing channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	/* dump all the buffers associated with this channel */

	for (buff = chan->curr; buff != NULL; buff = next) {
		next = buff->next;
		pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);

		s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
		s3c64xx_dma_freebuff(buff);
	}

	chan->curr = chan->next = chan->end = NULL;

	return 0;
}

int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	switch (op) {
	case S3C2410_DMAOP_START:
		return s3c64xx_dma_start(chan);

	case S3C2410_DMAOP_STOP:
		return s3c64xx_dma_stop(chan);

	case S3C2410_DMAOP_FLUSH:
		return s3c64xx_dma_flush(chan);

	/* believe PAUSE/RESUME are no-ops */
	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_STARTED:
	case S3C2410_DMAOP_TIMEOUT:
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);

/* s3c2410_dma_enqueue
 *
 * queue a buffer for transfer on the given channel: build an LLI for it,
 * chain it onto the end of the channel's list (or load it straight into
 * the channel registers if the queue is empty) and, for circular channels,
 * link it back round to the current head.
 */

int s3c2410_dma_enqueue(unsigned int channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c64xx_dma_buff *next;
	struct s3c64xx_dma_buff *buff;
	struct pl080s_lli *lli;
	unsigned long flags;
	int ret;

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
	if (!buff) {
		printk(KERN_ERR "%s: no memory for buffer\n", __func__);
		return -ENOMEM;
	}

	lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
	if (!lli) {
		printk(KERN_ERR "%s: no memory for lli\n", __func__);
		ret = -ENOMEM;
		goto err_buff;
	}

	pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
		 __func__, buff, data, lli, (u32)buff->lli_dma, size);

	buff->lli = lli;
	buff->pw = id;

	s3c64xx_dma_fill_lli(chan, lli, data, size);

	local_irq_save(flags);

	if ((next = chan->next) != NULL) {
		struct s3c64xx_dma_buff *end = chan->end;
		struct pl080s_lli *endlli = end->lli;

		pr_debug("enqueuing onto channel\n");

		end->next = buff;
		endlli->next_lli = buff->lli_dma;

		if (chan->flags & S3C2410_DMAF_CIRCULAR) {
			struct s3c64xx_dma_buff *curr = chan->curr;
			lli->next_lli = curr->lli_dma;
		}

		if (next == chan->curr) {
			writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
			chan->next = buff;
		}

		show_lli(endlli);
		chan->end = buff;
	} else {
		pr_debug("enqueuing onto empty channel\n");

		chan->curr = buff;
		chan->next = buff;
		chan->end = buff;

		s3c64xx_lli_to_regs(chan, lli);
	}

	local_irq_restore(flags);

	show_lli(lli);

	dbg_showchan(chan);
	dbg_showbuffs(chan);
	return 0;

err_buff:
	kfree(buff);
	return ret;
}

EXPORT_SYMBOL(s3c2410_dma_enqueue);

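/* s3c2410_dma_devconfig
 *
 * configure the transfer direction and device address for the channel:
 * set the PL080 flow-control/direction field, route the peripheral request
 * number to the source or destination select as appropriate, and allow the
 * terminal-count and error interrupts.
 */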
int s3c2410_dma_devconfig(unsigned int channel,
			  enum s3c2410_dmasrc source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	u32 peripheral;
	u32 config = 0;

	pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
		 __func__, channel, source, devaddr, chan);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	peripheral = (chan->peripheral & 0xf);
	chan->source = source;
	chan->dev_addr = devaddr;

	pr_debug("%s: peripheral %d\n", __func__, peripheral);

	switch (source) {
	case S3C2410_DMASRC_HW:
		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
		break;
	case S3C2410_DMASRC_MEM:
		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
		break;
	default:
		printk(KERN_ERR "%s: bad source\n", __func__);
		return -EINVAL;
	}

	/* allow TC and ERR interrupts */
	config |= PL080_CONFIG_TC_IRQ_MASK;
	config |= PL080_CONFIG_ERR_IRQ_MASK;

	pr_debug("%s: config %08x\n", __func__, config);

	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);

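/* s3c2410_dma_getposition
 *
 * read back the channel's current source and/or destination address from
 * the hardware, for callers tracking transfer progress.
 */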
int s3c2410_dma_getposition(unsigned int channel,
			    dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	if (src != NULL)
		*src = readl(chan->regs + PL080_CH_SRC_ADDR);

	if (dst != NULL)
		*dst = readl(chan->regs + PL080_CH_DST_ADDR);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);

/* s3c2410_dma_request
 *
 * get control of a DMA channel
*/

int s3c2410_dma_request(unsigned int channel,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c2410_dma_chan *chan;
	unsigned long flags;

	pr_debug("dma%d: s3c2410_dma_request: client=%s, dev=%p\n",
		 channel, client->name, dev);

	local_irq_save(flags);

	chan = s3c64xx_dma_map_channel(channel);
	if (chan == NULL) {
		local_irq_restore(flags);
		return -EBUSY;
	}

	dbg_showchan(chan);

	chan->client = client;
	chan->in_use = 1;
	chan->peripheral = channel;

	local_irq_restore(flags);

	/* need to setup */

	pr_debug("%s: channel initialised, %p\n", __func__, chan);

	return chan->number | DMACH_LOW_LEVEL;
}

EXPORT_SYMBOL(s3c2410_dma_request);

/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
*/

int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */

	chan->client = NULL;
	chan->in_use = 0;

	if (!(channel & DMACH_LOW_LEVEL))
		s3c_dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}

EXPORT_SYMBOL(s3c2410_dma_free);

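/* s3c64xx_dma_irq
 *
 * interrupt handler shared by the eight channels of one controller: read
 * the terminal-count and error status and, for each signalling channel,
 * acknowledge the interrupt, complete the finished buffer via its callback
 * and advance (or recycle, for circular queues) the buffer list.
 */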
static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
	struct s3c64xx_dmac *dmac = pw;
	struct s3c2410_dma_chan *chan;
	enum s3c2410_dma_buffresult res;
	u32 tcstat, errstat;
	u32 bit;
	int offs;

	tcstat = readl(dmac->regs + PL080_TC_STATUS);
	errstat = readl(dmac->regs + PL080_ERR_STATUS);

	for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
		struct s3c64xx_dma_buff *buff;

		if (!(errstat & bit) && !(tcstat & bit))
			continue;

		chan = dmac->channels + offs;
		res = S3C2410_RES_ERR;

		if (tcstat & bit) {
			writel(bit, dmac->regs + PL080_TC_CLEAR);
			res = S3C2410_RES_OK;
		}

		if (errstat & bit)
			writel(bit, dmac->regs + PL080_ERR_CLEAR);

		/* 'next' points to the buffer that is next to the
		 * currently active buffer.
		 * For CIRCULAR queues, 'next' will be same as 'curr'
		 * when 'end' is the active buffer.
		 */
		buff = chan->curr;
		while (buff && buff != chan->next
				&& buff->next != chan->next)
			buff = buff->next;

		if (!buff)
			BUG();

		if (buff == chan->next)
			buff = chan->end;

		s3c64xx_dma_bufffdone(chan, buff, res);

		/* Free the node and update curr, if non-circular queue */
		if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
			chan->curr = buff->next;
			s3c64xx_dma_freebuff(buff);
		}

		/* Update 'next' */
		buff = chan->next;
		if (chan->next == chan->end) {
			chan->next = chan->curr;
			if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
				chan->end = NULL;
		} else {
			chan->next = buff->next;
		}
	}

	return IRQ_HANDLED;
}

static struct sysdev_class dma_sysclass = {
	.name		= "s3c64xx-dma",
};

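/* s3c64xx_dma_init1
 *
 * bring up one PL080S controller: register its sysdev, map its registers,
 * enable its clock, claim its interrupt, hook up the eight hardware
 * channels starting at 'chno', and finally enable the controller.
 */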
static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
			     int irq, unsigned int base)
{
	struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
	struct s3c64xx_dmac *dmac;
	char clkname[16];
	void __iomem *regs;
	void __iomem *regptr;
	int err, ch;

	dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
	if (!dmac) {
		printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
		return -ENOMEM;
	}

	dmac->sysdev.id = chno / 8;
	dmac->sysdev.cls = &dma_sysclass;

	err = sysdev_register(&dmac->sysdev);
	if (err) {
		printk(KERN_ERR "%s: failed to register sysdevice\n", __func__);
		goto err_alloc;
	}

	regs = ioremap(base, 0x200);
	if (!regs) {
		printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
		err = -ENXIO;
		goto err_dev;
	}

	snprintf(clkname, sizeof(clkname), "dma%d", dmac->sysdev.id);

	dmac->clk = clk_get(NULL, clkname);
	if (IS_ERR(dmac->clk)) {
		printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
		err = PTR_ERR(dmac->clk);
		goto err_map;
	}

	clk_enable(dmac->clk);

	dmac->regs = regs;
	dmac->chanbase = chbase;
	dmac->channels = chptr;

	err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
	if (err < 0) {
		printk(KERN_ERR "%s: failed to get irq\n", __func__);
		goto err_clk;
	}

	regptr = regs + PL080_Cx_BASE(0);

	for (ch = 0; ch < 8; ch++, chno++, chptr++) {
		printk(KERN_INFO "%s: registering DMA %d (%p)\n",
		       __func__, chno, regptr);

		chptr->bit = 1 << ch;
		chptr->number = chno;
		chptr->dmac = dmac;
		chptr->regs = regptr;
		regptr += PL080_Cx_STRIDE;
	}

	/* for the moment, permanently enable the controller */
	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);

	printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);

	return 0;

err_clk:
	clk_disable(dmac->clk);
	clk_put(dmac->clk);
err_map:
	iounmap(regs);
err_dev:
	sysdev_unregister(&dmac->sysdev);
err_alloc:
	kfree(dmac);
	return err;
}

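/* s3c64xx_dma_init
 *
 * system initialisation: create the LLI dma_pool, register the sysdev
 * class, switch all request lines from SDMA to DMA in the system register
 * and bring up both PL080S controllers.
 */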
static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = sysdev_class_register(&dma_sysclass);
	if (ret) {
		printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C_SYSREG(0x110));

	/* Register standard DMA controllers */
	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

	return 0;
}

arch_initcall(s3c64xx_dma_init);