1/*
2 * drivers/dma/ste_dma40.c
3 *
4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/dmaengine.h>
14#include <linux/platform_device.h>
15#include <linux/clk.h>
16#include <linux/delay.h>
17
18#include <plat/ste_dma40.h>
19
20#include "ste_dma40_ll.h"
21
22#define D40_NAME "dma40"
23
24#define D40_PHY_CHAN -1
25
26/* For masking out/in 2 bit channel positions */
27#define D40_CHAN_POS(chan) (2 * (chan / 2))
28#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
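/*
 * Illustrative note: the ACTIVE register holds the 2-bit command/status
 * fields of the even physical channels and ACTIVO those of the odd ones,
 * so channels 2n and 2n+1 share the same bit position in their respective
 * registers. For example, channels 4 and 5 both get D40_CHAN_POS() == 4,
 * i.e. bits 5:4 of D40_DREG_ACTIVE and D40_DREG_ACTIVO respectively.
 */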
29
30/* Maximum iterations taken before giving up suspending a channel */
31#define D40_SUSPEND_MAX_IT 500
32
33#define D40_ALLOC_FREE (1 << 31)
34#define D40_ALLOC_PHY (1 << 30)
35#define D40_ALLOC_LOG_FREE 0
36
37/* The number of free d40_desc to keep in memory before starting
38 * to kfree() them */
39#define D40_DESC_CACHE_SIZE 50
40
41/* Hardware designer of the block */
42#define D40_PERIPHID2_DESIGNER 0x8
43
44/**
45 * enum d40_command - The different commands and/or statuses.
46 *
47 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
48 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
49 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
50 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
51 */
52enum d40_command {
53 D40_DMA_STOP = 0,
54 D40_DMA_RUN = 1,
55 D40_DMA_SUSPEND_REQ = 2,
56 D40_DMA_SUSPENDED = 3
57};
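/*
 * Usage sketch (illustrative only): a channel is suspended by writing
 * D40_DMA_SUSPEND_REQ into its 2-bit field of ACTIVE/ACTIVO and then
 * polling that same field until it reads back D40_DMA_SUSPENDED (or
 * D40_DMA_STOP); see d40_channel_execute_command() below.
 */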
58
59/**
60 * struct d40_lli_pool - Structure for keeping LLIs in memory
61 *
62 * @base: Pointer to memory area when the pre_alloc_lli's are not large
63 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
64 * pre_alloc_lli is used.
65 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
66 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
67 * one buffer to one buffer.
68 */
69struct d40_lli_pool {
70 void *base;
71 int size;
72 /* Space for dst and src, plus an extra for padding */
73 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
74};
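/*
 * Illustrative note: for the common one-src/one-dst transfer the LLIs fit
 * in pre_alloc_lli and no allocation is made; longer transfers kmalloc()
 * a suitably aligned area and point base at it (see d40_pool_lli_alloc()).
 */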
75
76/**
77 * struct d40_desc - A descriptor is one DMA job.
78 *
79 * @lli_phy: LLI settings for physical channel. Both src and dst
80 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
81 * lli_len equals one.
82 * @lli_log: Same as above but for logical channels.
83 * @lli_pool: The pool with two entries pre-allocated.
84 * @lli_len: Number of llis of current descriptor.
85 * @lli_count: Number of transferred llis.
86 * @lli_tx_len: Max number of LLIs per transfer; there can be
87 * many transfers for one descriptor.
88 * @txd: DMA engine struct. Used, among other things, for communication
89 * during a transfer.
90 * @node: List entry.
91 * @dir: The transfer direction of this job.
92 * @is_in_client_list: true if the client owns this descriptor.
93 *
94 * This descriptor is used for both logical and physical transfers.
95 */
96
97struct d40_desc {
98 /* LLI physical */
99 struct d40_phy_lli_bidir lli_phy;
100 /* LLI logical */
101 struct d40_log_lli_bidir lli_log;
102
103 struct d40_lli_pool lli_pool;
104 int lli_len;
105 int lli_count;
106 u32 lli_tx_len;
107
108 struct dma_async_tx_descriptor txd;
109 struct list_head node;
110
111 enum dma_data_direction dir;
112 bool is_in_client_list;
113};
114
115/**
116 * struct d40_lcla_pool - LCLA pool settings and data.
117 *
118 * @base: The virtual address of LCLA.
119 * @phy: Physical base address of LCLA.
120 * @base_size: size of lcla.
121 * @lock: Lock to protect the content in this struct.
122 * @alloc_map: Mapping between physical channel and LCLA entries.
123 * @num_blocks: The number of entries in alloc_map. Equal to the
124 * number of physical channels.
125 */
126struct d40_lcla_pool {
127 void *base;
128 dma_addr_t phy;
129 resource_size_t base_size;
130 spinlock_t lock;
131 u32 *alloc_map;
132 int num_blocks;
133};
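/*
 * Illustrative note: alloc_map holds one u32 bitmap per physical channel
 * with one bit per LCLA entry, so e.g. entries 0 and 3 in use on physical
 * channel 7 means alloc_map[7] == 0x9. d40_lcla_id_get()/d40_lcla_id_put()
 * below set and clear these bits under the pool lock.
 */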
134
135/**
136 * struct d40_phy_res - struct for handling eventlines mapped to physical
137 * channels.
138 *
139 * @lock: A lock protecting this entity.
140 * @num: The physical channel number of this entity.
141 * @allocated_src: Bit mask showing which src event lines are mapped to
142 * this physical channel. Can also be free or physically allocated.
143 * @allocated_dst: Same as for src but is dst.
144 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
145 * the event line number. allocated_src and allocated_dst can not both be
146 * allocated to a physical channel, since the interrupt handler then has
147 * no way of figuring out which one the interrupt belongs to.
148 */
149struct d40_phy_res {
150 spinlock_t lock;
151 int num;
152 u32 allocated_src;
153 u32 allocated_dst;
154};
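/*
 * Illustrative note: a physically allocated channel has both fields set to
 * D40_ALLOC_PHY, while logical use sets one bit per allocated event line,
 * e.g. taking event line 5 as src gives allocated_src == (1 << 5).
 * D40_ALLOC_FREE marks a completely unused half; see d40_alloc_mask_set()
 * and d40_alloc_mask_free() below.
 */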
155
156struct d40_base;
157
158/**
159 * struct d40_chan - Struct that describes a channel.
160 *
161 * @lock: A spinlock to protect this struct.
162 * @log_num: The logical channel number, if any, of this channel.
163 * @completed: Starts at 1; after the first interrupt it is set to the dma engine's
164 * current cookie.
165 * @pending_tx: The number of pending transfers. Used between interrupt handler
166 * and tasklet.
167 * @busy: Set to true when transfer is ongoing on this channel.
168 * @phy_chan: Pointer to physical channel which this instance runs on.
169 * @chan: DMA engine handle.
170 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
171 * transfer and call client callback.
172 * @client: Client owned descriptor list.
173 * @active: Active descriptor.
174 * @queue: Queued jobs.
175 * @free: List of free descriptors, ready to be reused.
176 * @free_len: Number of descriptors in the free list.
177 * @dma_cfg: The client configuration of this dma channel.
178 * @base: Pointer to the device instance struct.
179 * @src_def_cfg: Default cfg register setting for src.
180 * @dst_def_cfg: Default cfg register setting for dst.
181 * @log_def: Default logical channel settings.
182 * @lcla: Space for one dst/src pair for logical channel transfers.
183 * @lcpa: Pointer to dst and src lcpa settings.
184 *
185 * This struct can either "be" a logical or a physical channel.
186 */
187struct d40_chan {
188 spinlock_t lock;
189 int log_num;
190 /* ID of the most recent completed transfer */
191 int completed;
192 int pending_tx;
193 bool busy;
194 struct d40_phy_res *phy_chan;
195 struct dma_chan chan;
196 struct tasklet_struct tasklet;
197 struct list_head client;
198 struct list_head active;
199 struct list_head queue;
200 struct list_head free;
201 int free_len;
202 struct stedma40_chan_cfg dma_cfg;
203 struct d40_base *base;
204 /* Default register configurations */
205 u32 src_def_cfg;
206 u32 dst_def_cfg;
207 struct d40_def_lcsp log_def;
208 struct d40_lcla_elem lcla;
209 struct d40_log_lli_full *lcpa;
210};
211
212/**
213 * struct d40_base - The big global struct, one for each probe'd instance.
214 *
215 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
216 * @execmd_lock: Lock for execute command usage since several channels share
217 * the same physical register.
218 * @dev: The device structure.
219 * @virtbase: The virtual base address of the DMA's registers.
220 * @clk: Pointer to the DMA clock structure.
221 * @phy_start: Physical memory start of the DMA registers.
222 * @phy_size: Size of the DMA register map.
223 * @irq: The IRQ number.
224 * @num_phy_chans: The number of physical channels. Read from HW. This
225 * is the number of available channels for this driver, not counting "Secure
226 * mode" allocated physical channels.
227 * @num_log_chans: The number of logical channels. Calculated from
228 * num_phy_chans.
229 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
230 * @dma_slave: dma_device channels that can only do slave transfers.
231 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
232 * @phy_chans: Room for all possible physical channels in system.
233 * @log_chans: Room for all possible logical channels in system.
234 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
235 * to log_chans entries.
236 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
237 * to phy_chans entries.
238 * @plat_data: Pointer to provided platform_data which is the driver
239 * configuration.
240 * @phy_res: Vector containing all physical channels.
241 * @lcla_pool: lcla pool settings and data.
242 * @lcpa_base: The virtual mapped address of LCPA.
243 * @phy_lcpa: The physical address of the LCPA.
244 * @lcpa_size: The size of the LCPA area.
245 */
246struct d40_base {
247 spinlock_t interrupt_lock;
248 spinlock_t execmd_lock;
249 struct device *dev;
250 void __iomem *virtbase;
251 struct clk *clk;
252 phys_addr_t phy_start;
253 resource_size_t phy_size;
254 int irq;
255 int num_phy_chans;
256 int num_log_chans;
257 struct dma_device dma_both;
258 struct dma_device dma_slave;
259 struct dma_device dma_memcpy;
260 struct d40_chan *phy_chans;
261 struct d40_chan *log_chans;
262 struct d40_chan **lookup_log_chans;
263 struct d40_chan **lookup_phy_chans;
264 struct stedma40_platform_data *plat_data;
265 /* Physical half channels */
266 struct d40_phy_res *phy_res;
267 struct d40_lcla_pool lcla_pool;
268 void *lcpa_base;
269 dma_addr_t phy_lcpa;
270 resource_size_t lcpa_size;
271};
272
273/**
274 * struct d40_interrupt_lookup - lookup table for interrupt handler
275 *
276 * @src: Interrupt status register.
277 * @clr: Interrupt clear register.
278 * @is_error: true if this is an error interrupt.
279 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
280 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
281 */
282struct d40_interrupt_lookup {
283 u32 src;
284 u32 clr;
285 bool is_error;
286 int offset;
287};
288
289/**
290 * struct d40_reg_val - simple lookup struct
291 *
292 * @reg: The register.
293 * @val: The value that belongs to the register in reg.
294 */
295struct d40_reg_val {
296 unsigned int reg;
297 unsigned int val;
298};
299
300static int d40_pool_lli_alloc(struct d40_desc *d40d,
301 int lli_len, bool is_log)
302{
303 u32 align;
304 void *base;
305
306 if (is_log)
307 align = sizeof(struct d40_log_lli);
308 else
309 align = sizeof(struct d40_phy_lli);
310
311 if (lli_len == 1) {
312 base = d40d->lli_pool.pre_alloc_lli;
313 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
314 d40d->lli_pool.base = NULL;
315 } else {
316 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
317
318 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
319 d40d->lli_pool.base = base;
320
321 if (d40d->lli_pool.base == NULL)
322 return -ENOMEM;
323 }
324
325 if (is_log) {
326 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
327 align);
328 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
329 align);
330 } else {
331 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
332 align);
333 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
334 align);
335
336 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
337 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
338 }
339
340 return 0;
341}
342
343static void d40_pool_lli_free(struct d40_desc *d40d)
344{
345 kfree(d40d->lli_pool.base);
346 d40d->lli_pool.base = NULL;
347 d40d->lli_pool.size = 0;
348 d40d->lli_log.src = NULL;
349 d40d->lli_log.dst = NULL;
350 d40d->lli_phy.src = NULL;
351 d40d->lli_phy.dst = NULL;
352 d40d->lli_phy.src_addr = 0;
353 d40d->lli_phy.dst_addr = 0;
354}
355
356static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
357 struct d40_desc *desc)
358{
359 dma_cookie_t cookie = d40c->chan.cookie;
360
361 if (++cookie < 0)
362 cookie = 1;
363
364 d40c->chan.cookie = cookie;
365 desc->txd.cookie = cookie;
366
367 return cookie;
368}
369
370static void d40_desc_remove(struct d40_desc *d40d)
371{
372 list_del(&d40d->node);
373}
374
375static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
376{
377 struct d40_desc *desc;
378 struct d40_desc *d;
379 struct d40_desc *_d;
380
381 if (!list_empty(&d40c->client)) {
382 list_for_each_entry_safe(d, _d, &d40c->client, node)
383 if (async_tx_test_ack(&d->txd)) {
384 d40_pool_lli_free(d);
385 d40_desc_remove(d);
386 desc = d;
387 goto out;
388 }
389 }
390
391 if (list_empty(&d40c->free)) {
392 /* Alloc a new desc because there are no free ones to reuse */
393 desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
394 if (desc == NULL)
395 goto out;
396 INIT_LIST_HEAD(&desc->node);
397 } else {
398 /* Reuse an old desc. */
399 desc = list_first_entry(&d40c->free,
400 struct d40_desc,
401 node);
402 list_del(&desc->node);
403 d40c->free_len--;
404 }
405out:
406 return desc;
407}
408
409static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
410{
411 if (d40c->free_len < D40_DESC_CACHE_SIZE) {
412 list_add_tail(&d40d->node, &d40c->free);
413 d40c->free_len++;
414 } else
415 kfree(d40d);
416}
417
418static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
419{
420 list_add_tail(&desc->node, &d40c->active);
421}
422
423static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
424{
425 struct d40_desc *d;
426
427 if (list_empty(&d40c->active))
428 return NULL;
429
430 d = list_first_entry(&d40c->active,
431 struct d40_desc,
432 node);
433 return d;
434}
435
436static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
437{
438 list_add_tail(&desc->node, &d40c->queue);
439}
440
441static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
442{
443 struct d40_desc *d;
444
445 if (list_empty(&d40c->queue))
446 return NULL;
447
448 d = list_first_entry(&d40c->queue,
449 struct d40_desc,
450 node);
451 return d;
452}
453
454/* Support functions for logical channels */
455
456static int d40_lcla_id_get(struct d40_chan *d40c,
457 struct d40_lcla_pool *pool)
458{
459 int src_id = 0;
460 int dst_id = 0;
461 struct d40_log_lli *lcla_lidx_base =
462 pool->base + d40c->phy_chan->num * 1024;
463 int i;
464 int lli_per_log = d40c->base->plat_data->llis_per_log;
465
466 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
467 return 0;
468
469 if (pool->num_blocks > 32)
470 return -EINVAL;
471
472 spin_lock(&pool->lock);
473
474 for (i = 0; i < pool->num_blocks; i++) {
475 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
476 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
477 break;
478 }
479 }
480 src_id = i;
481 if (src_id >= pool->num_blocks)
482 goto err;
483
484 for (; i < pool->num_blocks; i++) {
485 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
486 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
487 break;
488 }
489 }
490
491 dst_id = i;
492 if (dst_id == src_id)
493 goto err;
494
495 d40c->lcla.src_id = src_id;
496 d40c->lcla.dst_id = dst_id;
497 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
498 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
499
500
501 spin_unlock(&pool->lock);
502 return 0;
503err:
504 spin_unlock(&pool->lock);
505 return -EINVAL;
506}
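/*
 * Illustrative note: each physical channel owns a 1 kB slice of the LCLA
 * area (pool->base + phy_chan->num * 1024). Within that slice the function
 * above reserves one block of llis_per_log entries for src and another for
 * dst, and records the block indices in d40c->lcla.src_id/dst_id.
 */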
507
508static void d40_lcla_id_put(struct d40_chan *d40c,
509 struct d40_lcla_pool *pool,
510 int id)
511{
512 if (id < 0)
513 return;
514
515 d40c->lcla.src_id = -1;
516 d40c->lcla.dst_id = -1;
517
518 spin_lock(&pool->lock);
519 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
520 spin_unlock(&pool->lock);
521}
522
523static int d40_channel_execute_command(struct d40_chan *d40c,
524 enum d40_command command)
525{
526 int status, i;
527 void __iomem *active_reg;
528 int ret = 0;
529 unsigned long flags;
530
531 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
532
533 if (d40c->phy_chan->num % 2 == 0)
534 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
535 else
536 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
537
538 if (command == D40_DMA_SUSPEND_REQ) {
539 status = (readl(active_reg) &
540 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
541 D40_CHAN_POS(d40c->phy_chan->num);
542
543 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
544 goto done;
545 }
546
547 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
548
549 if (command == D40_DMA_SUSPEND_REQ) {
550
551 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
552 status = (readl(active_reg) &
553 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
554 D40_CHAN_POS(d40c->phy_chan->num);
555
556 cpu_relax();
557 /*
558 * Reduce the number of bus accesses while
559 * waiting for the DMA to suspend.
560 */
561 udelay(3);
562
563 if (status == D40_DMA_STOP ||
564 status == D40_DMA_SUSPENDED)
565 break;
566 }
567
568 if (i == D40_SUSPEND_MAX_IT) {
569 dev_err(&d40c->chan.dev->device,
570 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
571 __func__, d40c->phy_chan->num, d40c->log_num,
572 status);
573 dump_stack();
574 ret = -EBUSY;
575 }
576
577 }
578done:
579 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
580 return ret;
581}
582
583static void d40_term_all(struct d40_chan *d40c)
584{
585 struct d40_desc *d40d;
586 struct d40_desc *d;
587 struct d40_desc *_d;
588
589 /* Release active descriptors */
590 while ((d40d = d40_first_active_get(d40c))) {
591 d40_desc_remove(d40d);
592
593 /* Return desc to free-list */
594 d40_desc_free(d40c, d40d);
595 }
596
597 /* Release queued descriptors waiting for transfer */
598 while ((d40d = d40_first_queued(d40c))) {
599 d40_desc_remove(d40d);
600
601 /* Return desc to free-list */
602 d40_desc_free(d40c, d40d);
603 }
604
605 /* Release client owned descriptors */
606 if (!list_empty(&d40c->client))
607 list_for_each_entry_safe(d, _d, &d40c->client, node) {
608 d40_pool_lli_free(d);
609 d40_desc_remove(d);
610 /* Return desc to free-list */
611 d40_desc_free(d40c, d);
612 }
613
614 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
615 d40c->lcla.src_id);
616 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
617 d40c->lcla.dst_id);
618
619 d40c->pending_tx = 0;
620 d40c->busy = false;
621}
622
623static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
624{
625 u32 val;
626 unsigned long flags;
627
628 if (do_enable)
629 val = D40_ACTIVATE_EVENTLINE;
630 else
631 val = D40_DEACTIVATE_EVENTLINE;
632
633 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
634
635 /* Enable event line connected to device (or memcpy) */
636 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
637 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
638 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
639
640 writel((val << D40_EVENTLINE_POS(event)) |
641 ~D40_EVENTLINE_MASK(event),
642 d40c->base->virtbase + D40_DREG_PCBASE +
643 d40c->phy_chan->num * D40_DREG_PCDELTA +
644 D40_CHAN_REG_SSLNK);
645 }
646 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
647 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
648
649 writel((val << D40_EVENTLINE_POS(event)) |
650 ~D40_EVENTLINE_MASK(event),
651 d40c->base->virtbase + D40_DREG_PCBASE +
652 d40c->phy_chan->num * D40_DREG_PCDELTA +
653 D40_CHAN_REG_SDLNK);
654 }
655
656 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
657}
658
659static u32 d40_chan_has_events(struct d40_chan *d40c)
660{
661 u32 val = 0;
662
663 /* If SSLNK or SDLNK is zero all events are disabled */
664 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
665 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
666 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
667 d40c->phy_chan->num * D40_DREG_PCDELTA +
668 D40_CHAN_REG_SSLNK);
669
670 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
671 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
672 d40c->phy_chan->num * D40_DREG_PCDELTA +
673 D40_CHAN_REG_SDLNK);
674 return val;
675}
676
677static void d40_config_enable_lidx(struct d40_chan *d40c)
678{
679 /* Set LIDX for lcla */
680 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
681 D40_SREG_ELEM_LOG_LIDX_MASK,
682 d40c->base->virtbase + D40_DREG_PCBASE +
683 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
684
685 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
686 D40_SREG_ELEM_LOG_LIDX_MASK,
687 d40c->base->virtbase + D40_DREG_PCBASE +
688 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
689}
690
691static int d40_config_write(struct d40_chan *d40c)
692{
693 u32 addr_base;
694 u32 var;
695 int res;
696
697 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
698 if (res)
699 return res;
700
701 /* Odd addresses are even addresses + 4 */
702 addr_base = (d40c->phy_chan->num % 2) * 4;
703 /* Setup channel mode to logical or physical */
704 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
705 D40_CHAN_POS(d40c->phy_chan->num);
706 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
707
708 /* Setup operational mode option register */
709 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
710 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
711
712 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
713
714 if (d40c->log_num != D40_PHY_CHAN) {
715 /* Set default config for CFG reg */
716 writel(d40c->src_def_cfg,
717 d40c->base->virtbase + D40_DREG_PCBASE +
718 d40c->phy_chan->num * D40_DREG_PCDELTA +
719 D40_CHAN_REG_SSCFG);
720 writel(d40c->dst_def_cfg,
721 d40c->base->virtbase + D40_DREG_PCBASE +
722 d40c->phy_chan->num * D40_DREG_PCDELTA +
723 D40_CHAN_REG_SDCFG);
724
725 d40_config_enable_lidx(d40c);
726 }
727 return res;
728}
729
730static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
731{
732
733 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
734 d40_phy_lli_write(d40c->base->virtbase,
735 d40c->phy_chan->num,
736 d40d->lli_phy.dst,
737 d40d->lli_phy.src);
738 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
739 struct d40_log_lli *src = d40d->lli_log.src;
740 struct d40_log_lli *dst = d40d->lli_log.dst;
741
742 src += d40d->lli_count;
743 dst += d40d->lli_count;
744 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
745 d40c->lcla.dst,
746 dst, src,
747 d40c->base->plat_data->llis_per_log);
748 }
749 d40d->lli_count += d40d->lli_tx_len;
750}
751
752static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
753{
754 struct d40_chan *d40c = container_of(tx->chan,
755 struct d40_chan,
756 chan);
757 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
758 unsigned long flags;
759
760 spin_lock_irqsave(&d40c->lock, flags);
761
762 tx->cookie = d40_assign_cookie(d40c, d40d);
763
764 d40_desc_queue(d40c, d40d);
765
766 spin_unlock_irqrestore(&d40c->lock, flags);
767
768 return tx->cookie;
769}
770
771static int d40_start(struct d40_chan *d40c)
772{
773 int err;
774
775 if (d40c->log_num != D40_PHY_CHAN) {
776 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
777 if (err)
778 return err;
779 d40_config_set_event(d40c, true);
780 }
781
782 err = d40_channel_execute_command(d40c, D40_DMA_RUN);
783
784 return err;
785}
786
787static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
788{
789 struct d40_desc *d40d;
790 int err;
791
792 /* Start queued jobs, if any */
793 d40d = d40_first_queued(d40c);
794
795 if (d40d != NULL) {
796 d40c->busy = true;
797
798 /* Remove from queue */
799 d40_desc_remove(d40d);
800
801 /* Add to active queue */
802 d40_desc_submit(d40c, d40d);
803
804 /* Initiate DMA job */
805 d40_desc_load(d40c, d40d);
806
807 /* Start dma job */
808 err = d40_start(d40c);
809
810 if (err)
811 return NULL;
812 }
813
814 return d40d;
815}
816
817/* called from interrupt context */
818static void dma_tc_handle(struct d40_chan *d40c)
819{
820 struct d40_desc *d40d;
821
822 if (!d40c->phy_chan)
823 return;
824
825 /* Get first active entry from list */
826 d40d = d40_first_active_get(d40c);
827
828 if (d40d == NULL)
829 return;
830
831 if (d40d->lli_count < d40d->lli_len) {
832
833 d40_desc_load(d40c, d40d);
834 /* Start dma job */
835 (void) d40_start(d40c);
836 return;
837 }
838
839 if (d40_queue_start(d40c) == NULL)
840 d40c->busy = false;
841
842 d40c->pending_tx++;
843 tasklet_schedule(&d40c->tasklet);
844
845}
846
847static void dma_tasklet(unsigned long data)
848{
849 struct d40_chan *d40c = (struct d40_chan *) data;
850 struct d40_desc *d40d_fin;
851 unsigned long flags;
852 dma_async_tx_callback callback;
853 void *callback_param;
854
855 spin_lock_irqsave(&d40c->lock, flags);
856
857 /* Get first active entry from list */
858 d40d_fin = d40_first_active_get(d40c);
859
860 if (d40d_fin == NULL)
861 goto err;
862
863 d40c->completed = d40d_fin->txd.cookie;
864
865 /*
866 * If terminating a channel pending_tx is set to zero.
867 * This prevents any finished active jobs from returning to the client.
868 */
869 if (d40c->pending_tx == 0) {
870 spin_unlock_irqrestore(&d40c->lock, flags);
871 return;
872 }
873
874 /* Callback to client */
875 callback = d40d_fin->txd.callback;
876 callback_param = d40d_fin->txd.callback_param;
877
878 if (async_tx_test_ack(&d40d_fin->txd)) {
879 d40_pool_lli_free(d40d_fin);
880 d40_desc_remove(d40d_fin);
881 /* Return desc to free-list */
882 d40_desc_free(d40c, d40d_fin);
883 } else {
884 if (!d40d_fin->is_in_client_list) {
885 d40_desc_remove(d40d_fin);
886 list_add_tail(&d40d_fin->node, &d40c->client);
887 d40d_fin->is_in_client_list = true;
888 }
889 }
890
891 d40c->pending_tx--;
892
893 if (d40c->pending_tx)
894 tasklet_schedule(&d40c->tasklet);
895
896 spin_unlock_irqrestore(&d40c->lock, flags);
897
898 if (callback)
899 callback(callback_param);
900
901 return;
902
903 err:
904 /* Rescue maneuver if receiving double interrupts */
905 if (d40c->pending_tx > 0)
906 d40c->pending_tx--;
907 spin_unlock_irqrestore(&d40c->lock, flags);
908}
909
910static irqreturn_t d40_handle_interrupt(int irq, void *data)
911{
912 static const struct d40_interrupt_lookup il[] = {
913 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
914 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
915 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
916 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
917 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
918 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
919 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
920 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
921 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
922 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
923 };
924
925 int i;
926 u32 regs[ARRAY_SIZE(il)];
927 u32 tmp;
928 u32 idx;
929 u32 row;
930 long chan = -1;
931 struct d40_chan *d40c;
932 unsigned long flags;
933 struct d40_base *base = data;
934
935 spin_lock_irqsave(&base->interrupt_lock, flags);
936
937 /* Read interrupt status of both logical and physical channels */
938 for (i = 0; i < ARRAY_SIZE(il); i++)
939 regs[i] = readl(base->virtbase + il[i].src);
940
941 for (;;) {
942
943 chan = find_next_bit((unsigned long *)regs,
944 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
945
946 /* No more set bits found? */
947 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
948 break;
949
950 row = chan / BITS_PER_LONG;
951 idx = chan & (BITS_PER_LONG - 1);
952
953 /* ACK interrupt */
954 tmp = readl(base->virtbase + il[row].clr);
955 tmp |= 1 << idx;
956 writel(tmp, base->virtbase + il[row].clr);
957
958 if (il[row].offset == D40_PHY_CHAN)
959 d40c = base->lookup_phy_chans[idx];
960 else
961 d40c = base->lookup_log_chans[il[row].offset + idx];
962 spin_lock(&d40c->lock);
963
964 if (!il[row].is_error)
965 dma_tc_handle(d40c);
966 else
967 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
968 __func__, chan, il[row].offset, idx);
969
970 spin_unlock(&d40c->lock);
971 }
972
973 spin_unlock_irqrestore(&base->interrupt_lock, flags);
974
975 return IRQ_HANDLED;
976}
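/*
 * Illustrative note on the il[] table above: each status/clear register
 * pair covers 32 event lines, so a set bit idx in row 'row' maps to
 * logical channel il[row].offset + idx, while the two physical rows
 * (offset == D40_PHY_CHAN) map bit idx directly to physical channel idx.
 */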
977
978
979static int d40_validate_conf(struct d40_chan *d40c,
980 struct stedma40_chan_cfg *conf)
981{
982 int res = 0;
983 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
984 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
985 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
986 == STEDMA40_CHANNEL_IN_LOG_MODE;
987
988 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
989 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
990 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
991 __func__);
992 res = -EINVAL;
993 }
994
995 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
996 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
997 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
998 __func__);
999 res = -EINVAL;
1000 }
1001
1002 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1003 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1004 dev_err(&d40c->chan.dev->device,
1005 "[%s] No event line\n", __func__);
1006 res = -EINVAL;
1007 }
1008
1009 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1010 (src_event_group != dst_event_group)) {
1011 dev_err(&d40c->chan.dev->device,
1012 "[%s] Invalid event group\n", __func__);
1013 res = -EINVAL;
1014 }
1015
1016 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1017 /*
1018 * DMAC HW supports it. Will be added to this driver,
1019 * in case any dma client requires it.
1020 */
1021 dev_err(&d40c->chan.dev->device,
1022 "[%s] periph to periph not supported\n",
1023 __func__);
1024 res = -EINVAL;
1025 }
1026
1027 return res;
1028}
1029
1030static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1031 int log_event_line, bool is_log)
1032{
1033 unsigned long flags;
1034 spin_lock_irqsave(&phy->lock, flags);
1035 if (!is_log) {
1036 /* Physical interrupts are masked per physical full channel */
1037 if (phy->allocated_src == D40_ALLOC_FREE &&
1038 phy->allocated_dst == D40_ALLOC_FREE) {
1039 phy->allocated_dst = D40_ALLOC_PHY;
1040 phy->allocated_src = D40_ALLOC_PHY;
1041 goto found;
1042 } else
1043 goto not_found;
1044 }
1045
1046 /* Logical channel */
1047 if (is_src) {
1048 if (phy->allocated_src == D40_ALLOC_PHY)
1049 goto not_found;
1050
1051 if (phy->allocated_src == D40_ALLOC_FREE)
1052 phy->allocated_src = D40_ALLOC_LOG_FREE;
1053
1054 if (!(phy->allocated_src & (1 << log_event_line))) {
1055 phy->allocated_src |= 1 << log_event_line;
1056 goto found;
1057 } else
1058 goto not_found;
1059 } else {
1060 if (phy->allocated_dst == D40_ALLOC_PHY)
1061 goto not_found;
1062
1063 if (phy->allocated_dst == D40_ALLOC_FREE)
1064 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1065
1066 if (!(phy->allocated_dst & (1 << log_event_line))) {
1067 phy->allocated_dst |= 1 << log_event_line;
1068 goto found;
1069 } else
1070 goto not_found;
1071 }
1072
1073not_found:
1074 spin_unlock_irqrestore(&phy->lock, flags);
1075 return false;
1076found:
1077 spin_unlock_irqrestore(&phy->lock, flags);
1078 return true;
1079}
1080
1081static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1082 int log_event_line)
1083{
1084 unsigned long flags;
1085 bool is_free = false;
1086
1087 spin_lock_irqsave(&phy->lock, flags);
1088 if (!log_event_line) {
1089 /* Physical interrupts are masked per physical full channel */
1090 phy->allocated_dst = D40_ALLOC_FREE;
1091 phy->allocated_src = D40_ALLOC_FREE;
1092 is_free = true;
1093 goto out;
1094 }
1095
1096 /* Logical channel */
1097 if (is_src) {
1098 phy->allocated_src &= ~(1 << log_event_line);
1099 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1100 phy->allocated_src = D40_ALLOC_FREE;
1101 } else {
1102 phy->allocated_dst &= ~(1 << log_event_line);
1103 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1104 phy->allocated_dst = D40_ALLOC_FREE;
1105 }
1106
1107 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1108 D40_ALLOC_FREE);
1109
1110out:
1111 spin_unlock_irqrestore(&phy->lock, flags);
1112
1113 return is_free;
1114}
1115
1116static int d40_allocate_channel(struct d40_chan *d40c)
1117{
1118 int dev_type;
1119 int event_group;
1120 int event_line;
1121 struct d40_phy_res *phys;
1122 int i;
1123 int j;
1124 int log_num;
1125 bool is_src;
1126 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1127 == STEDMA40_CHANNEL_IN_LOG_MODE;
1128
1129
1130 phys = d40c->base->phy_res;
1131
1132 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1133 dev_type = d40c->dma_cfg.src_dev_type;
1134 log_num = 2 * dev_type;
1135 is_src = true;
1136 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1137 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1138 /* dst event lines are used for logical memcpy */
1139 dev_type = d40c->dma_cfg.dst_dev_type;
1140 log_num = 2 * dev_type + 1;
1141 is_src = false;
1142 } else
1143 return -EINVAL;
1144
1145 event_group = D40_TYPE_TO_GROUP(dev_type);
1146 event_line = D40_TYPE_TO_EVENT(dev_type);
1147
1148 if (!is_log) {
1149 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1150 /* Find physical half channel */
1151 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1152
1153 if (d40_alloc_mask_set(&phys[i], is_src,
1154 0, is_log))
1155 goto found_phy;
1156 }
1157 } else
1158 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1159 int phy_num = j + event_group * 2;
1160 for (i = phy_num; i < phy_num + 2; i++) {
1161 if (d40_alloc_mask_set(&phys[i], is_src,
1162 0, is_log))
1163 goto found_phy;
1164 }
1165 }
1166 return -EINVAL;
1167found_phy:
1168 d40c->phy_chan = &phys[i];
1169 d40c->log_num = D40_PHY_CHAN;
1170 goto out;
1171 }
1172 if (dev_type == -1)
1173 return -EINVAL;
1174
1175 /* Find logical channel */
1176 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1177 int phy_num = j + event_group * 2;
1178 /*
1179 * Spread logical channels across all available physical channels
1180 * rather than packing every logical channel onto the first available
1181 * phy channel.
1182 */
1183 if (is_src) {
1184 for (i = phy_num; i < phy_num + 2; i++) {
1185 if (d40_alloc_mask_set(&phys[i], is_src,
1186 event_line, is_log))
1187 goto found_log;
1188 }
1189 } else {
1190 for (i = phy_num + 1; i >= phy_num; i--) {
1191 if (d40_alloc_mask_set(&phys[i], is_src,
1192 event_line, is_log))
1193 goto found_log;
1194 }
1195 }
1196 }
1197 return -EINVAL;
1198
1199found_log:
1200 d40c->phy_chan = &phys[i];
1201 d40c->log_num = log_num;
1202out:
1203
1204 if (is_log)
1205 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1206 else
1207 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1208
1209 return 0;
1210
1211}
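/*
 * Illustrative note on the spreading above: the candidate physical
 * channels for event group 'g' are g * 2 and g * 2 + 1 within every group
 * of eight, i.e. for event_group == 2 the loops try phy channels 4, 5,
 * 12, 13, 20, 21, ... so logical channels from different event groups end
 * up on different physical channels.
 */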
1212
1213static int d40_config_memcpy(struct d40_chan *d40c)
1214{
1215 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1216
1217 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1218 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1219 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1220 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1221 memcpy[d40c->chan.chan_id];
1222
1223 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1224 dma_has_cap(DMA_SLAVE, cap)) {
1225 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1226 } else {
1227 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1228 __func__);
1229 return -EINVAL;
1230 }
1231
1232 return 0;
1233}
1234
1235
1236static int d40_free_dma(struct d40_chan *d40c)
1237{
1238
1239 int res = 0;
1240 u32 event, dir;
1241 struct d40_phy_res *phy = d40c->phy_chan;
1242 bool is_src;
1243
1244 /* Terminate all queued and active transfers */
1245 d40_term_all(d40c);
1246
1247 if (phy == NULL) {
1248 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1249 __func__);
1250 return -EINVAL;
1251 }
1252
1253 if (phy->allocated_src == D40_ALLOC_FREE &&
1254 phy->allocated_dst == D40_ALLOC_FREE) {
1255 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1256 __func__);
1257 return -EINVAL;
1258 }
1259
1260
1261 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1262 if (res) {
1263 dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
1264 __func__);
1265 return res;
1266 }
1267
1268 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1269 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1270 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1271 dir = D40_CHAN_REG_SDLNK;
1272 is_src = false;
1273 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1274 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1275 dir = D40_CHAN_REG_SSLNK;
1276 is_src = true;
1277 } else {
1278 dev_err(&d40c->chan.dev->device,
1279 "[%s] Unknown direction\n", __func__);
1280 return -EINVAL;
1281 }
1282
1283 if (d40c->log_num != D40_PHY_CHAN) {
1284 /*
1285 * Release logical channel, deactivate the event line during
1286 * the time physical res is suspended.
1287 */
1288 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1289 D40_EVENTLINE_MASK(event),
1290 d40c->base->virtbase + D40_DREG_PCBASE +
1291 phy->num * D40_DREG_PCDELTA + dir);
1292
1293 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1294
1295 /*
1296 * Check if there are more logical allocations
1297 * on this phy channel.
1298 */
1299 if (!d40_alloc_mask_free(phy, is_src, event)) {
1300 /* Resume the other logical channels if any */
1301 if (d40_chan_has_events(d40c)) {
1302 res = d40_channel_execute_command(d40c,
1303 D40_DMA_RUN);
1304 if (res) {
1305 dev_err(&d40c->chan.dev->device,
1306 "[%s] Executing RUN command\n",
1307 __func__);
1308 return res;
1309 }
1310 }
1311 return 0;
1312 }
1313 } else
1314 d40_alloc_mask_free(phy, is_src, 0);
1315
1316 /* Release physical channel */
1317 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1318 if (res) {
1319 dev_err(&d40c->chan.dev->device,
1320 "[%s] Failed to stop channel\n", __func__);
1321 return res;
1322 }
1323 d40c->phy_chan = NULL;
1324 /* Invalidate channel type */
1325 d40c->dma_cfg.channel_type = 0;
1326 d40c->base->lookup_phy_chans[phy->num] = NULL;
1327
1328 return 0;
1329
1330
1331}
1332
1333static int d40_pause(struct dma_chan *chan)
1334{
1335 struct d40_chan *d40c =
1336 container_of(chan, struct d40_chan, chan);
1337 int res;
1338
1339 unsigned long flags;
1340
1341 spin_lock_irqsave(&d40c->lock, flags);
1342
1343 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1344 if (res == 0) {
1345 if (d40c->log_num != D40_PHY_CHAN) {
1346 d40_config_set_event(d40c, false);
1347 /* Resume the other logical channels if any */
1348 if (d40_chan_has_events(d40c))
1349 res = d40_channel_execute_command(d40c,
1350 D40_DMA_RUN);
1351 }
1352 }
1353
1354 spin_unlock_irqrestore(&d40c->lock, flags);
1355 return res;
1356}
1357
1358static bool d40_is_paused(struct d40_chan *d40c)
1359{
1360 bool is_paused = false;
1361 unsigned long flags;
1362 void __iomem *active_reg;
1363 u32 status;
1364 u32 event;
1365 int res;
1366
1367 spin_lock_irqsave(&d40c->lock, flags);
1368
1369 if (d40c->log_num == D40_PHY_CHAN) {
1370 if (d40c->phy_chan->num % 2 == 0)
1371 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1372 else
1373 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1374
1375 status = (readl(active_reg) &
1376 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1377 D40_CHAN_POS(d40c->phy_chan->num);
1378 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1379 is_paused = true;
1380
1381 goto _exit;
1382 }
1383
1384 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1385 if (res != 0)
1386 goto _exit;
1387
1388 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1389 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1390 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1391 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1392 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1393 else {
1394 dev_err(&d40c->chan.dev->device,
1395 "[%s] Unknown direction\n", __func__);
1396 goto _exit;
1397 }
1398 status = d40_chan_has_events(d40c);
1399 status = (status & D40_EVENTLINE_MASK(event)) >>
1400 D40_EVENTLINE_POS(event);
1401
1402 if (status != D40_DMA_RUN)
1403 is_paused = true;
1404
1405 /* Resume the other logical channels if any */
1406 if (d40_chan_has_events(d40c))
1407 res = d40_channel_execute_command(d40c,
1408 D40_DMA_RUN);
1409
1410_exit:
1411 spin_unlock_irqrestore(&d40c->lock, flags);
1412 return is_paused;
1413
1414}
1415
1416
1417static bool d40_tx_is_linked(struct d40_chan *d40c)
1418{
1419 bool is_link;
1420
1421 if (d40c->log_num != D40_PHY_CHAN)
1422 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1423 else
1424 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1425 d40c->phy_chan->num * D40_DREG_PCDELTA +
1426 D40_CHAN_REG_SDLNK) &
1427 D40_SREG_LNK_PHYS_LNK_MASK;
1428 return is_link;
1429}
1430
1431static u32 d40_residue(struct d40_chan *d40c)
1432{
1433 u32 num_elt;
1434
1435 if (d40c->log_num != D40_PHY_CHAN)
1436 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1437 >> D40_MEM_LCSP2_ECNT_POS;
1438 else
1439 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1440 d40c->phy_chan->num * D40_DREG_PCDELTA +
1441 D40_CHAN_REG_SDELT) &
1442 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1443 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1444}
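/*
 * Illustrative note: the residue is the number of elements left (ECNT)
 * times the element size, where data_width is the log2 of the element
 * size in bytes. E.g. ECNT == 16 with 32-bit elements (data_width == 2)
 * gives 16 * (1 << 2) == 64 bytes remaining.
 */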
1445
1446static int d40_resume(struct dma_chan *chan)
1447{
1448 struct d40_chan *d40c =
1449 container_of(chan, struct d40_chan, chan);
1450 int res = 0;
1451 unsigned long flags;
1452
1453 spin_lock_irqsave(&d40c->lock, flags);
1454
1455 if (d40c->log_num != D40_PHY_CHAN) {
1456 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1457 if (res)
1458 goto out;
1459
1460 /* If bytes are left to transfer or the tx is linked, resume the job */
1461 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1462 d40_config_set_event(d40c, true);
1463 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1464 }
1465 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1466 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1467
1468out:
1469 spin_unlock_irqrestore(&d40c->lock, flags);
1470 return res;
1471}
1472
1473static u32 stedma40_residue(struct dma_chan *chan)
1474{
1475 struct d40_chan *d40c =
1476 container_of(chan, struct d40_chan, chan);
1477 u32 bytes_left;
1478 unsigned long flags;
1479
1480 spin_lock_irqsave(&d40c->lock, flags);
1481 bytes_left = d40_residue(d40c);
1482 spin_unlock_irqrestore(&d40c->lock, flags);
1483
1484 return bytes_left;
1485}
1486
1487/* Public DMA functions in addition to the DMA engine framework */
1488
1489int stedma40_set_psize(struct dma_chan *chan,
1490 int src_psize,
1491 int dst_psize)
1492{
1493 struct d40_chan *d40c =
1494 container_of(chan, struct d40_chan, chan);
1495 unsigned long flags;
1496
1497 spin_lock_irqsave(&d40c->lock, flags);
1498
1499 if (d40c->log_num != D40_PHY_CHAN) {
1500 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1501 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1502 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1503 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1504 goto out;
1505 }
1506
1507 if (src_psize == STEDMA40_PSIZE_PHY_1)
1508 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1509 else {
1510 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1511 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1512 D40_SREG_CFG_PSIZE_POS);
1513 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1514 }
1515
1516 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1517 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1518 else {
1519 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1520 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1521 D40_SREG_CFG_PSIZE_POS);
1522 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1523 }
1524out:
1525 spin_unlock_irqrestore(&d40c->lock, flags);
1526 return 0;
1527}
1528EXPORT_SYMBOL(stedma40_set_psize);
1529
1530struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1531 struct scatterlist *sgl_dst,
1532 struct scatterlist *sgl_src,
1533 unsigned int sgl_len,
1534 unsigned long flags)
1535{
1536 int res;
1537 struct d40_desc *d40d;
1538 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1539 chan);
1540 unsigned long flg;
1541
1542
1543 spin_lock_irqsave(&d40c->lock, flg);
1544 d40d = d40_desc_get(d40c);
1545
1546 if (d40d == NULL)
1547 goto err;
1548
1549 memset(d40d, 0, sizeof(struct d40_desc));
1550 d40d->lli_len = sgl_len;
1551 d40d->lli_tx_len = d40d->lli_len;
1552 d40d->txd.flags = flags;
1553
1554 if (d40c->log_num != D40_PHY_CHAN) {
1555 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1556 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1557
1558 if (sgl_len > 1)
1559 /*
1560 * Check if there is space available in lcla. If not,
1561 * split list into 1-length and run only in lcpa
1562 * space.
1563 */
1564 if (d40_lcla_id_get(d40c,
1565 &d40c->base->lcla_pool) != 0)
1566 d40d->lli_tx_len = 1;
1567
1568 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1569 dev_err(&d40c->chan.dev->device,
1570 "[%s] Out of memory\n", __func__);
1571 goto err;
1572 }
1573
1574 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1575 sgl_src,
1576 sgl_len,
1577 d40d->lli_log.src,
1578 d40c->log_def.lcsp1,
1579 d40c->dma_cfg.src_info.data_width,
1580 flags & DMA_PREP_INTERRUPT,
1581 d40d->lli_tx_len,
1582 d40c->base->plat_data->llis_per_log);
1583
1584 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1585 sgl_dst,
1586 sgl_len,
1587 d40d->lli_log.dst,
1588 d40c->log_def.lcsp3,
1589 d40c->dma_cfg.dst_info.data_width,
1590 flags & DMA_PREP_INTERRUPT,
1591 d40d->lli_tx_len,
1592 d40c->base->plat_data->llis_per_log);
1593
1594
1595 } else {
1596 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1597 dev_err(&d40c->chan.dev->device,
1598 "[%s] Out of memory\n", __func__);
1599 goto err;
1600 }
1601
1602 res = d40_phy_sg_to_lli(sgl_src,
1603 sgl_len,
1604 0,
1605 d40d->lli_phy.src,
1606 d40d->lli_phy.src_addr,
1607 d40c->src_def_cfg,
1608 d40c->dma_cfg.src_info.data_width,
1609 d40c->dma_cfg.src_info.psize,
1610 true);
1611
1612 if (res < 0)
1613 goto err;
1614
1615 res = d40_phy_sg_to_lli(sgl_dst,
1616 sgl_len,
1617 0,
1618 d40d->lli_phy.dst,
1619 d40d->lli_phy.dst_addr,
1620 d40c->dst_def_cfg,
1621 d40c->dma_cfg.dst_info.data_width,
1622 d40c->dma_cfg.dst_info.psize,
1623 true);
1624
1625 if (res < 0)
1626 goto err;
1627
1628 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1629 d40d->lli_pool.size, DMA_TO_DEVICE);
1630 }
1631
1632 dma_async_tx_descriptor_init(&d40d->txd, chan);
1633
1634 d40d->txd.tx_submit = d40_tx_submit;
1635
1636 spin_unlock_irqrestore(&d40c->lock, flg);
1637
1638 return &d40d->txd;
1639err:
1640 spin_unlock_irqrestore(&d40c->lock, flg);
1641 return NULL;
1642}
1643EXPORT_SYMBOL(stedma40_memcpy_sg);
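/*
 * Usage sketch (illustrative only; dst_sg, src_sg, my_done and my_data are
 * made-up client names): a client holding a channel can do a scatter/gather
 * copy roughly like this:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = stedma40_memcpy_sg(chan, dst_sg, src_sg, sg_len,
 *				DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_done;
 *		tx->callback_param = my_data;
 *		tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */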
1644
1645bool stedma40_filter(struct dma_chan *chan, void *data)
1646{
1647 struct stedma40_chan_cfg *info = data;
1648 struct d40_chan *d40c =
1649 container_of(chan, struct d40_chan, chan);
1650 int err;
1651
1652 if (data) {
1653 err = d40_validate_conf(d40c, info);
1654 if (!err)
1655 d40c->dma_cfg = *info;
1656 } else
1657 err = d40_config_memcpy(d40c);
1658
1659 return err == 0;
1660}
1661EXPORT_SYMBOL(stedma40_filter);
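/*
 * Usage sketch (illustrative only): clients normally hand this filter to
 * dma_request_channel() together with a channel configuration, e.g.
 *
 *	struct stedma40_chan_cfg cfg = { .dir = STEDMA40_PERIPH_TO_MEM, ... };
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL as data falls back to the default memcpy configuration, as
 * handled above.
 */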
1662
1663/* DMA ENGINE functions */
1664static int d40_alloc_chan_resources(struct dma_chan *chan)
1665{
1666 int err;
1667 unsigned long flags;
1668 struct d40_chan *d40c =
1669 container_of(chan, struct d40_chan, chan);
1670 bool is_free_phy;
1671 spin_lock_irqsave(&d40c->lock, flags);
1672
1673 d40c->completed = chan->cookie = 1;
1674
1675 /*
1676 * If no dma configuration is set (channel_type == 0)
1677 * use default configuration (memcpy)
1678 */
1679 if (d40c->dma_cfg.channel_type == 0) {
1680 err = d40_config_memcpy(d40c);
1681 if (err)
1682 goto err_alloc;
1683 }
1684 is_free_phy = (d40c->phy_chan == NULL);
1685
1686 err = d40_allocate_channel(d40c);
1687 if (err) {
1688 dev_err(&d40c->chan.dev->device,
1689 "[%s] Failed to allocate channel\n", __func__);
1690 goto err_alloc;
1691 }
1692
1693 /* Fill in basic CFG register values */
1694 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1695 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1696
1697 if (d40c->log_num != D40_PHY_CHAN) {
1698 d40_log_cfg(&d40c->dma_cfg,
1699 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1700
1701 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1702 d40c->lcpa = d40c->base->lcpa_base +
1703 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1704 else
1705 d40c->lcpa = d40c->base->lcpa_base +
1706 d40c->dma_cfg.dst_dev_type *
1707 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1708 }
1709
1710 /*
1711 * Only write channel configuration to the DMA if the physical
1712 * resource is free. In case of multiple logical channels
1713 * on the same physical resource, only the first write is necessary.
1714 */
1715 if (is_free_phy) {
1716 err = d40_config_write(d40c);
1717 if (err) {
1718 dev_err(&d40c->chan.dev->device,
1719 "[%s] Failed to configure channel\n",
1720 __func__);
1721 }
1722 }
1723
1724 spin_unlock_irqrestore(&d40c->lock, flags);
1725 return 0;
1726
1727 err_config:
1728 (void) d40_free_dma(d40c);
1729 err_alloc:
1730 spin_unlock_irqrestore(&d40c->lock, flags);
1731 dev_err(&d40c->chan.dev->device,
1732 "[%s] Channel allocation failed\n", __func__);
1733 return -EINVAL;
1734}
1735
1736static void d40_free_chan_resources(struct dma_chan *chan)
1737{
1738 struct d40_chan *d40c =
1739 container_of(chan, struct d40_chan, chan);
1740 int err;
1741 unsigned long flags;
1742
1743 spin_lock_irqsave(&d40c->lock, flags);
1744
1745 err = d40_free_dma(d40c);
1746
1747 if (err)
1748 dev_err(&d40c->chan.dev->device,
1749 "[%s] Failed to free channel\n", __func__);
1750 spin_unlock_irqrestore(&d40c->lock, flags);
1751}
1752
1753static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1754 dma_addr_t dst,
1755 dma_addr_t src,
1756 size_t size,
1757 unsigned long flags)
1758{
1759 struct d40_desc *d40d;
1760 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1761 chan);
1762 unsigned long flg;
1763 int err = 0;
1764
1765 spin_lock_irqsave(&d40c->lock, flg);
1766 d40d = d40_desc_get(d40c);
1767
1768 if (d40d == NULL) {
1769 dev_err(&d40c->chan.dev->device,
1770 "[%s] Descriptor is NULL\n", __func__);
1771 goto err;
1772 }
1773
1774 memset(d40d, 0, sizeof(struct d40_desc));
1775
1776 d40d->txd.flags = flags;
1777
1778 dma_async_tx_descriptor_init(&d40d->txd, chan);
1779
1780 d40d->txd.tx_submit = d40_tx_submit;
1781
1782 if (d40c->log_num != D40_PHY_CHAN) {
1783
1784 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1785 dev_err(&d40c->chan.dev->device,
1786 "[%s] Out of memory\n", __func__);
1787 goto err;
1788 }
1789 d40d->lli_len = 1;
1790 d40d->lli_tx_len = 1;
1791
1792 d40_log_fill_lli(d40d->lli_log.src,
1793 src,
1794 size,
1795 0,
1796 d40c->log_def.lcsp1,
1797 d40c->dma_cfg.src_info.data_width,
1798 true, true);
1799
1800 d40_log_fill_lli(d40d->lli_log.dst,
1801 dst,
1802 size,
1803 0,
1804 d40c->log_def.lcsp3,
1805 d40c->dma_cfg.dst_info.data_width,
1806 true, true);
1807
1808 } else {
1809
1810 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1811 dev_err(&d40c->chan.dev->device,
1812 "[%s] Out of memory\n", __func__);
1813 goto err;
1814 }
1815
1816 err = d40_phy_fill_lli(d40d->lli_phy.src,
1817 src,
1818 size,
1819 d40c->dma_cfg.src_info.psize,
1820 0,
1821 d40c->src_def_cfg,
1822 true,
1823 d40c->dma_cfg.src_info.data_width,
1824 false);
1825 if (err)
1826 goto err_fill_lli;
1827
1828 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1829 dst,
1830 size,
1831 d40c->dma_cfg.dst_info.psize,
1832 0,
1833 d40c->dst_def_cfg,
1834 true,
1835 d40c->dma_cfg.dst_info.data_width,
1836 false);
1837
1838 if (err)
1839 goto err_fill_lli;
1840
1841 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1842 d40d->lli_pool.size, DMA_TO_DEVICE);
1843 }
1844
1845 spin_unlock_irqrestore(&d40c->lock, flg);
1846 return &d40d->txd;
1847
1848err_fill_lli:
1849 dev_err(&d40c->chan.dev->device,
1850 "[%s] Failed filling in PHY LLI\n", __func__);
1851 d40_pool_lli_free(d40d);
1852err:
1853 spin_unlock_irqrestore(&d40c->lock, flg);
1854 return NULL;
1855}
1856
1857static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1858 struct d40_chan *d40c,
1859 struct scatterlist *sgl,
1860 unsigned int sg_len,
1861 enum dma_data_direction direction,
1862 unsigned long flags)
1863{
1864 dma_addr_t dev_addr = 0;
1865 int total_size;
1866
1867 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1868 dev_err(&d40c->chan.dev->device,
1869 "[%s] Out of memory\n", __func__);
1870 return -ENOMEM;
1871 }
1872
1873 d40d->lli_len = sg_len;
1874 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1875 d40d->lli_tx_len = d40d->lli_len;
1876 else
1877 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1878
1879 if (sg_len > 1)
1880 /*
1881 * Check if there is space available in lcla.
1882 * If not, split list into 1-length and run only
1883 * in lcpa space.
1884 */
1885 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1886 d40d->lli_tx_len = 1;
1887
1888 if (direction == DMA_FROM_DEVICE) {
1889 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1890 total_size = d40_log_sg_to_dev(&d40c->lcla,
1891 sgl, sg_len,
1892 &d40d->lli_log,
1893 &d40c->log_def,
1894 d40c->dma_cfg.src_info.data_width,
1895 d40c->dma_cfg.dst_info.data_width,
1896 direction,
1897 flags & DMA_PREP_INTERRUPT,
1898 dev_addr, d40d->lli_tx_len,
1899 d40c->base->plat_data->llis_per_log);
1900 } else if (direction == DMA_TO_DEVICE) {
1901 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1902 total_size = d40_log_sg_to_dev(&d40c->lcla,
1903 sgl, sg_len,
1904 &d40d->lli_log,
1905 &d40c->log_def,
1906 d40c->dma_cfg.src_info.data_width,
1907 d40c->dma_cfg.dst_info.data_width,
1908 direction,
1909 flags & DMA_PREP_INTERRUPT,
1910 dev_addr, d40d->lli_tx_len,
1911 d40c->base->plat_data->llis_per_log);
1912 } else
1913 return -EINVAL;
1914 if (total_size < 0)
1915 return -EINVAL;
1916
1917 return 0;
1918}
1919
1920static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1921 struct d40_chan *d40c,
1922 struct scatterlist *sgl,
1923 unsigned int sgl_len,
1924 enum dma_data_direction direction,
1925 unsigned long flags)
1926{
1927 dma_addr_t src_dev_addr;
1928 dma_addr_t dst_dev_addr;
1929 int res;
1930
1931 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1932 dev_err(&d40c->chan.dev->device,
1933 "[%s] Out of memory\n", __func__);
1934 return -ENOMEM;
1935 }
1936
1937 d40d->lli_len = sgl_len;
1938 d40d->lli_tx_len = sgl_len;
1939
1940 if (direction == DMA_FROM_DEVICE) {
1941 dst_dev_addr = 0;
1942 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1943 } else if (direction == DMA_TO_DEVICE) {
1944 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1945 src_dev_addr = 0;
1946 } else
1947 return -EINVAL;
1948
1949 res = d40_phy_sg_to_lli(sgl,
1950 sgl_len,
1951 src_dev_addr,
1952 d40d->lli_phy.src,
1953 d40d->lli_phy.src_addr,
1954 d40c->src_def_cfg,
1955 d40c->dma_cfg.src_info.data_width,
1956 d40c->dma_cfg.src_info.psize,
1957 true);
1958 if (res < 0)
1959 return res;
1960
1961 res = d40_phy_sg_to_lli(sgl,
1962 sgl_len,
1963 dst_dev_addr,
1964 d40d->lli_phy.dst,
1965 d40d->lli_phy.dst_addr,
1966 d40c->dst_def_cfg,
1967 d40c->dma_cfg.dst_info.data_width,
1968 d40c->dma_cfg.dst_info.psize,
1969 true);
1970 if (res < 0)
1971 return res;
1972
1973 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1974 d40d->lli_pool.size, DMA_TO_DEVICE);
1975 return 0;
1976}
1977
1978static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1979 struct scatterlist *sgl,
1980 unsigned int sg_len,
1981 enum dma_data_direction direction,
1982 unsigned long flags)
1983{
1984 struct d40_desc *d40d;
1985 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1986 chan);
1987 unsigned long flg;
1988 int err;
1989
1990 if (d40c->dma_cfg.pre_transfer)
1991 d40c->dma_cfg.pre_transfer(chan,
1992 d40c->dma_cfg.pre_transfer_data,
1993 sg_dma_len(sgl));
1994
1995 spin_lock_irqsave(&d40c->lock, flg);
1996 d40d = d40_desc_get(d40c);
1997 spin_unlock_irqrestore(&d40c->lock, flg);
1998
1999 if (d40d == NULL)
2000 return NULL;
2001
2002 memset(d40d, 0, sizeof(struct d40_desc));
2003
2004 if (d40c->log_num != D40_PHY_CHAN)
2005 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2006 direction, flags);
2007 else
2008 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2009 direction, flags);
2010 if (err) {
2011 dev_err(&d40c->chan.dev->device,
2012 "[%s] Failed to prepare %s slave sg job: %d\n",
2013 __func__,
2014 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2015 return NULL;
2016 }
2017
2018 d40d->txd.flags = flags;
2019
2020 dma_async_tx_descriptor_init(&d40d->txd, chan);
2021
2022 d40d->txd.tx_submit = d40_tx_submit;
2023
2024 return &d40d->txd;
2025}
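
/*
 * Illustrative client-side sketch (not part of this driver): a slave
 * transfer prepared through the hook above is typically driven roughly
 * like this, assuming "chan" was obtained with dma_request_channel() and
 * a suitable filter function, and "done_cb" is a hypothetical completion
 * callback:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						 DMA_TO_DEVICE,
 *						 DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EBUSY;
 *	tx->callback = done_cb;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */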
2026
2027static enum dma_status d40_tx_status(struct dma_chan *chan,
2028 dma_cookie_t cookie,
2029 struct dma_tx_state *txstate)
2030{
2031 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2032 dma_cookie_t last_used;
2033 dma_cookie_t last_complete;
2034 int ret;
2035
2036 last_complete = d40c->completed;
2037 last_used = chan->cookie;
2038
2039 if (d40_is_paused(d40c))
2040 ret = DMA_PAUSED;
2041 else
2042 ret = dma_async_is_complete(cookie, last_complete, last_used);
8d318a50 2043
2044 dma_set_tx_state(txstate, last_complete, last_used,
2045 stedma40_residue(chan));
2046
2047 return ret;
2048}
2049
2050static void d40_issue_pending(struct dma_chan *chan)
2051{
2052 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2053 unsigned long flags;
2054
2055 spin_lock_irqsave(&d40c->lock, flags);
2056
2057 /* Busy means that pending jobs are already being processed */
2058 if (!d40c->busy)
2059 (void) d40_queue_start(d40c);
2060
2061 spin_unlock_irqrestore(&d40c->lock, flags);
2062}
2063
2064static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2065 unsigned long arg)
2066{
2067 unsigned long flags;
2068 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2069
2070 switch (cmd) {
2071 case DMA_TERMINATE_ALL:
2072 spin_lock_irqsave(&d40c->lock, flags);
2073 d40_term_all(d40c);
2074 spin_unlock_irqrestore(&d40c->lock, flags);
2075 return 0;
2076 case DMA_PAUSE:
2077 return d40_pause(chan);
2078 case DMA_RESUME:
2079 return d40_resume(chan);
2080 }
2081
2082 /* Other commands are unimplemented */
2083 return -ENXIO;
2084}
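
/*
 * A minimal sketch (illustrative only) of how the commands handled above
 * are reached from a client through the generic dmaengine control hook:
 *
 *	chan->device->device_control(chan, DMA_PAUSE, 0);
 *	chan->device->device_control(chan, DMA_RESUME, 0);
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */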
2085
2086/* Initialization functions */
2087
2088static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2089 struct d40_chan *chans, int offset,
2090 int num_chans)
2091{
2092 int i = 0;
2093 struct d40_chan *d40c;
2094
2095 INIT_LIST_HEAD(&dma->channels);
2096
2097 for (i = offset; i < offset + num_chans; i++) {
2098 d40c = &chans[i];
2099 d40c->base = base;
2100 d40c->chan.device = dma;
2101
2102 /* Invalidate lcla element */
2103 d40c->lcla.src_id = -1;
2104 d40c->lcla.dst_id = -1;
2105
2106 spin_lock_init(&d40c->lock);
2107
2108 d40c->log_num = D40_PHY_CHAN;
2109
2110 INIT_LIST_HEAD(&d40c->free);
2111 INIT_LIST_HEAD(&d40c->active);
2112 INIT_LIST_HEAD(&d40c->queue);
2113 INIT_LIST_HEAD(&d40c->client);
2114
2115 d40c->free_len = 0;
2116
2117 tasklet_init(&d40c->tasklet, dma_tasklet,
2118 (unsigned long) d40c);
2119
2120 list_add_tail(&d40c->chan.device_node,
2121 &dma->channels);
2122 }
2123}
2124
2125static int __init d40_dmaengine_init(struct d40_base *base,
2126 int num_reserved_chans)
2127{
2128	int err;
2129
2130 d40_chan_init(base, &base->dma_slave, base->log_chans,
2131 0, base->num_log_chans);
2132
2133 dma_cap_zero(base->dma_slave.cap_mask);
2134 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2135
2136 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2137 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2138 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2139 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2140 base->dma_slave.device_tx_status = d40_tx_status;
2141 base->dma_slave.device_issue_pending = d40_issue_pending;
2142 base->dma_slave.device_control = d40_control;
2143 base->dma_slave.dev = base->dev;
2144
2145 err = dma_async_device_register(&base->dma_slave);
2146
2147 if (err) {
2148 dev_err(base->dev,
2149 "[%s] Failed to register slave channels\n",
2150 __func__);
2151 goto failure1;
2152 }
2153
2154 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2155 base->num_log_chans, base->plat_data->memcpy_len);
2156
2157 dma_cap_zero(base->dma_memcpy.cap_mask);
2158 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2159
2160 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2161 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2162 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2163 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2164 base->dma_memcpy.device_tx_status = d40_tx_status;
2165 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2166 base->dma_memcpy.device_control = d40_control;
2167 base->dma_memcpy.dev = base->dev;
2168	/*
2169	 * This controller can only access addresses on even 32-bit (4-byte)
2170	 * boundaries; copy_align is the exponent, i.e. 2^2 = 4 bytes.
2171	 */
2172 base->dma_memcpy.copy_align = 2;
2173
2174 err = dma_async_device_register(&base->dma_memcpy);
2175
2176 if (err) {
2177 dev_err(base->dev,
2178 "[%s] Failed to regsiter memcpy only channels\n",
2179 __func__);
2180 goto failure2;
2181 }
2182
2183 d40_chan_init(base, &base->dma_both, base->phy_chans,
2184 0, num_reserved_chans);
2185
2186 dma_cap_zero(base->dma_both.cap_mask);
2187 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2188 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2189
2190 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2191 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2192 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2193 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2194 base->dma_both.device_tx_status = d40_tx_status;
2195 base->dma_both.device_issue_pending = d40_issue_pending;
2196 base->dma_both.device_control = d40_control;
2197 base->dma_both.dev = base->dev;
2198 base->dma_both.copy_align = 2;
2199 err = dma_async_device_register(&base->dma_both);
2200
2201 if (err) {
2202 dev_err(base->dev,
2203 "[%s] Failed to register logical and physical capable channels\n",
2204 __func__);
2205 goto failure3;
2206 }
2207 return 0;
2208failure3:
2209 dma_async_device_unregister(&base->dma_memcpy);
2210failure2:
2211 dma_async_device_unregister(&base->dma_slave);
2212failure1:
2213 return err;
2214}
2215
2216/* Hardware detection and initialization functions. */
2217
2218static int __init d40_phy_res_init(struct d40_base *base)
2219{
2220 int i;
2221 int num_phy_chans_avail = 0;
2222 u32 val[2];
2223 int odd_even_bit = -2;
2224
2225 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2226 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2227
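	/*
	 * PRSME and PRSMO each hold two status bits per physical channel:
	 * even-numbered channels in PRSME, odd-numbered in PRSMO, with
	 * channel pair n occupying bits [2n+1:2n]. odd_even_bit tracks the
	 * current bit position as the loop alternates between the two words.
	 */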
2228 for (i = 0; i < base->num_phy_chans; i++) {
2229 base->phy_res[i].num = i;
2230 odd_even_bit += 2 * ((i % 2) == 0);
2231 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2232 /* Mark security only channels as occupied */
2233 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2234 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2235 } else {
2236 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2237 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2238 num_phy_chans_avail++;
2239 }
2240 spin_lock_init(&base->phy_res[i].lock);
2241 }
2242 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2243 num_phy_chans_avail, base->num_phy_chans);
2244
2245 /* Verify settings extended vs standard */
2246 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2247
2248 for (i = 0; i < base->num_phy_chans; i++) {
2249
2250 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2251 (val[0] & 0x3) != 1)
2252 dev_info(base->dev,
2253 "[%s] INFO: channel %d is misconfigured (%d)\n",
2254 __func__, i, val[0] & 0x3);
2255
2256 val[0] = val[0] >> 2;
2257 }
2258
2259 return num_phy_chans_avail;
2260}
2261
2262static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2263{
2264 static const struct d40_reg_val dma_id_regs[] = {
2265 /* Peripheral Id */
2266 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2267 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2268 /*
2269 * D40_DREG_PERIPHID2 Depends on HW revision:
2270 * MOP500/HREF ED has 0x0008,
2271 * ? has 0x0018,
2272 * HREF V1 has 0x0028
2273 */
2274 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2275
2276 /* PCell Id */
2277 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2278 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2279 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2280 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2281 };
2282 struct stedma40_platform_data *plat_data;
2283 struct clk *clk = NULL;
2284 void __iomem *virtbase = NULL;
2285 struct resource *res = NULL;
2286 struct d40_base *base = NULL;
2287 int num_log_chans = 0;
2288 int num_phy_chans;
2289 int i;
2290
2291 clk = clk_get(&pdev->dev, NULL);
2292
2293 if (IS_ERR(clk)) {
2294 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2295 __func__);
2296 goto failure;
2297 }
2298
2299 clk_enable(clk);
2300
2301 /* Get IO for DMAC base address */
2302 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2303 if (!res)
2304 goto failure;
2305
2306 if (request_mem_region(res->start, resource_size(res),
2307 D40_NAME " I/O base") == NULL)
2308 goto failure;
2309
2310 virtbase = ioremap(res->start, resource_size(res));
2311 if (!virtbase)
2312 goto failure;
2313
2314 /* HW version check */
2315 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2316 if (dma_id_regs[i].val !=
2317 readl(virtbase + dma_id_regs[i].reg)) {
2318 dev_err(&pdev->dev,
2319 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2320 __func__,
2321 dma_id_regs[i].val,
2322 dma_id_regs[i].reg,
2323 readl(virtbase + dma_id_regs[i].reg));
2324 goto failure;
2325 }
2326 }
2327
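	/*
	 * PERIPHID2 is decoded by hand: bits [3:0] hold the designer ID
	 * checked here, bits [7:4] the hardware revision printed below.
	 */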
2328 i = readl(virtbase + D40_DREG_PERIPHID2);
2329
2330 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2331 dev_err(&pdev->dev,
2332 "[%s] Unknown designer! Got %x wanted %x\n",
2333 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2334 goto failure;
2335 }
2336
2337 /* The number of physical channels on this HW */
2338 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
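	/* e.g. an ICFG channel field of 0 means 4 channels, 7 means 32 */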
2339
2340 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2341 (i >> 4) & 0xf, res->start);
2342
2343 plat_data = pdev->dev.platform_data;
2344
2345 /* Count the number of logical channels in use */
2346 for (i = 0; i < plat_data->dev_len; i++)
2347 if (plat_data->dev_rx[i] != 0)
2348 num_log_chans++;
2349
2350 for (i = 0; i < plat_data->dev_len; i++)
2351 if (plat_data->dev_tx[i] != 0)
2352 num_log_chans++;
2353
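	/*
	 * A single allocation holds the d40_base struct followed by every
	 * channel descriptor: the physical channels first, then the logical
	 * and memcpy-only channels, addressed via phy_chans/log_chans below.
	 */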
2354 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2355 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2356 sizeof(struct d40_chan), GFP_KERNEL);
2357
2358 if (base == NULL) {
2359 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2360 goto failure;
2361 }
2362
2363 base->clk = clk;
2364 base->num_phy_chans = num_phy_chans;
2365 base->num_log_chans = num_log_chans;
2366 base->phy_start = res->start;
2367 base->phy_size = resource_size(res);
2368 base->virtbase = virtbase;
2369 base->plat_data = plat_data;
2370 base->dev = &pdev->dev;
2371 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2372 base->log_chans = &base->phy_chans[num_phy_chans];
2373
2374 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2375 GFP_KERNEL);
2376 if (!base->phy_res)
2377 goto failure;
2378
2379 base->lookup_phy_chans = kzalloc(num_phy_chans *
2380 sizeof(struct d40_chan *),
2381 GFP_KERNEL);
2382 if (!base->lookup_phy_chans)
2383 goto failure;
2384
2385 if (num_log_chans + plat_data->memcpy_len) {
2386 /*
2387		 * The maximum number of logical channels is the number of event
2388		 * lines for all src devices and dst devices.
2389 */
2390 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2391 sizeof(struct d40_chan *),
2392 GFP_KERNEL);
2393 if (!base->lookup_log_chans)
2394 goto failure;
2395 }
2396 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2397 GFP_KERNEL);
2398 if (!base->lcla_pool.alloc_map)
2399 goto failure;
2400
2401 return base;
2402
2403failure:
2404 if (clk) {
2405 clk_disable(clk);
2406 clk_put(clk);
2407 }
2408 if (virtbase)
2409 iounmap(virtbase);
2410 if (res)
2411 release_mem_region(res->start,
2412 resource_size(res));
2415
2416 if (base) {
2417 kfree(base->lcla_pool.alloc_map);
2418 kfree(base->lookup_log_chans);
2419 kfree(base->lookup_phy_chans);
2420 kfree(base->phy_res);
2421 kfree(base);
2422 }
2423
2424 return NULL;
2425}
2426
2427static void __init d40_hw_init(struct d40_base *base)
2428{
2429
2430 static const struct d40_reg_val dma_init_reg[] = {
2431 /* Clock every part of the DMA block from start */
2432 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2433
2434 /* Interrupts on all logical channels */
2435 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2436 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2437 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2438 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2439 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2440 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2441 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2442 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2443 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2444 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2445 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2446 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2447 };
2448 int i;
2449 u32 prmseo[2] = {0, 0};
2450 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2451 u32 pcmis = 0;
2452 u32 pcicr = 0;
2453
2454 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2455 writel(dma_init_reg[i].val,
2456 base->virtbase + dma_init_reg[i].reg);
2457
2458 /* Configure all our dma channels to default settings */
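	/*
	 * PRMSE/PRMSO and ACTIVE/ACTIVO hold two bits per channel, split
	 * over an even and an odd register. The channels are walked
	 * backwards (num_phy_chans - i - 1) so the repeated left shifts
	 * leave the lowest-numbered channels in the least significant bits
	 * when the accumulated words are written out below.
	 */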
2459 for (i = 0; i < base->num_phy_chans; i++) {
2460
2461 activeo[i % 2] = activeo[i % 2] << 2;
2462
2463 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2464 == D40_ALLOC_PHY) {
2465 activeo[i % 2] |= 3;
2466 continue;
2467 }
2468
2469 /* Enable interrupt # */
2470 pcmis = (pcmis << 1) | 1;
2471
2472 /* Clear interrupt # */
2473 pcicr = (pcicr << 1) | 1;
2474
2475 /* Set channel to physical mode */
2476 prmseo[i % 2] = prmseo[i % 2] << 2;
2477 prmseo[i % 2] |= 1;
2478
2479 }
2480
2481 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2482 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2483 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2484 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2485
2486 /* Write which interrupt to enable */
2487 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2488
2489 /* Write which interrupt to clear */
2490 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2491
2492}
2493
2494static int __init d40_probe(struct platform_device *pdev)
2495{
2496 int err;
2497 int ret = -ENOENT;
2498 struct d40_base *base;
2499 struct resource *res = NULL;
2500 int num_reserved_chans;
2501 u32 val;
2502
2503 base = d40_hw_detect_init(pdev);
2504
2505 if (!base)
2506 goto failure;
2507
2508 num_reserved_chans = d40_phy_res_init(base);
2509
2510 platform_set_drvdata(pdev, base);
2511
2512 spin_lock_init(&base->interrupt_lock);
2513 spin_lock_init(&base->execmd_lock);
2514
2515 /* Get IO for logical channel parameter address */
2516 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2517 if (!res) {
2518 ret = -ENOENT;
2519 dev_err(&pdev->dev,
2520 "[%s] No \"lcpa\" memory resource\n",
2521 __func__);
2522 goto failure;
2523 }
2524 base->lcpa_size = resource_size(res);
2525 base->phy_lcpa = res->start;
2526
2527 if (request_mem_region(res->start, resource_size(res),
2528 D40_NAME " I/O lcpa") == NULL) {
2529 ret = -EBUSY;
2530 dev_err(&pdev->dev,
2531 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2532 __func__, res->start, res->end);
2533 goto failure;
2534 }
2535
2536	/* The LCPA is placed in ESRAM; reuse any address already programmed into the DMAC. */
2537 val = readl(base->virtbase + D40_DREG_LCPA);
2538 if (res->start != val && val != 0) {
2539 dev_warn(&pdev->dev,
2540 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2541 __func__, val, res->start);
2542 } else
2543 writel(res->start, base->virtbase + D40_DREG_LCPA);
2544
2545 base->lcpa_base = ioremap(res->start, resource_size(res));
2546 if (!base->lcpa_base) {
2547 ret = -ENOMEM;
2548 dev_err(&pdev->dev,
2549 "[%s] Failed to ioremap LCPA region\n",
2550 __func__);
2551 goto failure;
2552 }
2553 /* Get IO for logical channel link address */
2554 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2555 if (!res) {
2556 ret = -ENOENT;
2557 dev_err(&pdev->dev,
2558 "[%s] No \"lcla\" resource defined\n",
2559 __func__);
2560 goto failure;
2561 }
2562
2563 base->lcla_pool.base_size = resource_size(res);
2564 base->lcla_pool.phy = res->start;
2565
2566 if (request_mem_region(res->start, resource_size(res),
2567 D40_NAME " I/O lcla") == NULL) {
2568 ret = -EBUSY;
2569 dev_err(&pdev->dev,
2570 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2571 __func__, res->start, res->end);
2572 goto failure;
2573 }
2574 val = readl(base->virtbase + D40_DREG_LCLA);
2575 if (res->start != val && val != 0) {
2576 dev_warn(&pdev->dev,
2577 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2578 __func__, val, res->start);
2579 } else
2580 writel(res->start, base->virtbase + D40_DREG_LCLA);
2581
2582 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2583 if (!base->lcla_pool.base) {
2584 ret = -ENOMEM;
2585 dev_err(&pdev->dev,
2586 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2587 __func__, res->start, res->end);
2588 goto failure;
2589 }
2590
2591 spin_lock_init(&base->lcla_pool.lock);
2592
2593 base->lcla_pool.num_blocks = base->num_phy_chans;
2594
2595 base->irq = platform_get_irq(pdev, 0);
2596
2597 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2598
2599 if (ret) {
2600 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2601 goto failure;
2602 }
2603
2604 err = d40_dmaengine_init(base, num_reserved_chans);
2605 if (err)
2606 goto failure;
2607
2608 d40_hw_init(base);
2609
2610 dev_info(base->dev, "initialized\n");
2611 return 0;
2612
2613failure:
2614 if (base) {
		if (base->lcla_pool.base)
			iounmap(base->lcla_pool.base);
		if (base->lcpa_base)
			iounmap(base->lcpa_base);
2615		if (base->virtbase)
2616			iounmap(base->virtbase);
2617		if (base->lcla_pool.phy)
2618			release_mem_region(base->lcla_pool.phy,
2619					   base->lcla_pool.base_size);
2620 if (base->phy_lcpa)
2621 release_mem_region(base->phy_lcpa,
2622 base->lcpa_size);
2623 if (base->phy_start)
2624 release_mem_region(base->phy_start,
2625 base->phy_size);
2626 if (base->clk) {
2627 clk_disable(base->clk);
2628 clk_put(base->clk);
2629 }
2630
2631 kfree(base->lcla_pool.alloc_map);
2632 kfree(base->lookup_log_chans);
2633 kfree(base->lookup_phy_chans);
2634 kfree(base->phy_res);
2635 kfree(base);
2636 }
2637
2638 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2639 return ret;
2640}
2641
2642static struct platform_driver d40_driver = {
2643 .driver = {
2644 .owner = THIS_MODULE,
2645 .name = D40_NAME,
2646 },
2647};
2648
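/*
 * arch_initcall() runs before the subsys and device initcalls of
 * potential DMA clients, so the engine is registered early.
 */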
2649int __init stedma40_init(void)
2650{
2651 return platform_driver_probe(&d40_driver, d40_probe);
2652}
2653arch_initcall(stedma40_init);