arch/arm/plat-omap/iovmm.c
/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/               mapping        iommu_                 page
 *    | da    pa    va     (d)-(p)-(v)    function               type
 *    ---------------------------------------------------------------------------
 *    1 | c   c     c       1 - 1 - 1     _kmap() / _kunmap()     s
 *    2 | c   c,a   c       1 - 1 - 1     _kmalloc()/ _kfree()    s
 *    3 | c   d     c       1 - n - 1     _vmap() / _vunmap()     s
 *    4 | c   d,a   c       1 - n - 1     _vmalloc()/ _vfree()    n*
 *
 *
 * 'iova': device iommu virtual address
 * 'da':   alias of 'iova'
 * 'pa':   physical address
 * 'va':   mpu virtual address
 *
 * 'c':    contiguous memory area
 * 'd':    discontiguous memory area
 * 'a':    anonymous memory allocation
 * '()':   optional feature
 *
 * 'n':    a normal page (4KB) size is used.
 * 's':    multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
 *
 * '*':    not yet, but feasible.
 */
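
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * use pattern 4 above, assuming it already holds a 'struct iommu *obj' from
 * the OMAP iommu core. The function name and the 1MB size are hypothetical.
 */
#if 0
static int example_vmalloc_mapping(struct iommu *obj)
{
	u32 da;

	/* allocate 1MB of discontiguous memory and map it at an anonymous da */
	da = iommu_vmalloc(obj, 0, SZ_1M, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... program the device with 'da' ... */

	/* tear the mapping down and free the backing pages */
	iommu_vfree(obj, da);

	return 0;
}
#endif
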
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	nr_entries = 0;
	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}
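
/*
 * Worked example (illustration only): for bytes = 16MB + 1MB + 64KB
 * (0x1110000), the greedy loop above yields one 16MB, one 1MB and one
 * 64KB entry, so sgtable_nents() returns 3 rather than the 4368 entries
 * a plain 4KB-per-entry split would need.
 */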

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area - find iovma which includes @da
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in the iovma mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end >= start)
			break;

		if (start + bytes < tmp->da_start)
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start > prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);
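
/*
 * Illustrative sketch (not part of the original file): a driver that only
 * knows the device address of a mapped buffer can look up the matching
 * kernel mapping, e.g. to dump what the device sees. The function name and
 * its use of print_hex_dump() are hypothetical.
 */
#if 0
static void example_dump_buffer(struct iommu *obj, u32 da, size_t len)
{
	void *va = da_to_va(obj, da);

	if (va)
		print_hex_dump(KERN_DEBUG, "iovmm: ", DUMP_PREFIX_OFFSET,
			       16, 1, va, len, false);
}
#endif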

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			/* set a proper error code before unwinding */
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
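
/*
 * Illustrative sketch (not part of the original file): mapping a
 * caller-provided sg_table (each element io page size aligned, as required
 * above) and releasing it again. The function name is hypothetical and the
 * sg_table is assumed to have been built by the caller.
 */
#if 0
static int example_vmap_mapping(struct iommu *obj, struct sg_table *sgt)
{
	u32 da;

	/* da == 0 lets the allocator pick an address (IOVMF_DA_ANON) */
	da = iommu_vmap(obj, 0, sgt, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... hand 'da' to the device ... */

	/* iommu_vunmap() hands the same sg_table back to its owner */
	sgt = iommu_vunmap(obj, da);

	return 0;
}
#endif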

/**
 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated by the caller before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @pa: contiguous physical memory
 * @bytes: length of the region to map
 * @flags: iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
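
/*
 * Illustrative sketch (not part of the original file): exposing an already
 * physically contiguous region (here a hypothetical carve-out at
 * EXAMPLE_BUF_PA) to the device through the iommu, then tearing it down.
 */
#if 0
#define EXAMPLE_BUF_PA	0x9c000000	/* hypothetical physical address */

static int example_kmap_mapping(struct iommu *obj)
{
	u32 da;

	da = iommu_kmap(obj, 0, EXAMPLE_BUF_PA, SZ_64K, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... use 'da' on the device side ... */

	iommu_kunmap(obj, da);

	return 0;
}
#endif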

/**
 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: bytes for allocation
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
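
/*
 * Illustrative sketch (not part of the original file): pattern 2 from the
 * table at the top of this file. With an anonymous da and a 64KB allocation,
 * the da is aligned to iopgsz_max(bytes), so a single 64KB superpage entry
 * can be used instead of sixteen 4KB entries. The function name is
 * hypothetical.
 */
#if 0
static int example_kmalloc_mapping(struct iommu *obj)
{
	u32 da;

	da = iommu_kmalloc(obj, 0, SZ_64K, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... share the buffer with the device via 'da' ... */

	iommu_kfree(obj, da);

	return 0;
}
#endif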

/**
 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);


static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");