/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)     \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
        (*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
        (*((STC)->strbuf_flushflag) != 0UL)
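
/* Note: the flush flag is a 64-bit word in DMA-visible memory.
 * STC_FLUSHFLAG_INIT() clears it before a flush-sync is posted, and
 * the streaming cache hardware sets it non-zero once all prior
 * flushes have completed; STC_FLUSHFLAG_SET() is what strbuf_flush()
 * polls below.
 */
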
#define iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

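/* Both accessors above use the physical-address bypass ASI
 * (ASI_PHYS_BYPASS_EC_E): the register pointers kept in struct iommu
 * and struct strbuf are physical addresses, not kernel virtual ones.
 */
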
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
        if (iommu->iommu_flushinv) {
                iommu_write(iommu->iommu_flushinv, ~(u64)0);
        } else {
                unsigned long tag;
                int entry;

                tag = iommu->iommu_tags;
                for (entry = 0; entry < 16; entry++) {
                        iommu_write(tag, 0);
                        tag += 8;
                }

                /* Ensure completion of previous PIO writes. */
                (void) iommu_read(iommu->write_complete_reg);
        }
}

#define IOPTE_CONSISTENT(CTX)   \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX)    \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid; instead they are
 * pointed at a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)    \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
        unsigned long val = iopte_val(*iopte);

        val &= ~IOPTE_PAGE;
        val |= iommu->dummy_page_pa;

        iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the
 * 'handle' facility it must all be done in one pass while under the
 * iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever
 * advance the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
                                struct iommu *iommu,
                                unsigned long npages,
                                unsigned long *handle)
{
        unsigned long n, end, start, limit, boundary_size;
        struct iommu_arena *arena = &iommu->arena;
        int pass = 0;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = arena->hint;

        limit = arena->limit;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space.  If so, go back to the beginning and flush.
         */
        if (start >= limit) {
                start = 0;
                if (iommu->flush_all)
                        iommu->flush_all(iommu);
        }

again:

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IO_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

        n = iommu_area_alloc(arena->map, limit, start, npages,
                             iommu->page_table_map_base >> IO_PAGE_SHIFT,
                             boundary_size >> IO_PAGE_SHIFT, 0);
        if (n == -1) {
                if (likely(pass < 1)) {
                        /* First failure, rescan from the beginning.  */
                        start = 0;
                        if (iommu->flush_all)
                                iommu->flush_all(iommu);
                        pass++;
                        goto again;
                } else {
                        /* Second failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        arena->hint = end;

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

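/* A minimal usage sketch of the allocator pair (a hypothetical caller,
 * mirroring what alloc_npages() below does):
 *
 *      spin_lock_irqsave(&iommu->lock, flags);
 *      entry = iommu_range_alloc(dev, iommu, npages, NULL);
 *      spin_unlock_irqrestore(&iommu->lock, flags);
 *      if (entry == DMA_ERROR_CODE)
 *              return NULL;
 *      base = iommu->page_table + entry;
 *      ... program npages IOPTEs starting at base ...
 *      dma_addr = iommu->page_table_map_base + (entry << IO_PAGE_SHIFT);
 *
 * iommu_range_free() below only clears the bitmap range; resetting the
 * IOPTEs is left to the callers (the streaming unmap paths point them
 * back at the dummy page).
 */
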
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long entry;

        entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

        bitmap_clear(arena->map, entry, npages);
}

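/* iommu_table_init() builds the software state behind the allocator:
 * the free-area bitmap, the dummy page that inactive IOPTEs point to,
 * and the IOMMU page table (TSB) itself, all allocated on the given
 * NUMA node.
 */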
int iommu_table_init(struct iommu *iommu, int tsbsize,
                     u32 dma_offset, u32 dma_addr_mask,
                     int numa_node)
{
        unsigned long i, order, sz, num_tsb_entries;
        struct page *page;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_addr_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
        if (!iommu->arena.map) {
                printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
                return -ENOMEM;
        }
        memset(iommu->arena.map, 0, sz);
        iommu->arena.limit = num_tsb_entries;

        if (tlb_type != hypervisor)
                iommu->flush_all = iommu_flushall;

        /* Allocate and initialize the dummy page which we
         * set inactive IO PTEs to point to.
         */
        page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
                goto out_free_map;
        }
        iommu->dummy_page = (unsigned long) page_address(page);
        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        page = alloc_pages_node(numa_node, GFP_KERNEL, order);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
                goto out_free_dummy_page;
        }
        iommu->page_table = (iopte_t *)page_address(page);

        for (i = 0; i < num_tsb_entries; i++)
                iopte_make_dummy(iommu, &iommu->page_table[i]);

        return 0;

out_free_dummy_page:
        free_page(iommu->dummy_page);
        iommu->dummy_page = 0UL;

out_free_map:
        kfree(iommu->arena.map);
        iommu->arena.map = NULL;

        return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
                                    unsigned long npages)
{
        unsigned long entry;

        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        if (unlikely(entry == DMA_ERROR_CODE))
                return NULL;

        return iommu->page_table + entry;
}

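/* Context numbers come from a small bitmap; ctx 0 means "no context"
 * and is never handed out.  ctx_lowest_free caches the lowest free
 * slot so the common case is a single find_next_zero_bit() from
 * there, with a wrap-around rescan from slot 1 before giving up.
 */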
static int iommu_alloc_ctx(struct iommu *iommu)
{
        int lowest = iommu->ctx_lowest_free;
        int sz = IOMMU_NUM_CTXS - lowest;
        int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

        if (unlikely(n == sz)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
                        n = 0;
                }
        }
        if (n)
                __set_bit(n, iommu->ctx_bitmap);

        return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
                if (ctx < iommu->ctx_lowest_free)
                        iommu->ctx_lowest_free = ctx;
        }
}

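/* Consistent ("coherent") allocations never go through the streaming
 * cache, so dma_4u_alloc_coherent() programs IOPTE_CONSISTENT entries
 * with ctx 0 and the free path needs no strbuf flush.
 */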
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        unsigned long flags, order, first_page;
        struct iommu *iommu;
        struct page *page;
        int npages, nid;
        iopte_t *iopte;
        void *ret;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
                                 void *cpu, dma_addr_t dvma)
{
        struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        iopte = iommu->page_table +
                ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

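/* Map a single page for streaming DMA.  The returned handle keeps the
 * byte offset within the IO page (bus_addr | (oaddr & ~IO_PAGE_MASK)),
 * which is why the unmap path masks with IO_PAGE_MASK before doing
 * arithmetic on it.
 */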
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
                                  struct dma_attrs *attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (unlikely(direction == DMA_NONE))
                goto bad_no_ctx;

        oaddr = (unsigned long)(page_address(page) + offset);
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(dev, iommu, npages);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;
}

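/* strbuf_flush() pushes dirty lines out of the streaming cache.  With
 * context support it flushes by context tag, re-issuing the flush for
 * any entries the match register still reports; otherwise it falls
 * back to flushing page by page.  Unless the transfer was
 * DMA_TO_DEVICE (in which case the streaming cache cannot hold dirty
 * data), it then posts a flush-sync and polls the flush flag until
 * the hardware acknowledges completion.
 */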
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
                         u32 vaddr, unsigned long ctx, unsigned long npages,
                         enum dma_data_direction direction)
{
        int limit;

        if (strbuf->strbuf_ctxflush &&
            iommu->iommu_ctxflush) {
                unsigned long matchreg, flushreg;
                u64 val;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

                iommu_write(flushreg, ctx);
                val = iommu_read(matchreg);
                val &= 0xffff;
                if (!val)
                        goto do_flush_sync;

                while (val) {
                        if (val & 0x1)
                                iommu_write(flushreg, ctx);
                        val >>= 1;
                }
                val = iommu_read(matchreg);
                if (unlikely(val)) {
                        printk(KERN_WARNING "strbuf_flush: ctx flush "
                               "timeout matchreg[%llx] ctx[%lx]\n",
                               val, ctx);
                        goto do_page_flush;
                }
        } else {
                unsigned long i;

        do_page_flush:
                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                        iommu_write(strbuf->strbuf_pflush, vaddr);
        }

do_flush_sync:
        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == DMA_TO_DEVICE)
                return;

        STC_FLUSHFLAG_INIT(strbuf);
        iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) iommu_read(iommu->write_complete_reg);

        limit = 100000;
        while (!STC_FLUSHFLAG_SET(strbuf)) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "strbuf_flush: flushflag timeout "
                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
                       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
                              struct dma_attrs *attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled)
                strbuf_flush(strbuf, iommu, bus_addr, ctx,
                             npages, direction);

        /* Step 2: Clear out TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        iommu_range_free(iommu, bus_addr, npages);

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

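/* Map a scatterlist.  A shared 'handle' threads the per-segment
 * allocations so they come out adjacent in DMA space, which lets
 * physically discontiguous entries merge into a single DMA segment
 * whenever the addresses line up and neither max_seg_size nor the
 * device's segment boundary would be crossed.
 */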
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
                         struct dma_attrs *attrs)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot, ctx;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct strbuf *strbuf;
        struct iommu *iommu;
        unsigned long base_shift;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;
        if (nelems == 0 || !iommu)
                return 0;

        spin_lock_irqsave(&iommu->lock, flags);

        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);

        if (strbuf->strbuf_enabled)
                prot = IOPTE_STREAMING(ctx);
        else
                prot = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                prot |= IOPTE_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;
                iopte_t *base;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                base = iommu->page_table + entry;

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        iopte_val(*base) = prot | paddr;
                        base++;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - the allocated dma_addr isn't contiguous with
                         *   the previous allocation,
                         * - the merged segment would exceed max_seg_size, or
                         * - the merge would cross the device's segment
                         *   boundary.
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages, entry, j;
                        iopte_t *base;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
                        iommu_range_free(iommu, vaddr, npages);

                        entry = (vaddr - iommu->page_table_map_base)
                                >> IO_PAGE_SHIFT;
                        base = iommu->page_table + entry;

                        for (j = 0; j < npages; j++)
                                iopte_make_dummy(iommu, base + j);

                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
        unsigned long ctx = 0;

        if (iommu->iommu_ctxflush) {
                iopte_t *base;
                u32 bus_addr;

                bus_addr = sg->dma_address & IO_PAGE_MASK;
                base = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
        }
        return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        unsigned long flags, ctx;
        struct scatterlist *sg;
        struct strbuf *strbuf;
        struct iommu *iommu;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        ctx = fetch_sg_ctx(iommu, sglist);

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;
                iopte_t *base;
                int i;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base)
                         >> IO_PAGE_SHIFT);
                base = iommu->page_table + entry;

                dma_handle &= IO_PAGE_MASK;
                if (strbuf->strbuf_enabled)
                        strbuf_flush(strbuf, iommu, dma_handle, ctx,
                                     npages, direction);

                for (i = 0; i < npages; i++)
                        iopte_make_dummy(iommu, base + i);

                sg = sg_next(sg);
        }

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

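/* The sync_*_for_cpu operations only have work to do when a streaming
 * buffer sits in front of the IOMMU: they flush it so the CPU sees
 * data the device has written.  Consistent mappings need nothing,
 * hence the early return when the strbuf is disabled.
 */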
static void dma_4u_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        struct scatterlist *sg, *sgprv;
        u32 bus_addr;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        sgprv = NULL;
        for_each_sg(sglist, sg, nelems, i) {
                if (sg->dma_length == 0)
                        break;
                sgprv = sg;
        }

        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

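/* sun4u installs these ops as the global dma_ops; other platform code
 * (the sun4v/hypervisor path, for instance) can point dma_ops at its
 * own table instead.
 */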
static struct dma_map_ops sun4u_dma_ops = {
        .alloc_coherent         = dma_4u_alloc_coherent,
        .free_coherent          = dma_4u_free_coherent,
        .map_page               = dma_4u_map_page,
        .unmap_page             = dma_4u_unmap_page,
        .map_sg                 = dma_4u_map_sg,
        .unmap_sg               = dma_4u_unmap_sg,
        .sync_single_for_cpu    = dma_4u_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

int dma_supported(struct device *dev, u64 device_mask)
{
        struct iommu *iommu = dev->archdata.iommu;
        u64 dma_addr_mask = iommu->dma_addr_mask;

        if (device_mask >= (1UL << 32UL))
                return 0;

        if ((device_mask & dma_addr_mask) == dma_addr_mask)
                return 1;

#ifdef CONFIG_PCI
        if (dev->bus == &pci_bus_type)
                return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

        return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
        if (dev->bus == &pci_bus_type)
                return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
        return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);