/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
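
/*
 * Illustrative note (not in the original header): on aliasing VIPT
 * caches SHMLBA is typically 4 * PAGE_SIZE, so with 4K pages the macro
 * above yields cache colours 0..3, e.g.
 *
 *	CACHE_COLOUR(0x00003000) == 3
 *	CACHE_COLOUR(0x00004000) == 0
 *
 * Two virtual mappings of the same page can only alias in the cache
 * when their colours differ.
 */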

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
    defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
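
/*
 * Illustrative note (not in the original header): a kernel configured
 * for a single cache type, say CONFIG_CPU_ARM926T alone, ends up with
 *
 *	#define _CACHE arm926
 *
 * and the __cpuc_* calls below resolve directly to the arm926_* entry
 * points at link time.  Configure a second, incompatible cache type and
 * MULTI_CACHE is defined instead, routing the same calls through the
 * cpu_cache method table.
 */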

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
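
/*
 * A minimal sketch of the deferred-flush protocol (illustrative, not
 * part of this header): flush_dcache_page() sets the bit when it defers
 * the clean, and the fault path later performs
 *
 *	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
 *		__flush_dcache_page(mapping, page);
 *
 * so the D-cache is only cleaned once a user space mapping appears.
 */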

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page kaddr is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
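
/*
 * Illustrative note (not in the original header): on MULTI_CACHE builds
 * the boot code fills this table in from the proc_info record of the
 * CPU it detects, roughly
 *
 *	cpu_cache = *list->cache;
 *
 * in arch/arm/kernel/setup.c, after which the __cpuc_* macros below
 * dispatch through it.
 */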

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
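
/*
 * Background sketch (illustrative, not part of the original header):
 * __glue() in <asm/glue.h> is token pasting, roughly
 *
 *	#define ____glue(name,fn)	name##fn
 *	#define __glue(name,fn)		____glue(name,fn)
 *
 * so with _CACHE defined to arm926, __cpuc_flush_kern_all expands to
 * arm926_flush_kern_cache_all, implemented in arch/arm/mm/cache-arm926.S.
 */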

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
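
/*
 * A minimal sketch (illustrative; this is roughly what the dma-mapping
 * code does, not an API to call directly) of how the inner and outer
 * cache operations combine when handing a buffer to a device:
 *
 *	dmac_clean_range(vaddr, vaddr + size);
 *	outer_clean_range(__pa(vaddr), __pa(vaddr) + size);
 *
 * Note the inner (CPU) cache is maintained by virtual address, while an
 * outer cache such as the L2X0 is maintained by physical address.
 */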

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
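
/*
 * Illustrative note (not in the original header): access_process_vm()
 * uses copy_to_user_page() when a debugger pokes another task's memory,
 * e.g. to plant a breakpoint:
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *
 * The trailing flush_ptrace_access() is what makes the new instruction
 * visible to the traced task, since memcpy() only wrote through the
 * kernel-space alias of the page.
 */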

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#define flush_ptrace_access(vma,page,ua,ka,len,write) \
		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
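
/*
 * Illustrative example (not in the original header): a user-space JIT
 * which has just written instructions into a buffer reaches this macro
 * via the private ARM syscall:
 *
 *	syscall(__ARM_NR_cacheflush, buf, buf + len, 0);
 *
 * cleaning the D-cache and invalidating the I-cache over the range so
 * the new code can be safely executed.
 */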

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
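
/*
 * Illustrative example (not in the original header): kernel code that
 * writes instructions, such as the module loader after copying a
 * module's text into place, must follow the write with
 *
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
 *
 * before jumping to the new instructions.
 */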

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
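
/*
 * A minimal usage sketch (illustrative, not part of this header):
 *
 *	void *p = kmap(page);
 *	memset(p, 0, PAGE_SIZE);
 *	flush_kernel_dcache_page(page);
 *	kunmap(page);
 *
 * On an aliasing cache the flush ensures that user mappings of the same
 * page see the data written through the kernel alias.
 */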

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif