/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * Cache Model
 * ===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
    defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean		PG_arch_1

/*
 * MM Cache Management
 * ===================
 *
 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 * implement these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/cachetlb.txt for more information.
 * Please note that the implementations of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 * DMA Cache Coherency
 * ===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
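
/*
 * For illustration only (not compiled): a minimal sketch of the rounding
 * convention described above.  The half-open [start, end) range is widened
 * so that partially covered pages are still maintained; the function and
 * parameter names here are hypothetical.
 */
#if 0
static void example_make_code_executable(unsigned long buf, unsigned long len)
{
	unsigned long start = buf & PAGE_MASK;		/* round down */
	unsigned long end = PAGE_ALIGN(buf + len);	/* round up */

	/* e.g. make freshly written instructions visible to the I-cache */
	__cpuc_coherent_kern_range(start, end);
}
#endif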

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};

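/*
 * For illustration only (not compiled): in a MULTI_CACHE build, boot code
 * fills in a single cpu_cache_fns instance with the routines for the
 * detected CPU, and every __cpuc_* call below indirects through it.  This
 * initializer is a hedged sketch; the v6_* names are only examples of the
 * naming scheme used by arch/arm/mm/cache-*.S.
 */
#if 0
static const struct cpu_cache_fns example_v6_cache_fns = {
	.flush_kern_all		= v6_flush_kern_cache_all,
	.flush_user_all		= v6_flush_user_cache_all,
	.flush_user_range	= v6_flush_user_cache_range,
	.coherent_kern_range	= v6_coherent_kern_range,
	.coherent_user_range	= v6_coherent_user_range,
	.flush_kern_dcache_area	= v6_flush_kern_dcache_area,
	.dma_map_area		= v6_dma_map_area,
	.dma_unmap_area		= v6_dma_unmap_area,
	.dma_flush_range	= v6_dma_flush_range,
};
/* at boot, roughly: cpu_cache = example_v6_cache_fns; */
#endif
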
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

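/*
 * For illustration only (not compiled): the dma-mapping implementation is
 * the intended caller of dmac_map_area()/dmac_unmap_area().  A hedged
 * sketch of the usual bracketing; "dir" would be a standard direction
 * value such as DMA_TO_DEVICE or DMA_FROM_DEVICE, and the names here are
 * hypothetical.
 */
#if 0
static void example_dma_transfer(const void *buf, size_t len, int dir)
{
	/* before starting DMA: hand the buffer over to the device */
	dmac_map_area(buf, len, dir);

	/* ... the device performs the transfer ... */

	/* after DMA completes: hand the buffer back to the CPU */
	dmac_unmap_area(buf, len, dir);
}
#endif
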
#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
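
/*
 * For illustration only: __glue() in <asm/glue.h> is token pasting,
 * roughly
 *
 *	#define ____glue(name,fn)	name##fn
 *	#define __glue(name,fn)		____glue(name,fn)
 *
 * so in a single-cache build with, say, _CACHE defined as v4wb, a call
 * to __cpuc_flush_kern_all() compiles to a direct call to
 * v4wb_flush_kern_cache_all() with no function-pointer indirection.
 */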

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

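/*
 * For illustration only (not compiled): copy_to_user_page() is what
 * access_process_vm()-style code (e.g. ptrace planting a breakpoint)
 * uses after writing into a page belonging to another task, so the
 * target's caches see the new bytes.  A hedged sketch; "kaddr" is the
 * kernel mapping of the page and the names are hypothetical.
 */
#if 0
static void example_poke_text(struct vm_area_struct *vma, struct page *page,
			      unsigned long uaddr, void *kaddr,
			      const void *insn, unsigned long len)
{
	/* copies the bytes and performs the required cache maintenance */
	copy_to_user_page(vma, page, uaddr,
			  kaddr + (uaddr & ~PAGE_MASK), insn, len);
}
#endif
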
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

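/*
 * For illustration only (not compiled, and user-space rather than kernel
 * code): sys_cacheflush is what a JIT or other self-modifying user
 * program reaches for after writing instructions.  A hedged sketch
 * assuming the __ARM_NR_cacheflush number exported by the kernel
 * headers; in practice most code uses the compiler's
 * __builtin___clear_cache() instead.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>

static void example_sync_icache(char *begin, char *end)
{
	/* the third (flags) argument must currently be zero */
	syscall(__ARM_NR_cacheflush, begin, end, 0);
}
#endif
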
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

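/*
 * For illustration only (not compiled): the deferral described above
 * hinges on the PG_dcache_clean bit defined earlier in this file.  A
 * hedged sketch of the pattern; the real logic lives in
 * arch/arm/mm/flush.c and fault-armv.c and handles aliasing and SMP
 * details omitted here.
 */
#if 0
static void example_flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		/* no user mappings yet: just record that a clean is owed */
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		/* clean + invalidate the kernel mapping now */
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
		set_bit(PG_dcache_clean, &page->flags);
	}
}
#endif
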
static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
	asm("mcr	p15, 0, %0, c7, c1, 0	@ invalidate I-cache inner shareable\n"
	    :
	    : "r" (0));
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

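/*
 * For illustration only (not compiled): these two helpers exist for
 * code that performs I/O on memory it also accesses through a vmap()
 * alias.  A hedged sketch of the expected bracketing; the names are
 * hypothetical.
 */
#if 0
static void example_io_via_vmap_alias(void *vaddr, int len)
{
	/* write dirty lines from the vmap alias back before the device reads */
	flush_kernel_vmap_range(vaddr, len);

	/* ... submit I/O against the underlying pages ... */

	/* discard stale lines before reading data the device wrote */
	invalidate_kernel_vmap_range(vaddr, len);
}
#endif
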
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (e.g., via vmap,
 * vmalloc, ioremap, etc.) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif /* _ASMARM_CACHEFLUSH_H */