/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size	Clean (ticks)	Dirty (ticks)
 *   4096	 21  20  21	 53  55  54
 *   8192	 40  41  40	106 100 102
 *  16384	 77  77  76	140 140 138
 *  32768	150 149 150	214 216 212 <---
 *  65536	296 297 296	351 358 361
 * 131072	591 591 591	656 657 651
 *  Whole	132 136 132	221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
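/*
 * With the cache sizes above, CACHE_DLIMIT works out to 64 KiB on the
 * SA-110 and 32 KiB on the SA-1100; ranges at least that large take
 * the whole-cache path instead of iterating line by line.
 */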
	.data
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(v4wb_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
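	/*
	 * StrongARM has no "clean entire D cache" operation, so we read
	 * CACHE_DSIZE bytes of otherwise unused cacheable memory at
	 * FLUSH_BASE instead; each load displaces one cache line, writing
	 * it back if it was dirty.  flush_base alternates between two
	 * windows so that the next flush cannot hit lines left resident
	 * by this one.
	 */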
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
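/*
 * This is reached, for instance, via flush_icache_range() after the
 * kernel has written instructions (module loading, ptrace breakpoints),
 * so that stale I cache lines are discarded before execution.
 */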
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
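/*
 * The partial lines at either end are cleaned first because they may
 * hold live data that merely shares a cache line with the buffer;
 * invalidating them outright would destroy it.
 */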
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
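/*
 * r2 holds a dma_data_direction value: DMA_BIDIRECTIONAL (0),
 * DMA_TO_DEVICE (1) or DMA_FROM_DEVICE (2).  The single compare below
 * dispatches on it: equal means clean only, higher (unsigned) means
 * invalidate only, and lower means clean and invalidate.
 */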
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
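/*
 * Nothing to do here: for this cache model all the maintenance is
 * done up front in v4wb_dma_map_area.
 */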
ENTRY(v4wb_dma_unmap_area)
	mov	pc, lr
ENDPROC(v4wb_dma_unmap_area)

	__INITDATA

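/*
 * The entries below must be kept in the same order as the members of
 * struct cpu_cache_fns in <asm/cacheflush.h>.
 */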
	.type	v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
	.long	v4wb_flush_icache_all
	.long	v4wb_flush_kern_cache_all
	.long	v4wb_flush_user_cache_all
	.long	v4wb_flush_user_cache_range
	.long	v4wb_coherent_kern_range
	.long	v4wb_coherent_user_range
	.long	v4wb_flush_kern_dcache_area
	.long	v4wb_dma_map_area
	.long	v4wb_dma_unmap_area
	.long	v4wb_dma_flush_range
	.size	v4wb_cache_fns, . - v4wb_cache_fns