/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		;
}

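/*
 * On the L210/L220, a cache maintenance operation by line has to be
 * polled for completion through the corresponding register, hence
 * cache_wait_way() above. The PL310 completes line operations
 * atomically, so cache_wait() reduces to a no-op in that configuration.
 */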
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

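/*
 * A write to the Cache Sync register makes the controller drain its
 * buffers, so that maintenance operations issued earlier have taken
 * effect once the polled sync completes.
 */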
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

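/*
 * PL310 erratum 588369 workaround: on affected revisions a combined
 * Clean & Invalidate by PA cannot be relied upon, so the errata version
 * of l2x0_flush_line() below issues a separate Clean followed by an
 * Invalidate. The debug_writel(0x03)/debug_writel(0x00) pairs in the
 * range operations bracket that sequence by writing the PL310 Debug
 * Control Register, reached via the TI secure monitor call because the
 * register is only writable from the secure side on these platforms.
 */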
#ifdef CONFIG_PL310_ERRATA_588369
static void debug_writel(unsigned long val)
{
	extern void omap_smc1(u32 fn, u32 arg);

	/*
	 * Texas Instruments secure monitor API to modify the
	 * PL310 Debug Control Register.
	 */
	omap_smc1(0x100, val);
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when the L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

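/*
 * A partially covered cache line at either end of the range cannot
 * simply be invalidated, since the rest of that line may hold live
 * data; those edge lines are cleaned and invalidated instead. The body
 * of the range is then processed in blocks of at most 4K, dropping and
 * retaking the lock between blocks so that interrupts are not held off
 * for the duration of a large range.
 */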
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

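/*
 * When the range covers at least the whole cache it is cheaper to
 * clean (or, in l2x0_flush_range(), clean and invalidate) every way in
 * a single background operation than to walk the range line by line.
 */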
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	writel(0, l2x0_base + L2X0_CTRL);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;
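	/*
	 * Example with assumed values: a WAY_SIZE field of 3 in the
	 * auxiliary control register gives 1 << (3 + 3) = 64 KB per way,
	 * so an 8-way cache totals 8 * 64 * SZ_1K = 512 KB.
	 */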

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting from non-secure mode, accessing
	 * the registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}
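
/*
 * Usage sketch (illustrative only, not part of this file): a platform's
 * machine init code would typically map the controller and hand it to
 * l2x0_init(). The physical base and the aux_val/aux_mask values below
 * are made-up placeholders; real values depend on the SoC memory map and
 * on which auxiliary control bits the platform wants to force or keep
 * (aux_mask selects the bits to preserve, aux_val the bits to set).
 *
 *	static int __init example_l2_init(void)
 *	{
 *		void __iomem *l2_base = ioremap(EXAMPLE_L2CC_PHYS_BASE, SZ_4K);
 *
 *		if (!l2_base)
 *			return -ENOMEM;
 *
 *		l2x0_init(l2_base, 0x00400000, 0xfe0fffff);
 *		return 0;
 *	}
 */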