/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Lennox Wu <lennox.wu@sunplusct.com>
 * Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
/*
 * Cache operations.
 *
 * These are indirect-call hooks: generic kernel code calls through
 * these pointers, and cpu_cache_init() below points them at the
 * Score7 (s7_*) implementations.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma,
        unsigned long start, unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma,
        unsigned long page, unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
void (*flush_cache_sigtramp)(unsigned long addr);
void (*flush_data_cache_page)(unsigned long addr);
EXPORT_SYMBOL(flush_data_cache_page);
void (*flush_icache_all)(void);
/* Score 7 cache operations (forward declarations). */
/*
 * NOTE(review): s7___flush_cache_all is declared "static inline" here
 * but defined below without "static".  The identifier keeps internal
 * linkage from this first declaration, so it compiles, but confirm
 * against the reference tree whether the two were meant to match.
 */
static inline void s7___flush_cache_all(void);
static void s7_flush_cache_mm(struct mm_struct *mm);
static void s7_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end);
static void s7_flush_cache_page(struct vm_area_struct *vma,
        unsigned long page, unsigned long pfn);
static void s7_flush_icache_range(unsigned long start, unsigned long end);
static void s7_flush_cache_sigtramp(unsigned long addr);
static void s7_flush_data_cache_page(unsigned long addr);
static void s7_flush_dcache_range(unsigned long start, unsigned long end);
63 void __update_cache(struct vm_area_struct *vma, unsigned long address,
67 unsigned long pfn, addr;
68 int exec = (vma->vm_flags & VM_EXEC);
71 if (unlikely(!pfn_valid(pfn)))
73 page = pfn_to_page(pfn);
74 if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
75 addr = (unsigned long) page_address(page);
77 s7_flush_data_cache_page(addr);
78 clear_bit(PG_arch_1, &page->flags);
82 static inline void setup_protection_map(void)
84 protection_map[0] = PAGE_NONE;
85 protection_map[1] = PAGE_READONLY;
86 protection_map[2] = PAGE_COPY;
87 protection_map[3] = PAGE_COPY;
88 protection_map[4] = PAGE_READONLY;
89 protection_map[5] = PAGE_READONLY;
90 protection_map[6] = PAGE_COPY;
91 protection_map[7] = PAGE_COPY;
92 protection_map[8] = PAGE_NONE;
93 protection_map[9] = PAGE_READONLY;
94 protection_map[10] = PAGE_SHARED;
95 protection_map[11] = PAGE_SHARED;
96 protection_map[12] = PAGE_READONLY;
97 protection_map[13] = PAGE_READONLY;
98 protection_map[14] = PAGE_SHARED;
99 protection_map[15] = PAGE_SHARED;
102 void __devinit cpu_cache_init(void)
104 flush_cache_all = s7_flush_cache_all;
105 __flush_cache_all = s7___flush_cache_all;
106 flush_cache_mm = s7_flush_cache_mm;
107 flush_cache_range = s7_flush_cache_range;
108 flush_cache_page = s7_flush_cache_page;
109 flush_icache_range = s7_flush_icache_range;
110 flush_cache_sigtramp = s7_flush_cache_sigtramp;
111 flush_data_cache_page = s7_flush_data_cache_page;
113 setup_protection_map();
/*
 * Invalidate the entire instruction cache (CACHE op 0x10 -- opcode
 * meaning inferred from its use in the icache flushers here; confirm
 * against the Score7 ISA manual).  The trailing nops give the cache
 * operation time to complete; r8 is clobbered by the "la".
 */
void s7_flush_icache_all(void)
{
        __asm__ __volatile__(
                "la r8, s7_flush_icache_all\n"
                "cache 0x10, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                : : : "r8");
}
/*
 * Flush the entire data cache (CACHE op 0x1f), then drain the write
 * buffer (op 0x1a).  Opcode meanings inferred from usage in this
 * file; confirm against the Score7 ISA manual.  r8 is clobbered.
 */
void s7_flush_dcache_all(void)
{
        __asm__ __volatile__(
                "la r8, s7_flush_dcache_all\n"
                "cache 0x1f, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                "cache 0x1a, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                : : : "r8");
}
/*
 * Flush everything: invalidate the icache (op 0x10), flush the
 * dcache (op 0x1f), and drain the write buffer (op 0x1a).
 * r8 is clobbered by the "la".
 */
void s7_flush_cache_all(void)
{
        __asm__ __volatile__(
                "la r8, s7_flush_cache_all\n"
                "cache 0x10, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                "cache 0x1f, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                "cache 0x1a, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                : : : "r8");
}
/*
 * Same full flush as s7_flush_cache_all(): invalidate icache (0x10),
 * flush dcache (0x1f), drain write buffer (0x1a).  The "la" address
 * operand is only used as a dummy line address for the CACHE ops.
 * r8 is clobbered.
 */
void s7___flush_cache_all(void)
{
        __asm__ __volatile__(
                "la r8, s7_flush_cache_all\n"
                "cache 0x10, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                "cache 0x1f, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                "cache 0x1a, [r8, 0]\n"
                "nop\nnop\nnop\nnop\nnop\nnop\n"
                : : : "r8");
}
/*
 * Flush all cached state for @mm.  Implemented as a whole-cache
 * flush: this file provides no per-address-space selective flush.
 *
 * NOTE(review): the extraction lost the lines between the signature
 * and the s7_flush_cache_all() call; the original may have had an
 * early-return guard (e.g. for an mm with no live context) -- confirm
 * against the reference tree.
 */
static void s7_flush_cache_mm(struct mm_struct *mm)
{
        s7_flush_cache_all();
}
/*
 * If we flushed a range precisely, the processing could take very
 * long: we must check whether each page in the range is present, and
 * only when a page is present can we flush the range within it.  Be
 * careful -- the range may cross two pages, with one page present and
 * the other not.
 *
 * The interface is provided in hopes that the port can find a
 * suitably efficient method for removing multiple page-sized regions
 * from the cache.
 */
180 s7_flush_cache_range(struct vm_area_struct *vma,
181 unsigned long start, unsigned long end)
183 struct mm_struct *mm = vma->vm_mm;
184 int exec = vma->vm_flags & VM_EXEC;
193 pgdp = pgd_offset(mm, start);
194 pudp = pud_offset(pgdp, start);
195 pmdp = pmd_offset(pudp, start);
196 ptep = pte_offset(pmdp, start);
198 while (start <= end) {
199 unsigned long tmpend;
200 pgdp = pgd_offset(mm, start);
201 pudp = pud_offset(pgdp, start);
202 pmdp = pmd_offset(pudp, start);
203 ptep = pte_offset(pmdp, start);
205 if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
206 start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
209 tmpend = (start | (PAGE_SIZE-1)) > end ?
210 end : (start | (PAGE_SIZE-1));
212 s7_flush_dcache_range(start, tmpend);
214 s7_flush_icache_range(start, tmpend);
215 start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
220 s7_flush_cache_page(struct vm_area_struct *vma,
221 unsigned long addr, unsigned long pfn)
223 int exec = vma->vm_flags & VM_EXEC;
224 unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);
226 s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
229 s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE);
/*
 * Flush the two instruction words of a signal trampoline at @addr:
 * invalidate both icache lines (op 0x02), write back/invalidate both
 * dcache lines (op 0x0d -- presumed meaning, confirm against the ISA
 * manual), then drain the write buffer (op 0x1a).
 */
static void s7_flush_cache_sigtramp(unsigned long addr)
{
        __asm__ __volatile__(
                "cache 0x02, [%0, 0]\n"
                "nop\nnop\nnop\nnop\nnop\n"
                "cache 0x02, [%0, 0x4]\n"
                "nop\nnop\nnop\nnop\nnop\n"

                "cache 0x0d, [%0, 0]\n"
                "nop\nnop\nnop\nnop\nnop\n"
                "cache 0x0d, [%0, 0x4]\n"
                "nop\nnop\nnop\nnop\nnop\n"

                "cache 0x1a, [%0, 0]\n"
                "nop\nnop\nnop\nnop\nnop\n"
                : : "r" (addr));
}
251 Just flush entire Dcache!!
252 You must ensure the page doesn't include instructions, because
253 the function will not flush the Icache.
254 The addr must be cache aligned.
256 static void s7_flush_data_cache_page(unsigned long addr)
259 for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) {
260 __asm__ __volatile__(
261 "cache 0x0e, [%0, 0]\n"
262 "cache 0x1a, [%0, 0]\n"
265 addr += L1_CACHE_BYTES;
270 1. WB and invalid a cache line of Dcache
271 2. Drain Write Buffer
272 the range must be smaller than PAGE_SIZE
274 static void s7_flush_dcache_range(unsigned long start, unsigned long end)
278 start = start & ~(L1_CACHE_BYTES - 1);
279 end = end & ~(L1_CACHE_BYTES - 1);
281 /* flush dcache to ram, and invalidate dcache lines. */
282 for (i = 0; i < size; i += L1_CACHE_BYTES) {
283 __asm__ __volatile__(
284 "cache 0x0e, [%0, 0]\n"
285 "nop\nnop\nnop\nnop\nnop\n"
286 "cache 0x1a, [%0, 0]\n"
287 "nop\nnop\nnop\nnop\nnop\n"
289 start += L1_CACHE_BYTES;
293 static void s7_flush_icache_range(unsigned long start, unsigned long end)
296 start = start & ~(L1_CACHE_BYTES - 1);
297 end = end & ~(L1_CACHE_BYTES - 1);
300 /* invalidate icache lines. */
301 for (i = 0; i < size; i += L1_CACHE_BYTES) {
302 __asm__ __volatile__(
303 "cache 0x02, [%0, 0]\n"
304 "nop\nnop\nnop\nnop\nnop\n"
306 start += L1_CACHE_BYTES;