/*
 * Blackfin CPLB exception handling for when the MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>
/*
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */
28 unsigned long *current_rwx_mask[NR_CPUS];
30 int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
31 int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
32 int nr_cplb_flush[NR_CPUS];
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
	/*
	 * The low 16 bits of the status register are a one-hot mask of
	 * the faulting entry; the norm builtin counts leading redundant
	 * sign bits, which maps the set bit position back to an index.
	 */
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}
45 * Given the contents of the status register and the DCPLB_DATA contents,
46 * return true if a write access should be permitted.
48 static inline int write_permitted(int status, unsigned long data)
50 if (status & FAULT_USERSUPV)
51 return !!(data & CPLB_SUPV_WR);
53 return !!(data & CPLB_USER_WR);
56 /* Counters to implement round-robin replacement. */
57 static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
60 * Find an ICPLB entry to be evicted and return its index.
62 static int evict_one_icplb(unsigned int cpu)
65 for (i = first_switched_icplb; i < MAX_CPLBS; i++)
66 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
68 i = first_switched_icplb + icplb_rr_index[cpu];
70 i -= MAX_CPLBS - first_switched_icplb;
71 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
73 icplb_rr_index[cpu]++;
77 static int evict_one_dcplb(unsigned int cpu)
80 for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
81 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
83 i = first_switched_dcplb + dcplb_rr_index[cpu];
85 i -= MAX_CPLBS - first_switched_dcplb;
86 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
88 dcplb_rr_index[cpu]++;
92 static noinline int dcplb_miss(unsigned int cpu)
94 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
95 int status = bfin_read_DCPLB_STATUS();
100 nr_dcplb_miss[cpu]++;
102 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
103 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
104 if (bfin_addr_dcacheable(addr)) {
105 d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
106 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
107 d_data |= CPLB_L1_AOW | CPLB_WT;
112 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
115 } else if (addr >= physical_mem_end) {
116 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
117 mask = current_rwx_mask[cpu];
119 int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
121 int bit = 1 << (page & 31);
124 d_data |= CPLB_USER_RD;
126 } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
127 && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
128 addr &= ~(1 * 1024 * 1024 - 1);
129 d_data &= ~PAGE_SIZE_4KB;
130 d_data |= PAGE_SIZE_1MB;
132 return CPLB_PROT_VIOL;
133 } else if (addr >= _ramend) {
134 d_data |= CPLB_USER_RD | CPLB_USER_WR;
135 if (reserved_mem_dcache_on)
136 d_data |= CPLB_L1_CHBL;
138 mask = current_rwx_mask[cpu];
140 int page = addr >> PAGE_SHIFT;
142 int bit = 1 << (page & 31);
145 d_data |= CPLB_USER_RD;
147 mask += page_mask_nelts;
149 d_data |= CPLB_USER_WR;
152 idx = evict_one_dcplb(cpu);
155 dcplb_tbl[cpu][idx].addr = addr;
156 dcplb_tbl[cpu][idx].data = d_data;
159 bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
160 bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
166 static noinline int icplb_miss(unsigned int cpu)
168 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
169 int status = bfin_read_ICPLB_STATUS();
171 unsigned long i_data;
173 nr_icplb_miss[cpu]++;
175 /* If inside the uncached DMA region, fault. */
176 if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
177 return CPLB_PROT_VIOL;
179 if (status & FAULT_USERSUPV)
180 nr_icplb_supv_miss[cpu]++;
183 * First, try to find a CPLB that matches this address. If we
184 * find one, then the fact that we're in the miss handler means
185 * that the instruction crosses a page boundary.
187 for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
188 if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
189 unsigned long this_addr = icplb_tbl[cpu][idx].addr;
190 if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
197 i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
199 #ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
201 * Normal RAM, and possibly the reserved memory area, are
204 if (addr < _ramend ||
205 (addr < physical_mem_end && reserved_mem_icache_on))
206 i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
209 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
212 } else if (addr >= physical_mem_end) {
213 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
214 if (!(status & FAULT_USERSUPV)) {
215 unsigned long *mask = current_rwx_mask[cpu];
218 int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
220 int bit = 1 << (page & 31);
222 mask += 2 * page_mask_nelts;
224 i_data |= CPLB_USER_RD;
227 } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
228 && (status & FAULT_USERSUPV)) {
229 addr &= ~(1 * 1024 * 1024 - 1);
230 i_data &= ~PAGE_SIZE_4KB;
231 i_data |= PAGE_SIZE_1MB;
233 return CPLB_PROT_VIOL;
234 } else if (addr >= _ramend) {
235 i_data |= CPLB_USER_RD;
236 if (reserved_mem_icache_on)
237 i_data |= CPLB_L1_CHBL;
240 * Two cases to distinguish - a supervisor access must
241 * necessarily be for a module page; we grant it
242 * unconditionally (could do better here in the future).
243 * Otherwise, check the x bitmap of the current process.
245 if (!(status & FAULT_USERSUPV)) {
246 unsigned long *mask = current_rwx_mask[cpu];
249 int page = addr >> PAGE_SHIFT;
251 int bit = 1 << (page & 31);
253 mask += 2 * page_mask_nelts;
255 i_data |= CPLB_USER_RD;
259 idx = evict_one_icplb(cpu);
261 icplb_tbl[cpu][idx].addr = addr;
262 icplb_tbl[cpu][idx].data = i_data;
265 bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
266 bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
272 static noinline int dcplb_protection_fault(unsigned int cpu)
274 int status = bfin_read_DCPLB_STATUS();
276 nr_dcplb_prot[cpu]++;
278 if (status & FAULT_RW) {
279 int idx = faulting_cplb_index(status);
280 unsigned long data = dcplb_tbl[cpu][idx].data;
281 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
282 write_permitted(status, data)) {
284 dcplb_tbl[cpu][idx].data = data;
285 bfin_write32(DCPLB_DATA0 + idx * 4, data);
289 return CPLB_PROT_VIOL;
/*
 * Entry point from the exception handler: dispatch on the exception
 * cause (low six bits of SEQSTAT) to the appropriate CPLB handler.
 * Returns 0 on success, non-zero (a CPLB_* code or 1) on failure.
 */
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	int cause = seqstat & 0x3f;
	unsigned int cpu = raw_smp_processor_id();

	switch (cause) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	default:
		return 1;
	}
}
308 void flush_switched_cplbs(unsigned int cpu)
313 nr_cplb_flush[cpu]++;
315 local_irq_save_hw(flags);
317 for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
318 icplb_tbl[cpu][i].data = 0;
319 bfin_write32(ICPLB_DATA0 + i * 4, 0);
324 for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
325 dcplb_tbl[cpu][i].data = 0;
326 bfin_write32(DCPLB_DATA0 + i * 4, 0);
329 local_irq_restore_hw(flags);
333 void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
336 unsigned long addr = (unsigned long)masks;
337 unsigned long d_data;
341 current_rwx_mask[cpu] = masks;
345 local_irq_save_hw(flags);
346 current_rwx_mask[cpu] = masks;
348 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
352 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
353 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
354 d_data |= CPLB_L1_CHBL;
355 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
356 d_data |= CPLB_L1_AOW | CPLB_WT;
362 for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
363 dcplb_tbl[cpu][i].addr = addr;
364 dcplb_tbl[cpu][i].data = d_data;
365 bfin_write32(DCPLB_DATA0 + i * 4, d_data);
366 bfin_write32(DCPLB_ADDR0 + i * 4, addr);
370 local_irq_restore_hw(flags);