/* arch/blackfin/kernel/cplb-mpu/cplbmgr.c */
1 /*
2  * Blackfin CPLB exception handling for when MPU in on
3  *
4  * Copyright 2008-2009 Analog Devices Inc.
5  *
6  * Licensed under the GPL-2 or later.
7  */
8
9 #include <linux/module.h>
10 #include <linux/mm.h>
11
12 #include <asm/blackfin.h>
13 #include <asm/cacheflush.h>
14 #include <asm/cplb.h>
15 #include <asm/cplbinit.h>
16 #include <asm/mmu_context.h>
17
18 /*
19  * WARNING
20  *
21  * This file is compiled with certain -ffixed-reg options.  We have to
22  * make sure not to call any functions here that could clobber these
23  * registers.
24  */
25
/* Number of 32-bit words in one chunk of the per-process R/W/X bitmap
 * (current_rwx_mask points at three consecutive chunks: read, write,
 * execute — see the "mask += page_mask_nelts" steps in the miss handlers). */
int page_mask_nelts;
int page_mask_order;
/* Per-CPU pointer to the current process's page-permission bitmap;
 * NULL when no bitmap is installed (see set_mask_dcplbs()). */
unsigned long *current_rwx_mask[NR_CPUS];

/* Per-CPU event counters, exported for CPLB statistics. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
33
/*
 * Decode which CPLB entry caused the fault from the CPLB status
 * register: the low 16 bits identify the entry, and the Blackfin
 * norm builtin (count of redundant sign bits) turns that into an
 * index via 30 - norm(status & 0xFFFF).
 */
static inline int faulting_cplb_index(int status)
{
	return 30 - __builtin_bfin_norm_fr1x32(status & 0xFFFF);
}
43
44 /*
45  * Given the contents of the status register and the DCPLB_DATA contents,
46  * return true if a write access should be permitted.
47  */
48 static inline int write_permitted(int status, unsigned long data)
49 {
50         if (status & FAULT_USERSUPV)
51                 return !!(data & CPLB_SUPV_WR);
52         else
53                 return !!(data & CPLB_USER_WR);
54 }
55
/* Per-CPU counters to implement round-robin replacement among the
 * switched CPLB entries (used by evict_one_icplb()/evict_one_dcplb()). */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
58
59 /*
60  * Find an ICPLB entry to be evicted and return its index.
61  */
62 static int evict_one_icplb(unsigned int cpu)
63 {
64         int i;
65         for (i = first_switched_icplb; i < MAX_CPLBS; i++)
66                 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
67                         return i;
68         i = first_switched_icplb + icplb_rr_index[cpu];
69         if (i >= MAX_CPLBS) {
70                 i -= MAX_CPLBS - first_switched_icplb;
71                 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
72         }
73         icplb_rr_index[cpu]++;
74         return i;
75 }
76
77 static int evict_one_dcplb(unsigned int cpu)
78 {
79         int i;
80         for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
81                 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
82                         return i;
83         i = first_switched_dcplb + dcplb_rr_index[cpu];
84         if (i >= MAX_CPLBS) {
85                 i -= MAX_CPLBS - first_switched_dcplb;
86                 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
87         }
88         dcplb_rr_index[cpu]++;
89         return i;
90 }
91
/*
 * Handle a DCPLB miss: build a descriptor for the faulting address,
 * pick a victim entry, and install it in both the software table and
 * the hardware DCPLB registers.  Returns 0 on success or
 * CPLB_PROT_VIOL when the access is not allowed at all.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default descriptor: a valid 4KB supervisor-writable page with
	 * CPLB_DIRTY already set (so a supervisor write won't immediately
	 * take the dirty-emulation protection fault — see
	 * dcplb_protection_fault()). */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* On-chip L2: use the fixed L2 descriptor covering all of L2. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async flash banks: grant user read access only if the
			 * corresponding bit is set in the process's read bitmap.
			 * The bitmap is indexed as if the async banks started
			 * right after RAM, hence the (ASYNC_BANK0_BASE - _ramend)
			 * offset. */
			mask = current_rwx_mask[cpu];
			if (mask) {
				/* NOTE(review): this inner "idx" shadows the outer
				 * idx; harmless since the outer one is assigned
				 * only after this branch. */
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Boot ROM, supervisor-mode non-write access only
			 * (FAULT_RW clear): map it with a single 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory between _ramend and physical_mem_end:
		 * freely readable and writable from user mode. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
	} else {
		/* Normal RAM: consult the per-process bitmaps — first chunk
		 * is the read mask, the next page_mask_nelts words are the
		 * write mask. */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	/* DCPLBs must be disabled while an entry is rewritten. */
	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}
165
/*
 * Handle an ICPLB miss: build an instruction-CPLB descriptor for the
 * faulting address, pick a victim entry, and install it in the
 * software table and the hardware ICPLB registers.  Returns 0 on
 * success or CPLB_PROT_VIOL if execution from the address is denied.
 */
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault.  */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Map the *following* page instead, so both
				 * halves of the straddling instruction are
				 * covered. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	/* Default descriptor: valid 4KB page with port priority set. */
	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* On-chip L2: use the fixed L2 instruction descriptor. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async banks: supervisor execution is allowed as-is;
			 * user execution needs the matching bit in the third
			 * (execute) chunk of the process bitmap. */
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

					/* Skip past the read and write chunks
					 * to the execute chunk. */
					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			/* Boot ROM is executable from supervisor mode only;
			 * map it with a single 1MB page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
		    return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory area: user-executable. */
		i_data |= CPLB_USER_RD;
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				/* Third chunk of the bitmap = execute mask. */
				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	/* ICPLBs must be disabled while an entry is rewritten. */
	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}
271
272 static noinline int dcplb_protection_fault(unsigned int cpu)
273 {
274         int status = bfin_read_DCPLB_STATUS();
275
276         nr_dcplb_prot[cpu]++;
277
278         if (status & FAULT_RW) {
279                 int idx = faulting_cplb_index(status);
280                 unsigned long data = dcplb_tbl[cpu][idx].data;
281                 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
282                     write_permitted(status, data)) {
283                         data |= CPLB_DIRTY;
284                         dcplb_tbl[cpu][idx].data = data;
285                         bfin_write32(DCPLB_DATA0 + idx * 4, data);
286                         return 0;
287                 }
288         }
289         return CPLB_PROT_VIOL;
290 }
291
/*
 * Top-level CPLB exception dispatcher, called from the exception
 * handler with the SEQSTAT value.  Routes the low 6 bits (the
 * exception cause) to the matching handler; returns the handler's
 * result, or 1 for causes we do not handle.
 */
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	unsigned int cpu = raw_smp_processor_id();

	switch (seqstat & 0x3f) {
	case 0x23:	/* data access protection violation */
		return dcplb_protection_fault(cpu);
	case 0x26:	/* DCPLB miss */
		return dcplb_miss(cpu);
	case 0x2C:	/* ICPLB miss */
		return icplb_miss(cpu);
	default:
		return 1;
	}
}
307
308 void flush_switched_cplbs(unsigned int cpu)
309 {
310         int i;
311         unsigned long flags;
312
313         nr_cplb_flush[cpu]++;
314
315         local_irq_save_hw(flags);
316         _disable_icplb();
317         for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
318                 icplb_tbl[cpu][i].data = 0;
319                 bfin_write32(ICPLB_DATA0 + i * 4, 0);
320         }
321         _enable_icplb();
322
323         _disable_dcplb();
324         for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
325                 dcplb_tbl[cpu][i].data = 0;
326                 bfin_write32(DCPLB_DATA0 + i * 4, 0);
327         }
328         _enable_dcplb();
329         local_irq_restore_hw(flags);
330
331 }
332
/*
 * Install MASKS as the current process's R/W/X page bitmap on CPU and
 * map the memory holding the bitmap through the dedicated "mask" DCPLB
 * slots (first_mask_dcplb..first_switched_dcplb-1), one 4KB entry per
 * slot.  NOTE(review): presumably this pre-mapping exists so the miss
 * handlers can dereference current_rwx_mask without themselves taking
 * a DCPLB miss — confirm against the exception entry code.
 * A NULL MASKS just clears the current mask and touches no CPLBs.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;

	/* Pick descriptor attributes for where the bitmap lives: the
	 * fixed L2 descriptor if it is in L2, otherwise a supervisor-
	 * writable 4KB page, cacheable per the EXTMEM config options. */
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	/* Cover the bitmap with consecutive pages, one per mask slot.
	 * DCPLBs must be disabled while entries are rewritten. */
	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	local_irq_restore_hw(flags);
}