Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Synthesize TLB refill handlers at runtime. | |
7 | * | |
e30ec452 | 8 | * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer |
95affdda | 9 | * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki |
41c594ab | 10 | * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) |
fd062c84 | 11 | * Copyright (C) 2008, 2009 Cavium Networks, Inc. |
41c594ab RB |
12 | * |
13 | * ... and the days got worse and worse and now you see | |
14 | * I've gone completely out of my mind. | |
15 | * | |
16 | * They're coming to take me away haha | |
17 | * they're coming to take me away hoho hihi haha | |
18 | * to the funny farm where code is beautiful all the time ... | |
19 | * | |
20 | * (Condolences to Napoleon XIV) | |
1da177e4 LT |
21 | */ |
22 | ||
95affdda | 23 | #include <linux/bug.h> |
1da177e4 LT |
24 | #include <linux/kernel.h> |
25 | #include <linux/types.h> | |
631330f5 | 26 | #include <linux/smp.h> |
1da177e4 LT |
27 | #include <linux/string.h> |
28 | #include <linux/init.h> | |
29 | ||
1da177e4 | 30 | #include <asm/mmu_context.h> |
1da177e4 | 31 | #include <asm/war.h> |
3482d713 | 32 | #include <asm/uasm.h> |
e30ec452 | 33 | |
1ec56329 DD |
34 | /* |
35 | * TLB load/store/modify handlers. | |
36 | * | |
37 | * Only the fastpath gets synthesized at runtime; the slowpath for | |
38 | * do_page_fault remains normal asm. | |
39 | */ | |
40 | extern void tlb_do_page_fault_0(void); | |
41 | extern void tlb_do_page_fault_1(void); | |
42 | ||
43 | ||
aeffdbba | 44 | static inline int r45k_bvahwbug(void) |
1da177e4 LT |
45 | { |
46 | /* XXX: We should probe for the presence of this bug, but we don't. */ | |
47 | return 0; | |
48 | } | |
49 | ||
aeffdbba | 50 | static inline int r4k_250MHZhwbug(void) |
1da177e4 LT |
51 | { |
52 | /* XXX: We should probe for the presence of this bug, but we don't. */ | |
53 | return 0; | |
54 | } | |
55 | ||
aeffdbba | 56 | static inline int __maybe_unused bcm1250_m3_war(void) |
1da177e4 LT |
57 | { |
58 | return BCM1250_M3_WAR; | |
59 | } | |
60 | ||
aeffdbba | 61 | static inline int __maybe_unused r10000_llsc_war(void) |
1da177e4 LT |
62 | { |
63 | return R10000_LLSC_WAR; | |
64 | } | |
65 | ||
8df5beac MR |
66 | /* |
67 | * Found by experiment: At least some revisions of the 4kc throw a | |
68 | * machine check exception under some circumstances, triggered by invalid | |
69 | * values in the index register. Delaying the tlbp instruction until | |
70 | * after the next branch, plus adding an additional nop in front of | |
71 | * tlbwi/tlbwr avoids the invalid index register values. Nobody knows | |
72 | * why; it's not an issue caused by the core RTL. | |
73 | * | |
74 | */ | |
234fcd14 | 75 | static int __cpuinit m4kc_tlbp_war(void) |
8df5beac MR |
76 | { |
77 | return (current_cpu_data.processor_id & 0xffff00) == | |
78 | (PRID_COMP_MIPS | PRID_IMP_4KC); | |
79 | } | |
80 | ||
e30ec452 | 81 | /* Handle labels (which must be positive integers). */ |
1da177e4 | 82 | enum label_id { |
e30ec452 | 83 | label_second_part = 1, |
1da177e4 LT |
84 | label_leave, |
85 | label_vmalloc, | |
86 | label_vmalloc_done, | |
87 | label_tlbw_hazard, | |
88 | label_split, | |
6dd9344c DD |
89 | label_tlbl_goaround1, |
90 | label_tlbl_goaround2, | |
1da177e4 LT |
91 | label_nopage_tlbl, |
92 | label_nopage_tlbs, | |
93 | label_nopage_tlbm, | |
94 | label_smp_pgtable_change, | |
95 | label_r3000_write_probe_fail, | |
1ec56329 | 96 | label_large_segbits_fault, |
fd062c84 DD |
97 | #ifdef CONFIG_HUGETLB_PAGE |
98 | label_tlb_huge_update, | |
99 | #endif | |
1da177e4 LT |
100 | }; |
101 | ||
e30ec452 TS |
102 | UASM_L_LA(_second_part) |
103 | UASM_L_LA(_leave) | |
e30ec452 TS |
104 | UASM_L_LA(_vmalloc) |
105 | UASM_L_LA(_vmalloc_done) | |
106 | UASM_L_LA(_tlbw_hazard) | |
107 | UASM_L_LA(_split) | |
6dd9344c DD |
108 | UASM_L_LA(_tlbl_goaround1) |
109 | UASM_L_LA(_tlbl_goaround2) | |
e30ec452 TS |
110 | UASM_L_LA(_nopage_tlbl) |
111 | UASM_L_LA(_nopage_tlbs) | |
112 | UASM_L_LA(_nopage_tlbm) | |
113 | UASM_L_LA(_smp_pgtable_change) | |
114 | UASM_L_LA(_r3000_write_probe_fail) | |
1ec56329 | 115 | UASM_L_LA(_large_segbits_fault) |
fd062c84 DD |
116 | #ifdef CONFIG_HUGETLB_PAGE |
117 | UASM_L_LA(_tlb_huge_update) | |
118 | #endif | |
656be92f | 119 | |
92b1e6a6 FBH |
120 | /* |
121 | * For debug purposes. | |
122 | */ | |
123 | static inline void dump_handler(const u32 *handler, int count) | |
124 | { | |
125 | int i; | |
126 | ||
127 | pr_debug("\t.set push\n"); | |
128 | pr_debug("\t.set noreorder\n"); | |
129 | ||
130 | for (i = 0; i < count; i++) | |
131 | pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]); | |
132 | ||
133 | pr_debug("\t.set pop\n"); | |
134 | } | |
135 | ||
1da177e4 LT |
136 | /* The only general purpose registers allowed in TLB handlers. */ |
137 | #define K0 26 | |
138 | #define K1 27 | |
139 | ||
140 | /* Some CP0 registers */ | |
41c594ab RB |
141 | #define C0_INDEX 0, 0 |
142 | #define C0_ENTRYLO0 2, 0 | |
143 | #define C0_TCBIND 2, 2 | |
144 | #define C0_ENTRYLO1 3, 0 | |
145 | #define C0_CONTEXT 4, 0 | |
fd062c84 | 146 | #define C0_PAGEMASK 5, 0 |
41c594ab RB |
147 | #define C0_BADVADDR 8, 0 |
148 | #define C0_ENTRYHI 10, 0 | |
149 | #define C0_EPC 14, 0 | |
150 | #define C0_XCONTEXT 20, 0 | |
1da177e4 | 151 | |
875d43e7 | 152 | #ifdef CONFIG_64BIT |
e30ec452 | 153 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) |
1da177e4 | 154 | #else |
e30ec452 | 155 | # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT) |
1da177e4 LT |
156 | #endif |
157 | ||
158 | /* The worst case length of the handler is around 18 instructions for | |
159 | * R3000-style TLBs and up to 63 instructions for R4000-style TLBs. | |
160 | * Maximum space available is 32 instructions for R3000 and 64 | |
161 | * instructions for R4000. | |
162 | * | |
163 | * We deliberately chose a buffer size of 128, so we won't scribble | |
164 | * over anything important on overflow before we panic. | |
165 | */ | |
234fcd14 | 166 | static u32 tlb_handler[128] __cpuinitdata; |
1da177e4 LT |
167 | |
168 | /* simply assume worst case size for labels and relocs */ | |
234fcd14 RB |
169 | static struct uasm_label labels[128] __cpuinitdata; |
170 | static struct uasm_reloc relocs[128] __cpuinitdata; | |
1da177e4 | 171 | |
1ec56329 DD |
172 | #ifdef CONFIG_64BIT |
173 | static int check_for_high_segbits __cpuinitdata; | |
174 | #endif | |
175 | ||
82622284 DD |
176 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
177 | /* | |
178 | * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current, | |
179 | * we cannot do r3000 under these circumstances. | |
180 | */ | |
181 | ||
1da177e4 LT |
182 | /* |
183 | * The R3000 TLB handler is simple. | |
184 | */ | |
234fcd14 | 185 | static void __cpuinit build_r3000_tlb_refill_handler(void) |
1da177e4 LT |
186 | { |
187 | long pgdc = (long)pgd_current; | |
188 | u32 *p; | |
189 | ||
190 | memset(tlb_handler, 0, sizeof(tlb_handler)); | |
191 | p = tlb_handler; | |
192 | ||
e30ec452 TS |
193 | uasm_i_mfc0(&p, K0, C0_BADVADDR); |
194 | uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ | |
195 | uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); | |
196 | uasm_i_srl(&p, K0, K0, 22); /* load delay */ | |
197 | uasm_i_sll(&p, K0, K0, 2); | |
198 | uasm_i_addu(&p, K1, K1, K0); | |
199 | uasm_i_mfc0(&p, K0, C0_CONTEXT); | |
200 | uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ | |
201 | uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ | |
202 | uasm_i_addu(&p, K1, K1, K0); | |
203 | uasm_i_lw(&p, K0, 0, K1); | |
204 | uasm_i_nop(&p); /* load delay */ | |
205 | uasm_i_mtc0(&p, K0, C0_ENTRYLO0); | |
206 | uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ | |
207 | uasm_i_tlbwr(&p); /* cp0 delay */ | |
208 | uasm_i_jr(&p, K1); | |
209 | uasm_i_rfe(&p); /* branch delay */ | |
1da177e4 LT |
210 | |
211 | if (p > tlb_handler + 32) | |
212 | panic("TLB refill handler space exceeded"); | |
213 | ||
e30ec452 TS |
214 | pr_debug("Wrote TLB refill handler (%u instructions).\n", |
215 | (unsigned int)(p - tlb_handler)); | |
1da177e4 | 216 | |
91b05e67 | 217 | memcpy((void *)ebase, tlb_handler, 0x80); |
92b1e6a6 FBH |
218 | |
219 | dump_handler((u32 *)ebase, 32); | |
1da177e4 | 220 | } |
82622284 | 221 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ |
1da177e4 LT |
222 | |
223 | /* | |
224 | * The R4000 TLB handler is much more complicated. We have two | |
225 | * consecutive handler areas with 32 instructions of space each. | |
226 | * Since they aren't used at the same time, we can overflow into the | |
227 | * other one. To keep things simple, we first assume linear space, | |
228 | * then we relocate it to the final handler layout as needed. | |
229 | */ | |
234fcd14 | 230 | static u32 final_handler[64] __cpuinitdata; |
1da177e4 LT |
231 | |
232 | /* | |
233 | * Hazards | |
234 | * | |
235 | * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: | |
236 | * 2. A timing hazard exists for the TLBP instruction. | |
237 | * | |
238 | * stalling_instruction | |
239 | * TLBP | |
240 | * | |
241 | * The JTLB is being read for the TLBP throughout the stall generated by the | |
242 | * previous instruction. This is not really correct as the stalling instruction | |
243 | * can modify the address used to access the JTLB. The failure symptom is that | |
244 | * the TLBP instruction will use an address created for the stalling instruction | |
245 | * and not the address held in C0_EntryHi and thus report the wrong results. | |
246 | * | |
247 | * The software work-around is to not allow the instruction preceding the TLBP | |
248 | * to stall - make it an NOP or some other instruction guaranteed not to stall. | |
249 | * | |
250 | * Errata 2 will not be fixed. This errata is also on the R5000. | |
251 | * | |
252 | * As if we MIPS hackers wouldn't know how to nop pipelines happy ... | |
253 | */ | |
234fcd14 | 254 | static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) |
1da177e4 | 255 | { |
10cc3529 | 256 | switch (current_cpu_type()) { |
326e2e1a | 257 | /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ |
f5b4d956 | 258 | case CPU_R4600: |
326e2e1a | 259 | case CPU_R4700: |
1da177e4 LT |
260 | case CPU_R5000: |
261 | case CPU_R5000A: | |
262 | case CPU_NEVADA: | |
e30ec452 TS |
263 | uasm_i_nop(p); |
264 | uasm_i_tlbp(p); | |
1da177e4 LT |
265 | break; |
266 | ||
267 | default: | |
e30ec452 | 268 | uasm_i_tlbp(p); |
1da177e4 LT |
269 | break; |
270 | } | |
271 | } | |
272 | ||
273 | /* | |
274 | * Write random or indexed TLB entry, and care about the hazards from | |
275 | * the preceding mtc0 and for the following eret. | |
276 | */ | |
277 | enum tlb_write_entry { tlb_random, tlb_indexed }; | |
278 | ||
234fcd14 | 279 | static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, |
e30ec452 | 280 | struct uasm_reloc **r, |
1da177e4 LT |
281 | enum tlb_write_entry wmode) |
282 | { | |
283 | void(*tlbw)(u32 **) = NULL; | |
284 | ||
285 | switch (wmode) { | |
e30ec452 TS |
286 | case tlb_random: tlbw = uasm_i_tlbwr; break; |
287 | case tlb_indexed: tlbw = uasm_i_tlbwi; break; | |
1da177e4 LT |
288 | } |
289 | ||
161548bf | 290 | if (cpu_has_mips_r2) { |
41f0e4d0 DD |
291 | if (cpu_has_mips_r2_exec_hazard) |
292 | uasm_i_ehb(p); | |
161548bf RB |
293 | tlbw(p); |
294 | return; | |
295 | } | |
296 | ||
10cc3529 | 297 | switch (current_cpu_type()) { |
1da177e4 LT |
298 | case CPU_R4000PC: |
299 | case CPU_R4000SC: | |
300 | case CPU_R4000MC: | |
301 | case CPU_R4400PC: | |
302 | case CPU_R4400SC: | |
303 | case CPU_R4400MC: | |
304 | /* | |
305 | * This branch uses up a mtc0 hazard nop slot and saves | |
306 | * two nops after the tlbw instruction. | |
307 | */ | |
e30ec452 | 308 | uasm_il_bgezl(p, r, 0, label_tlbw_hazard); |
1da177e4 | 309 | tlbw(p); |
e30ec452 TS |
310 | uasm_l_tlbw_hazard(l, *p); |
311 | uasm_i_nop(p); | |
1da177e4 LT |
312 | break; |
313 | ||
314 | case CPU_R4600: | |
315 | case CPU_R4700: | |
316 | case CPU_R5000: | |
317 | case CPU_R5000A: | |
e30ec452 | 318 | uasm_i_nop(p); |
2c93e12c | 319 | tlbw(p); |
e30ec452 | 320 | uasm_i_nop(p); |
2c93e12c MR |
321 | break; |
322 | ||
323 | case CPU_R4300: | |
1da177e4 LT |
324 | case CPU_5KC: |
325 | case CPU_TX49XX: | |
bdf21b18 | 326 | case CPU_PR4450: |
e30ec452 | 327 | uasm_i_nop(p); |
1da177e4 LT |
328 | tlbw(p); |
329 | break; | |
330 | ||
331 | case CPU_R10000: | |
332 | case CPU_R12000: | |
44d921b2 | 333 | case CPU_R14000: |
1da177e4 | 334 | case CPU_4KC: |
b1ec4c8e | 335 | case CPU_4KEC: |
1da177e4 | 336 | case CPU_SB1: |
93ce2f52 | 337 | case CPU_SB1A: |
1da177e4 LT |
338 | case CPU_4KSC: |
339 | case CPU_20KC: | |
340 | case CPU_25KF: | |
602977b0 KC |
341 | case CPU_BMIPS32: |
342 | case CPU_BMIPS3300: | |
343 | case CPU_BMIPS4350: | |
344 | case CPU_BMIPS4380: | |
345 | case CPU_BMIPS5000: | |
2a21c730 | 346 | case CPU_LOONGSON2: |
a644b277 | 347 | case CPU_R5500: |
8df5beac | 348 | if (m4kc_tlbp_war()) |
e30ec452 | 349 | uasm_i_nop(p); |
2f794d09 | 350 | case CPU_ALCHEMY: |
1da177e4 LT |
351 | tlbw(p); |
352 | break; | |
353 | ||
354 | case CPU_NEVADA: | |
e30ec452 | 355 | uasm_i_nop(p); /* QED specifies 2 nops hazard */ |
1da177e4 LT |
356 | /* |
357 | * This branch uses up a mtc0 hazard nop slot and saves | |
358 | * a nop after the tlbw instruction. | |
359 | */ | |
e30ec452 | 360 | uasm_il_bgezl(p, r, 0, label_tlbw_hazard); |
1da177e4 | 361 | tlbw(p); |
e30ec452 | 362 | uasm_l_tlbw_hazard(l, *p); |
1da177e4 LT |
363 | break; |
364 | ||
365 | case CPU_RM7000: | |
e30ec452 TS |
366 | uasm_i_nop(p); |
367 | uasm_i_nop(p); | |
368 | uasm_i_nop(p); | |
369 | uasm_i_nop(p); | |
1da177e4 LT |
370 | tlbw(p); |
371 | break; | |
372 | ||
1da177e4 LT |
373 | case CPU_RM9000: |
374 | /* | |
375 | * When the JTLB is updated by tlbwi or tlbwr, a subsequent | |
376 | * use of the JTLB for instructions should not occur for 4 | |
377 | * cpu cycles and use for data translations should not occur | |
378 | * for 3 cpu cycles. | |
379 | */ | |
e30ec452 TS |
380 | uasm_i_ssnop(p); |
381 | uasm_i_ssnop(p); | |
382 | uasm_i_ssnop(p); | |
383 | uasm_i_ssnop(p); | |
1da177e4 | 384 | tlbw(p); |
e30ec452 TS |
385 | uasm_i_ssnop(p); |
386 | uasm_i_ssnop(p); | |
387 | uasm_i_ssnop(p); | |
388 | uasm_i_ssnop(p); | |
1da177e4 LT |
389 | break; |
390 | ||
391 | case CPU_VR4111: | |
392 | case CPU_VR4121: | |
393 | case CPU_VR4122: | |
394 | case CPU_VR4181: | |
395 | case CPU_VR4181A: | |
e30ec452 TS |
396 | uasm_i_nop(p); |
397 | uasm_i_nop(p); | |
1da177e4 | 398 | tlbw(p); |
e30ec452 TS |
399 | uasm_i_nop(p); |
400 | uasm_i_nop(p); | |
1da177e4 LT |
401 | break; |
402 | ||
403 | case CPU_VR4131: | |
404 | case CPU_VR4133: | |
7623debf | 405 | case CPU_R5432: |
e30ec452 TS |
406 | uasm_i_nop(p); |
407 | uasm_i_nop(p); | |
1da177e4 LT |
408 | tlbw(p); |
409 | break; | |
410 | ||
83ccf69d LPC |
411 | case CPU_JZRISC: |
412 | tlbw(p); | |
413 | uasm_i_nop(p); | |
414 | break; | |
415 | ||
1da177e4 LT |
416 | default: |
417 | panic("No TLB refill handler yet (CPU type: %d)", | |
418 | current_cpu_data.cputype); | |
419 | break; | |
420 | } | |
421 | } | |
422 | ||
6dd9344c DD |
423 | static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, |
424 | unsigned int reg) | |
fd062c84 | 425 | { |
6dd9344c DD |
426 | if (kernel_uses_smartmips_rixi) { |
427 | UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC)); | |
428 | UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | |
429 | } else { | |
430 | #ifdef CONFIG_64BIT_PHYS_ADDR | |
3be6022c | 431 | uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); |
6dd9344c DD |
432 | #else |
433 | UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); | |
434 | #endif | |
435 | } | |
436 | } | |
fd062c84 | 437 | |
6dd9344c | 438 | #ifdef CONFIG_HUGETLB_PAGE |
fd062c84 | 439 | |
6dd9344c DD |
440 | static __cpuinit void build_restore_pagemask(u32 **p, |
441 | struct uasm_reloc **r, | |
442 | unsigned int tmp, | |
443 | enum label_id lid) | |
444 | { | |
fd062c84 DD |
445 | /* Reset default page size */ |
446 | if (PM_DEFAULT_MASK >> 16) { | |
447 | uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); | |
448 | uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); | |
6dd9344c | 449 | uasm_il_b(p, r, lid); |
fd062c84 DD |
450 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); |
451 | } else if (PM_DEFAULT_MASK) { | |
452 | uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); | |
6dd9344c | 453 | uasm_il_b(p, r, lid); |
fd062c84 DD |
454 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); |
455 | } else { | |
6dd9344c | 456 | uasm_il_b(p, r, lid); |
fd062c84 DD |
457 | uasm_i_mtc0(p, 0, C0_PAGEMASK); |
458 | } | |
459 | } | |
460 | ||
6dd9344c DD |
461 | static __cpuinit void build_huge_tlb_write_entry(u32 **p, |
462 | struct uasm_label **l, | |
463 | struct uasm_reloc **r, | |
464 | unsigned int tmp, | |
465 | enum tlb_write_entry wmode) | |
466 | { | |
467 | /* Set huge page tlb entry size */ | |
468 | uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); | |
469 | uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); | |
470 | uasm_i_mtc0(p, tmp, C0_PAGEMASK); | |
471 | ||
472 | build_tlb_write_entry(p, l, r, wmode); | |
473 | ||
474 | build_restore_pagemask(p, r, tmp, label_leave); | |
475 | } | |
476 | ||
fd062c84 DD |
477 | /* |
478 | * Check if Huge PTE is present, if so then jump to LABEL. | |
479 | */ | |
480 | static void __cpuinit | |
481 | build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, | |
482 | unsigned int pmd, int lid) | |
483 | { | |
484 | UASM_i_LW(p, tmp, 0, pmd); | |
485 | uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); | |
486 | uasm_il_bnez(p, r, tmp, lid); | |
487 | } | |
488 | ||
489 | static __cpuinit void build_huge_update_entries(u32 **p, | |
490 | unsigned int pte, | |
491 | unsigned int tmp) | |
492 | { | |
493 | int small_sequence; | |
494 | ||
495 | /* | |
496 | * A huge PTE describes an area the size of the | |
497 | * configured huge page size. This is twice the | |
498 | * large TLB entry size we intend to use. | |
499 | * A TLB entry half the size of the configured | |
500 | * huge page size is configured into entrylo0 | |
501 | * and entrylo1 to cover the contiguous huge PTE | |
502 | * address space. | |
503 | */ | |
504 | small_sequence = (HPAGE_SIZE >> 7) < 0x10000; | |
505 | ||
506 | /* We can clobber tmp. It isn't used after this.*/ | |
507 | if (!small_sequence) | |
508 | uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); | |
509 | ||
6dd9344c | 510 | build_convert_pte_to_entrylo(p, pte); |
9b8c3891 | 511 | UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ |
fd062c84 DD |
512 | /* convert to entrylo1 */ |
513 | if (small_sequence) | |
514 | UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); | |
515 | else | |
516 | UASM_i_ADDU(p, pte, pte, tmp); | |
517 | ||
9b8c3891 | 518 | UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ |
fd062c84 DD |
519 | } |
520 | ||
521 | static __cpuinit void build_huge_handler_tail(u32 **p, | |
522 | struct uasm_reloc **r, | |
523 | struct uasm_label **l, | |
524 | unsigned int pte, | |
525 | unsigned int ptr) | |
526 | { | |
527 | #ifdef CONFIG_SMP | |
528 | UASM_i_SC(p, pte, 0, ptr); | |
529 | uasm_il_beqz(p, r, pte, label_tlb_huge_update); | |
530 | UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ | |
531 | #else | |
532 | UASM_i_SW(p, pte, 0, ptr); | |
533 | #endif | |
534 | build_huge_update_entries(p, pte, ptr); | |
535 | build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed); | |
536 | } | |
537 | #endif /* CONFIG_HUGETLB_PAGE */ | |
538 | ||
875d43e7 | 539 | #ifdef CONFIG_64BIT |
1da177e4 LT |
540 | /* |
541 | * TMP and PTR are scratch. | |
542 | * TMP will be clobbered, PTR will hold the pmd entry. | |
543 | */ | |
234fcd14 | 544 | static void __cpuinit |
e30ec452 | 545 | build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, |
1da177e4 LT |
546 | unsigned int tmp, unsigned int ptr) |
547 | { | |
82622284 | 548 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
1da177e4 | 549 | long pgdc = (long)pgd_current; |
82622284 | 550 | #endif |
1da177e4 LT |
551 | /* |
552 | * The vmalloc handling is not in the hotpath. | |
553 | */ | |
e30ec452 | 554 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); |
1ec56329 DD |
555 | |
556 | if (check_for_high_segbits) { | |
557 | /* | |
558 | * The kernel currently implicitly assumes that the | |
559 | * MIPS SEGBITS parameter for the processor is | |
560 | * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never | |
561 | * allocate virtual addresses outside the maximum | |
562 | * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But | |
563 | * that doesn't prevent user code from accessing the | |
564 | * higher xuseg addresses. Here, we make sure that | |
565 | * everything but the lower xuseg addresses goes down | |
566 | * the module_alloc/vmalloc path. | |
567 | */ | |
568 | uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); | |
569 | uasm_il_bnez(p, r, ptr, label_vmalloc); | |
570 | } else { | |
571 | uasm_il_bltz(p, r, tmp, label_vmalloc); | |
572 | } | |
e30ec452 | 573 | /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ |
1da177e4 | 574 | |
82622284 DD |
575 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
576 | /* | |
577 | * &pgd << 11 stored in CONTEXT [23..63]. | |
578 | */ | |
579 | UASM_i_MFC0(p, ptr, C0_CONTEXT); | |
580 | uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */ | |
581 | uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */ | |
582 | uasm_i_drotr(p, ptr, ptr, 11); | |
583 | #elif defined(CONFIG_SMP) | |
41c594ab RB |
584 | # ifdef CONFIG_MIPS_MT_SMTC |
585 | /* | |
586 | * SMTC uses TCBind value as "CPU" index | |
587 | */ | |
e30ec452 | 588 | uasm_i_mfc0(p, ptr, C0_TCBIND); |
3be6022c | 589 | uasm_i_dsrl_safe(p, ptr, ptr, 19); |
41c594ab | 590 | # else |
1da177e4 | 591 | /* |
1b3a6e97 | 592 | * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 |
1da177e4 LT |
593 | * stored in CONTEXT. |
594 | */ | |
e30ec452 | 595 | uasm_i_dmfc0(p, ptr, C0_CONTEXT); |
3be6022c | 596 | uasm_i_dsrl_safe(p, ptr, ptr, 23); |
82622284 | 597 | # endif |
e30ec452 TS |
598 | UASM_i_LA_mostly(p, tmp, pgdc); |
599 | uasm_i_daddu(p, ptr, ptr, tmp); | |
600 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); | |
601 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); | |
1da177e4 | 602 | #else |
e30ec452 TS |
603 | UASM_i_LA_mostly(p, ptr, pgdc); |
604 | uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); | |
1da177e4 LT |
605 | #endif |
606 | ||
e30ec452 | 607 | uasm_l_vmalloc_done(l, *p); |
242954b5 | 608 | |
3be6022c DD |
609 | /* get pgd offset in bytes */ |
610 | uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3); | |
e30ec452 TS |
611 | |
612 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3); | |
613 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */ | |
325f8a0a | 614 | #ifndef __PAGETABLE_PMD_FOLDED |
e30ec452 TS |
615 | uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */ |
616 | uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */ | |
3be6022c | 617 | uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */ |
e30ec452 TS |
618 | uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3); |
619 | uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */ | |
325f8a0a | 620 | #endif |
1da177e4 LT |
621 | } |
622 | ||
1ec56329 | 623 | enum vmalloc64_mode {not_refill, refill}; |
1da177e4 LT |
624 | /* |
625 | * BVADDR is the faulting address, PTR is scratch. | |
626 | * PTR will hold the pgd for vmalloc. | |
627 | */ | |
234fcd14 | 628 | static void __cpuinit |
e30ec452 | 629 | build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, |
1ec56329 DD |
630 | unsigned int bvaddr, unsigned int ptr, |
631 | enum vmalloc64_mode mode) | |
1da177e4 LT |
632 | { |
633 | long swpd = (long)swapper_pg_dir; | |
1ec56329 DD |
634 | int single_insn_swpd; |
635 | int did_vmalloc_branch = 0; | |
636 | ||
637 | single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd); | |
1da177e4 | 638 | |
e30ec452 | 639 | uasm_l_vmalloc(l, *p); |
1da177e4 | 640 | |
1ec56329 DD |
641 | if (mode == refill && check_for_high_segbits) { |
642 | if (single_insn_swpd) { | |
643 | uasm_il_bltz(p, r, bvaddr, label_vmalloc_done); | |
644 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); | |
645 | did_vmalloc_branch = 1; | |
646 | /* fall through */ | |
647 | } else { | |
648 | uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault); | |
649 | } | |
650 | } | |
651 | if (!did_vmalloc_branch) { | |
652 | if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { | |
653 | uasm_il_b(p, r, label_vmalloc_done); | |
654 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); | |
655 | } else { | |
656 | UASM_i_LA_mostly(p, ptr, swpd); | |
657 | uasm_il_b(p, r, label_vmalloc_done); | |
658 | if (uasm_in_compat_space_p(swpd)) | |
659 | uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd)); | |
660 | else | |
661 | uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd)); | |
662 | } | |
663 | } | |
664 | if (mode == refill && check_for_high_segbits) { | |
665 | uasm_l_large_segbits_fault(l, *p); | |
666 | /* | |
667 | * We get here if we are an xsseg address, or if we are | |
668 | * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary. | |
669 | * | |
670 | * Ignoring xsseg (assume disabled, so it would generate | |
671 | * address errors), the only remaining possibility | |
672 | * is the upper xuseg addresses. On processors with | |
673 | * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these | |
674 | * addresses would have taken an address error. We try | |
675 | * to mimic that here by taking a load/istream page | |
676 | * fault. | |
677 | */ | |
678 | UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); | |
679 | uasm_i_jr(p, ptr); | |
680 | uasm_i_nop(p); | |
1da177e4 LT |
681 | } |
682 | } | |
683 | ||
875d43e7 | 684 | #else /* !CONFIG_64BIT */ |
1da177e4 LT |
685 | |
686 | /* | |
687 | * TMP and PTR are scratch. | |
688 | * TMP will be clobbered, PTR will hold the pgd entry. | |
689 | */ | |
234fcd14 | 690 | static void __cpuinit __maybe_unused |
1da177e4 LT |
691 | build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) |
692 | { | |
693 | long pgdc = (long)pgd_current; | |
694 | ||
695 | /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ | |
696 | #ifdef CONFIG_SMP | |
41c594ab RB |
697 | #ifdef CONFIG_MIPS_MT_SMTC |
698 | /* | |
699 | * SMTC uses TCBind value as "CPU" index | |
700 | */ | |
e30ec452 TS |
701 | uasm_i_mfc0(p, ptr, C0_TCBIND); |
702 | UASM_i_LA_mostly(p, tmp, pgdc); | |
703 | uasm_i_srl(p, ptr, ptr, 19); | |
41c594ab RB |
704 | #else |
705 | /* | |
706 | * smp_processor_id() << 3 is stored in CONTEXT. | |
707 | */ | |
e30ec452 TS |
708 | uasm_i_mfc0(p, ptr, C0_CONTEXT); |
709 | UASM_i_LA_mostly(p, tmp, pgdc); | |
710 | uasm_i_srl(p, ptr, ptr, 23); | |
41c594ab | 711 | #endif |
e30ec452 | 712 | uasm_i_addu(p, ptr, tmp, ptr); |
1da177e4 | 713 | #else |
e30ec452 | 714 | UASM_i_LA_mostly(p, ptr, pgdc); |
1da177e4 | 715 | #endif |
e30ec452 TS |
716 | uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ |
717 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); | |
718 | uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */ | |
719 | uasm_i_sll(p, tmp, tmp, PGD_T_LOG2); | |
720 | uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ | |
1da177e4 LT |
721 | } |
722 | ||
875d43e7 | 723 | #endif /* !CONFIG_64BIT */ |
1da177e4 | 724 | |
234fcd14 | 725 | static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) |
1da177e4 | 726 | { |
242954b5 | 727 | unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; |
1da177e4 LT |
728 | unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); |
729 | ||
10cc3529 | 730 | switch (current_cpu_type()) { |
1da177e4 LT |
731 | case CPU_VR41XX: |
732 | case CPU_VR4111: | |
733 | case CPU_VR4121: | |
734 | case CPU_VR4122: | |
735 | case CPU_VR4131: | |
736 | case CPU_VR4181: | |
737 | case CPU_VR4181A: | |
738 | case CPU_VR4133: | |
739 | shift += 2; | |
740 | break; | |
741 | ||
742 | default: | |
743 | break; | |
744 | } | |
745 | ||
746 | if (shift) | |
e30ec452 TS |
747 | UASM_i_SRL(p, ctx, ctx, shift); |
748 | uasm_i_andi(p, ctx, ctx, mask); | |
1da177e4 LT |
749 | } |
750 | ||
234fcd14 | 751 | static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) |
1da177e4 LT |
752 | { |
753 | /* | |
754 | * Bug workaround for the Nevada. It seems as if, under certain | |
755 | * circumstances, the move from cp0_context might produce a | |
756 | * bogus result when the mfc0 instruction and its consumer are | |
757 | * in different cachelines, or when a load instruction (probably any | |
758 | * memory reference) is between them. | |
759 | */ | |
10cc3529 | 760 | switch (current_cpu_type()) { |
1da177e4 | 761 | case CPU_NEVADA: |
e30ec452 | 762 | UASM_i_LW(p, ptr, 0, ptr); |
1da177e4 LT |
763 | GET_CONTEXT(p, tmp); /* get context reg */ |
764 | break; | |
765 | ||
766 | default: | |
767 | GET_CONTEXT(p, tmp); /* get context reg */ | |
e30ec452 | 768 | UASM_i_LW(p, ptr, 0, ptr); |
1da177e4 LT |
769 | break; |
770 | } | |
771 | ||
772 | build_adjust_context(p, tmp); | |
e30ec452 | 773 | UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ |
1da177e4 LT |
774 | } |
775 | ||
234fcd14 | 776 | static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, |
1da177e4 LT |
777 | unsigned int ptep) |
778 | { | |
779 | /* | |
780 | * 64bit address support (36bit on a 32bit CPU) in a 32bit | |
781 | * Kernel is a special case. Only a few CPUs use it. | |
782 | */ | |
783 | #ifdef CONFIG_64BIT_PHYS_ADDR | |
784 | if (cpu_has_64bits) { | |
e30ec452 TS |
785 | uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ |
786 | uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | |
6dd9344c DD |
787 | if (kernel_uses_smartmips_rixi) { |
788 | UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); | |
789 | UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); | |
790 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | |
791 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | |
792 | UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | |
793 | } else { | |
3be6022c | 794 | uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ |
6dd9344c | 795 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ |
3be6022c | 796 | uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ |
6dd9344c | 797 | } |
9b8c3891 | 798 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ |
1da177e4 LT |
799 | } else { |
800 | int pte_off_even = sizeof(pte_t) / 2; | |
801 | int pte_off_odd = pte_off_even + sizeof(pte_t); | |
802 | ||
803 | /* The pte entries are pre-shifted */ | |
e30ec452 | 804 | uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */ |
9b8c3891 | 805 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ |
e30ec452 | 806 | uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */ |
9b8c3891 | 807 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ |
1da177e4 LT |
808 | } |
809 | #else | |
e30ec452 TS |
810 | UASM_i_LW(p, tmp, 0, ptep); /* get even pte */ |
811 | UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ | |
1da177e4 LT |
812 | if (r45k_bvahwbug()) |
813 | build_tlb_probe_entry(p); | |
6dd9344c DD |
814 | if (kernel_uses_smartmips_rixi) { |
815 | UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC)); | |
816 | UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC)); | |
817 | UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | |
818 | if (r4k_250MHZhwbug()) | |
819 | UASM_i_MTC0(p, 0, C0_ENTRYLO0); | |
820 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | |
821 | UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | |
822 | } else { | |
823 | UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */ | |
824 | if (r4k_250MHZhwbug()) | |
825 | UASM_i_MTC0(p, 0, C0_ENTRYLO0); | |
826 | UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ | |
827 | UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ | |
828 | if (r45k_bvahwbug()) | |
829 | uasm_i_mfc0(p, tmp, C0_INDEX); | |
830 | } | |
1da177e4 | 831 | if (r4k_250MHZhwbug()) |
9b8c3891 DD |
832 | UASM_i_MTC0(p, 0, C0_ENTRYLO1); |
833 | UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ | |
1da177e4 LT |
834 | #endif |
835 | } | |
836 | ||
e6f72d3a DD |
837 | /* |
838 | * For a 64-bit kernel, we are using the 64-bit XTLB refill exception | |
839 | * because EXL == 0. If we wrap, we can also use the 32 instruction | |
840 | * slots before the XTLB refill exception handler which belong to the | |
841 | * unused TLB refill exception. | |
842 | */ | |
843 | #define MIPS64_REFILL_INSNS 32 | |
844 | ||
234fcd14 | 845 | static void __cpuinit build_r4000_tlb_refill_handler(void) |
1da177e4 LT |
846 | { |
847 | u32 *p = tlb_handler; | |
e30ec452 TS |
848 | struct uasm_label *l = labels; |
849 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
850 | u32 *f; |
851 | unsigned int final_len; | |
852 | ||
853 | memset(tlb_handler, 0, sizeof(tlb_handler)); | |
854 | memset(labels, 0, sizeof(labels)); | |
855 | memset(relocs, 0, sizeof(relocs)); | |
856 | memset(final_handler, 0, sizeof(final_handler)); | |
857 | ||
858 | /* | |
859 | * create the plain linear handler | |
860 | */ | |
861 | if (bcm1250_m3_war()) { | |
3d45285d RB |
862 | unsigned int segbits = 44; |
863 | ||
864 | uasm_i_dmfc0(&p, K0, C0_BADVADDR); | |
865 | uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | |
e30ec452 | 866 | uasm_i_xor(&p, K0, K0, K1); |
3be6022c DD |
867 | uasm_i_dsrl_safe(&p, K1, K0, 62); |
868 | uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); | |
869 | uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); | |
3d45285d | 870 | uasm_i_or(&p, K0, K0, K1); |
e30ec452 TS |
871 | uasm_il_bnez(&p, &r, K0, label_leave); |
872 | /* No need for uasm_i_nop */ | |
1da177e4 LT |
873 | } |
874 | ||
875d43e7 | 875 | #ifdef CONFIG_64BIT |
1da177e4 LT |
876 | build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ |
877 | #else | |
878 | build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ | |
879 | #endif | |
880 | ||
fd062c84 DD |
881 | #ifdef CONFIG_HUGETLB_PAGE |
882 | build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); | |
883 | #endif | |
884 | ||
1da177e4 LT |
885 | build_get_ptep(&p, K0, K1); |
886 | build_update_entries(&p, K0, K1); | |
887 | build_tlb_write_entry(&p, &l, &r, tlb_random); | |
e30ec452 TS |
888 | uasm_l_leave(&l, p); |
889 | uasm_i_eret(&p); /* return from trap */ | |
1da177e4 | 890 | |
fd062c84 DD |
891 | #ifdef CONFIG_HUGETLB_PAGE |
892 | uasm_l_tlb_huge_update(&l, p); | |
893 | UASM_i_LW(&p, K0, 0, K1); | |
894 | build_huge_update_entries(&p, K0, K1); | |
895 | build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random); | |
896 | #endif | |
897 | ||
875d43e7 | 898 | #ifdef CONFIG_64BIT |
1ec56329 | 899 | build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill); |
1da177e4 LT |
900 | #endif |
901 | ||
902 | /* | |
903 | * Overflow check: For the 64bit handler, we need at least one | |
904 | * free instruction slot for the wrap-around branch. In worst | |
905 | * case, if the intended insertion point is a delay slot, we | |
4b3f686d | 906 | * need three, with the second nop'ed and the third being |
1da177e4 LT |
907 | * unused. |
908 | */ | |
2a21c730 FZ |
909 | /* Loongson2 ebase is different than r4k, we have more space */ |
910 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) | |
1da177e4 LT |
911 | if ((p - tlb_handler) > 64) |
912 | panic("TLB refill handler space exceeded"); | |
913 | #else | |
e6f72d3a DD |
914 | if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) |
915 | || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) | |
916 | && uasm_insn_has_bdelay(relocs, | |
917 | tlb_handler + MIPS64_REFILL_INSNS - 3))) | |
1da177e4 LT |
918 | panic("TLB refill handler space exceeded"); |
919 | #endif | |
920 | ||
921 | /* | |
922 | * Now fold the handler in the TLB refill handler space. | |
923 | */ | |
2a21c730 | 924 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) |
1da177e4 LT |
925 | f = final_handler; |
926 | /* Simplest case, just copy the handler. */ | |
e30ec452 | 927 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); |
1da177e4 | 928 | final_len = p - tlb_handler; |
875d43e7 | 929 | #else /* CONFIG_64BIT */ |
e6f72d3a DD |
930 | f = final_handler + MIPS64_REFILL_INSNS; |
931 | if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { | |
1da177e4 | 932 | /* Just copy the handler. */ |
e30ec452 | 933 | uasm_copy_handler(relocs, labels, tlb_handler, p, f); |
1da177e4 LT |
934 | final_len = p - tlb_handler; |
935 | } else { | |
fd062c84 DD |
936 | #if defined(CONFIG_HUGETLB_PAGE) |
937 | const enum label_id ls = label_tlb_huge_update; | |
95affdda DD |
938 | #else |
939 | const enum label_id ls = label_vmalloc; | |
940 | #endif | |
941 | u32 *split; | |
942 | int ov = 0; | |
943 | int i; | |
944 | ||
945 | for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) | |
946 | ; | |
947 | BUG_ON(i == ARRAY_SIZE(labels)); | |
948 | split = labels[i].addr; | |
1da177e4 LT |
949 | |
950 | /* | |
95affdda | 951 | * See if we have overflown one way or the other. |
1da177e4 | 952 | */ |
95affdda DD |
953 | if (split > tlb_handler + MIPS64_REFILL_INSNS || |
954 | split < p - MIPS64_REFILL_INSNS) | |
955 | ov = 1; | |
956 | ||
957 | if (ov) { | |
958 | /* | |
959 | * Split two instructions before the end. One | |
960 | * for the branch and one for the instruction | |
961 | * in the delay slot. | |
962 | */ | |
963 | split = tlb_handler + MIPS64_REFILL_INSNS - 2; | |
964 | ||
965 | /* | |
966 | * If the branch would fall in a delay slot, | |
967 | * we must back up an additional instruction | |
968 | * so that it is no longer in a delay slot. | |
969 | */ | |
970 | if (uasm_insn_has_bdelay(relocs, split - 1)) | |
971 | split--; | |
972 | } | |
1da177e4 | 973 | /* Copy first part of the handler. */ |
e30ec452 | 974 | uasm_copy_handler(relocs, labels, tlb_handler, split, f); |
1da177e4 LT |
975 | f += split - tlb_handler; |
976 | ||
95affdda DD |
977 | if (ov) { |
978 | /* Insert branch. */ | |
979 | uasm_l_split(&l, final_handler); | |
980 | uasm_il_b(&f, &r, label_split); | |
981 | if (uasm_insn_has_bdelay(relocs, split)) | |
982 | uasm_i_nop(&f); | |
983 | else { | |
984 | uasm_copy_handler(relocs, labels, | |
985 | split, split + 1, f); | |
986 | uasm_move_labels(labels, f, f + 1, -1); | |
987 | f++; | |
988 | split++; | |
989 | } | |
1da177e4 LT |
990 | } |
991 | ||
992 | /* Copy the rest of the handler. */ | |
e30ec452 | 993 | uasm_copy_handler(relocs, labels, split, p, final_handler); |
e6f72d3a DD |
994 | final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + |
995 | (p - split); | |
1da177e4 | 996 | } |
875d43e7 | 997 | #endif /* CONFIG_64BIT */ |
1da177e4 | 998 | |
e30ec452 TS |
999 | uasm_resolve_relocs(relocs, labels); |
1000 | pr_debug("Wrote TLB refill handler (%u instructions).\n", | |
1001 | final_len); | |
1da177e4 | 1002 | |
91b05e67 | 1003 | memcpy((void *)ebase, final_handler, 0x100); |
92b1e6a6 FBH |
1004 | |
1005 | dump_handler((u32 *)ebase, 64); | |
1da177e4 LT |
1006 | } |
1007 | ||
1da177e4 LT |
1008 | /* |
1009 | * 128 instructions for the fastpath handler is generous and should | |
1010 | * never be exceeded. | |
1011 | */ | |
1012 | #define FASTPATH_SIZE 128 | |
1013 | ||
cbdbe07f FBH |
1014 | u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; |
1015 | u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; | |
1016 | u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; | |
1da177e4 | 1017 | |
234fcd14 | 1018 | static void __cpuinit |
bd1437e4 | 1019 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) |
1da177e4 LT |
1020 | { |
1021 | #ifdef CONFIG_SMP | |
1022 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1023 | if (cpu_has_64bits) | |
e30ec452 | 1024 | uasm_i_lld(p, pte, 0, ptr); |
1da177e4 LT |
1025 | else |
1026 | # endif | |
e30ec452 | 1027 | UASM_i_LL(p, pte, 0, ptr); |
1da177e4 LT |
1028 | #else |
1029 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1030 | if (cpu_has_64bits) | |
e30ec452 | 1031 | uasm_i_ld(p, pte, 0, ptr); |
1da177e4 LT |
1032 | else |
1033 | # endif | |
e30ec452 | 1034 | UASM_i_LW(p, pte, 0, ptr); |
1da177e4 LT |
1035 | #endif |
1036 | } | |
1037 | ||
234fcd14 | 1038 | static void __cpuinit |
e30ec452 | 1039 | iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, |
63b2d2f4 | 1040 | unsigned int mode) |
1da177e4 | 1041 | { |
63b2d2f4 TS |
1042 | #ifdef CONFIG_64BIT_PHYS_ADDR |
1043 | unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); | |
1044 | #endif | |
1045 | ||
e30ec452 | 1046 | uasm_i_ori(p, pte, pte, mode); |
1da177e4 LT |
1047 | #ifdef CONFIG_SMP |
1048 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1049 | if (cpu_has_64bits) | |
e30ec452 | 1050 | uasm_i_scd(p, pte, 0, ptr); |
1da177e4 LT |
1051 | else |
1052 | # endif | |
e30ec452 | 1053 | UASM_i_SC(p, pte, 0, ptr); |
1da177e4 LT |
1054 | |
1055 | if (r10000_llsc_war()) | |
e30ec452 | 1056 | uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); |
1da177e4 | 1057 | else |
e30ec452 | 1058 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); |
1da177e4 LT |
1059 | |
1060 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1061 | if (!cpu_has_64bits) { | |
e30ec452 TS |
1062 | /* no uasm_i_nop needed */ |
1063 | uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); | |
1064 | uasm_i_ori(p, pte, pte, hwmode); | |
1065 | uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); | |
1066 | uasm_il_beqz(p, r, pte, label_smp_pgtable_change); | |
1067 | /* no uasm_i_nop needed */ | |
1068 | uasm_i_lw(p, pte, 0, ptr); | |
1da177e4 | 1069 | } else |
e30ec452 | 1070 | uasm_i_nop(p); |
1da177e4 | 1071 | # else |
e30ec452 | 1072 | uasm_i_nop(p); |
1da177e4 LT |
1073 | # endif |
1074 | #else | |
1075 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1076 | if (cpu_has_64bits) | |
e30ec452 | 1077 | uasm_i_sd(p, pte, 0, ptr); |
1da177e4 LT |
1078 | else |
1079 | # endif | |
e30ec452 | 1080 | UASM_i_SW(p, pte, 0, ptr); |
1da177e4 LT |
1081 | |
1082 | # ifdef CONFIG_64BIT_PHYS_ADDR | |
1083 | if (!cpu_has_64bits) { | |
e30ec452 TS |
1084 | uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); |
1085 | uasm_i_ori(p, pte, pte, hwmode); | |
1086 | uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); | |
1087 | uasm_i_lw(p, pte, 0, ptr); | |
1da177e4 LT |
1088 | } |
1089 | # endif | |
1090 | #endif | |
1091 | } | |
1092 | ||
1093 | /* | |
1094 | * Check if PTE is present, if not then jump to LABEL. PTR points to | |
1095 | * the page table where this PTE is located, PTE will be reloaded | |
1096 | * with its original value. | |
1097 | */ | |
234fcd14 | 1098 | static void __cpuinit |
bd1437e4 | 1099 | build_pte_present(u32 **p, struct uasm_reloc **r, |
1da177e4 LT |
1100 | unsigned int pte, unsigned int ptr, enum label_id lid) |
1101 | { | |
6dd9344c DD |
1102 | if (kernel_uses_smartmips_rixi) { |
1103 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT); | |
1104 | uasm_il_beqz(p, r, pte, lid); | |
1105 | } else { | |
1106 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | |
1107 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); | |
1108 | uasm_il_bnez(p, r, pte, lid); | |
1109 | } | |
bd1437e4 | 1110 | iPTE_LW(p, pte, ptr); |
1da177e4 LT |
1111 | } |
1112 | ||
1113 | /* Make PTE valid, store result in PTR. */ | |
234fcd14 | 1114 | static void __cpuinit |
e30ec452 | 1115 | build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, |
1da177e4 LT |
1116 | unsigned int ptr) |
1117 | { | |
63b2d2f4 TS |
1118 | unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; |
1119 | ||
1120 | iPTE_SW(p, r, pte, ptr, mode); | |
1da177e4 LT |
1121 | } |
1122 | ||
1123 | /* | |
1124 | * Check if PTE can be written to, if not branch to LABEL. Regardless | |
1125 | * restore PTE with value from PTR when done. | |
1126 | */ | |
234fcd14 | 1127 | static void __cpuinit |
bd1437e4 | 1128 | build_pte_writable(u32 **p, struct uasm_reloc **r, |
1da177e4 LT |
1129 | unsigned int pte, unsigned int ptr, enum label_id lid) |
1130 | { | |
e30ec452 TS |
1131 | uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); |
1132 | uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); | |
1133 | uasm_il_bnez(p, r, pte, lid); | |
bd1437e4 | 1134 | iPTE_LW(p, pte, ptr); |
1da177e4 LT |
1135 | } |
1136 | ||
1137 | /* Make PTE writable, update software status bits as well, then store | |
1138 | * at PTR. | |
1139 | */ | |
234fcd14 | 1140 | static void __cpuinit |
e30ec452 | 1141 | build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, |
1da177e4 LT |
1142 | unsigned int ptr) |
1143 | { | |
63b2d2f4 TS |
1144 | unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID |
1145 | | _PAGE_DIRTY); | |
1146 | ||
1147 | iPTE_SW(p, r, pte, ptr, mode); | |
1da177e4 LT |
1148 | } |
1149 | ||
1150 | /* | |
1151 | * Check if PTE can be modified, if not branch to LABEL. Regardless | |
1152 | * restore PTE with value from PTR when done. | |
1153 | */ | |
234fcd14 | 1154 | static void __cpuinit |
bd1437e4 | 1155 | build_pte_modifiable(u32 **p, struct uasm_reloc **r, |
1da177e4 LT |
1156 | unsigned int pte, unsigned int ptr, enum label_id lid) |
1157 | { | |
e30ec452 TS |
1158 | uasm_i_andi(p, pte, pte, _PAGE_WRITE); |
1159 | uasm_il_beqz(p, r, pte, lid); | |
bd1437e4 | 1160 | iPTE_LW(p, pte, ptr); |
1da177e4 LT |
1161 | } |
1162 | ||
82622284 | 1163 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
1da177e4 LT |
1164 | /* |
1165 | * R3000 style TLB load/store/modify handlers. | |
1166 | */ | |
1167 | ||
fded2e50 MR |
1168 | /* |
1169 | * This places the pte into ENTRYLO0 and writes it with tlbwi. | |
1170 | * Then it returns. | |
1171 | */ | |
234fcd14 | 1172 | static void __cpuinit |
fded2e50 | 1173 | build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) |
1da177e4 | 1174 | { |
e30ec452 TS |
1175 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ |
1176 | uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ | |
1177 | uasm_i_tlbwi(p); | |
1178 | uasm_i_jr(p, tmp); | |
1179 | uasm_i_rfe(p); /* branch delay */ | |
1da177e4 LT |
1180 | } |
1181 | ||
1182 | /* | |
fded2e50 MR |
1183 | * This places the pte into ENTRYLO0 and writes it with tlbwi |
1184 | * or tlbwr as appropriate. This is because the index register | |
1185 | * may have the probe fail bit set as a result of a trap on a | |
1186 | * kseg2 access, i.e. without refill. Then it returns. | |
1da177e4 | 1187 | */ |
234fcd14 | 1188 | static void __cpuinit |
e30ec452 TS |
1189 | build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, |
1190 | struct uasm_reloc **r, unsigned int pte, | |
1191 | unsigned int tmp) | |
1192 | { | |
1193 | uasm_i_mfc0(p, tmp, C0_INDEX); | |
1194 | uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ | |
1195 | uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ | |
1196 | uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ | |
1197 | uasm_i_tlbwi(p); /* cp0 delay */ | |
1198 | uasm_i_jr(p, tmp); | |
1199 | uasm_i_rfe(p); /* branch delay */ | |
1200 | uasm_l_r3000_write_probe_fail(l, *p); | |
1201 | uasm_i_tlbwr(p); /* cp0 delay */ | |
1202 | uasm_i_jr(p, tmp); | |
1203 | uasm_i_rfe(p); /* branch delay */ | |
1da177e4 LT |
1204 | } |
1205 | ||
234fcd14 | 1206 | static void __cpuinit |
1da177e4 LT |
1207 | build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, |
1208 | unsigned int ptr) | |
1209 | { | |
1210 | long pgdc = (long)pgd_current; | |
1211 | ||
e30ec452 TS |
1212 | uasm_i_mfc0(p, pte, C0_BADVADDR); |
1213 | uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ | |
1214 | uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); | |
1215 | uasm_i_srl(p, pte, pte, 22); /* load delay */ | |
1216 | uasm_i_sll(p, pte, pte, 2); | |
1217 | uasm_i_addu(p, ptr, ptr, pte); | |
1218 | uasm_i_mfc0(p, pte, C0_CONTEXT); | |
1219 | uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ | |
1220 | uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ | |
1221 | uasm_i_addu(p, ptr, ptr, pte); | |
1222 | uasm_i_lw(p, pte, 0, ptr); | |
1223 | uasm_i_tlbp(p); /* load delay */ | |
1da177e4 LT |
1224 | } |
1225 | ||
234fcd14 | 1226 | static void __cpuinit build_r3000_tlb_load_handler(void) |
1da177e4 LT |
1227 | { |
1228 | u32 *p = handle_tlbl; | |
e30ec452 TS |
1229 | struct uasm_label *l = labels; |
1230 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1231 | |
1232 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); | |
1233 | memset(labels, 0, sizeof(labels)); | |
1234 | memset(relocs, 0, sizeof(relocs)); | |
1235 | ||
1236 | build_r3000_tlbchange_handler_head(&p, K0, K1); | |
bd1437e4 | 1237 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); |
e30ec452 | 1238 | uasm_i_nop(&p); /* load delay */ |
1da177e4 | 1239 | build_make_valid(&p, &r, K0, K1); |
fded2e50 | 1240 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1da177e4 | 1241 | |
e30ec452 TS |
1242 | uasm_l_nopage_tlbl(&l, p); |
1243 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | |
1244 | uasm_i_nop(&p); | |
1da177e4 LT |
1245 | |
1246 | if ((p - handle_tlbl) > FASTPATH_SIZE) | |
1247 | panic("TLB load handler fastpath space exceeded"); | |
1248 | ||
e30ec452 TS |
1249 | uasm_resolve_relocs(relocs, labels); |
1250 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | |
1251 | (unsigned int)(p - handle_tlbl)); | |
1da177e4 | 1252 | |
92b1e6a6 | 1253 | dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); |
1da177e4 LT |
1254 | } |
1255 | ||
234fcd14 | 1256 | static void __cpuinit build_r3000_tlb_store_handler(void) |
1da177e4 LT |
1257 | { |
1258 | u32 *p = handle_tlbs; | |
e30ec452 TS |
1259 | struct uasm_label *l = labels; |
1260 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1261 | |
1262 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); | |
1263 | memset(labels, 0, sizeof(labels)); | |
1264 | memset(relocs, 0, sizeof(relocs)); | |
1265 | ||
1266 | build_r3000_tlbchange_handler_head(&p, K0, K1); | |
bd1437e4 | 1267 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); |
e30ec452 | 1268 | uasm_i_nop(&p); /* load delay */ |
1da177e4 | 1269 | build_make_write(&p, &r, K0, K1); |
fded2e50 | 1270 | build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); |
1da177e4 | 1271 | |
e30ec452 TS |
1272 | uasm_l_nopage_tlbs(&l, p); |
1273 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | |
1274 | uasm_i_nop(&p); | |
1da177e4 LT |
1275 | |
1276 | if ((p - handle_tlbs) > FASTPATH_SIZE) | |
1277 | panic("TLB store handler fastpath space exceeded"); | |
1278 | ||
e30ec452 TS |
1279 | uasm_resolve_relocs(relocs, labels); |
1280 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | |
1281 | (unsigned int)(p - handle_tlbs)); | |
1da177e4 | 1282 | |
92b1e6a6 | 1283 | dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); |
1da177e4 LT |
1284 | } |
1285 | ||
234fcd14 | 1286 | static void __cpuinit build_r3000_tlb_modify_handler(void) |
1da177e4 LT |
1287 | { |
1288 | u32 *p = handle_tlbm; | |
e30ec452 TS |
1289 | struct uasm_label *l = labels; |
1290 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1291 | |
1292 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); | |
1293 | memset(labels, 0, sizeof(labels)); | |
1294 | memset(relocs, 0, sizeof(relocs)); | |
1295 | ||
1296 | build_r3000_tlbchange_handler_head(&p, K0, K1); | |
bd1437e4 | 1297 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); |
e30ec452 | 1298 | uasm_i_nop(&p); /* load delay */ |
1da177e4 | 1299 | build_make_write(&p, &r, K0, K1); |
fded2e50 | 1300 | build_r3000_pte_reload_tlbwi(&p, K0, K1); |
1da177e4 | 1301 | |
e30ec452 TS |
1302 | uasm_l_nopage_tlbm(&l, p); |
1303 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | |
1304 | uasm_i_nop(&p); | |
1da177e4 LT |
1305 | |
1306 | if ((p - handle_tlbm) > FASTPATH_SIZE) | |
1307 | panic("TLB modify handler fastpath space exceeded"); | |
1308 | ||
e30ec452 TS |
1309 | uasm_resolve_relocs(relocs, labels); |
1310 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | |
1311 | (unsigned int)(p - handle_tlbm)); | |
1da177e4 | 1312 | |
92b1e6a6 | 1313 | dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); |
1da177e4 | 1314 | } |
82622284 | 1315 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ |
1da177e4 LT |
1316 | |
1317 | /* | |
1318 | * R4000 style TLB load/store/modify handlers. | |
1319 | */ | |
234fcd14 | 1320 | static void __cpuinit |
e30ec452 TS |
1321 | build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, |
1322 | struct uasm_reloc **r, unsigned int pte, | |
1da177e4 LT |
1323 | unsigned int ptr) |
1324 | { | |
875d43e7 | 1325 | #ifdef CONFIG_64BIT |
1da177e4 LT |
1326 | build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */ |
1327 | #else | |
1328 | build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ | |
1329 | #endif | |
1330 | ||
fd062c84 DD |
1331 | #ifdef CONFIG_HUGETLB_PAGE |
1332 | /* | |
1333 | * For huge tlb entries, pmd doesn't contain an address but | |
1334 | * instead contains the tlb pte. Check the PAGE_HUGE bit and | |
1335 | * see if we need to jump to huge tlb processing. | |
1336 | */ | |
1337 | build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update); | |
1338 | #endif | |
1339 | ||
e30ec452 TS |
1340 | UASM_i_MFC0(p, pte, C0_BADVADDR); |
1341 | UASM_i_LW(p, ptr, 0, ptr); | |
1342 | UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); | |
1343 | uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2); | |
1344 | UASM_i_ADDU(p, ptr, ptr, pte); | |
1da177e4 LT |
1345 | |
1346 | #ifdef CONFIG_SMP | |
e30ec452 TS |
1347 | uasm_l_smp_pgtable_change(l, *p); |
1348 | #endif | |
bd1437e4 | 1349 | iPTE_LW(p, pte, ptr); /* get even pte */ |
8df5beac MR |
1350 | if (!m4kc_tlbp_war()) |
1351 | build_tlb_probe_entry(p); | |
1da177e4 LT |
1352 | } |
1353 | ||
234fcd14 | 1354 | static void __cpuinit |
e30ec452 TS |
1355 | build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, |
1356 | struct uasm_reloc **r, unsigned int tmp, | |
1da177e4 LT |
1357 | unsigned int ptr) |
1358 | { | |
e30ec452 TS |
1359 | uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); |
1360 | uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); | |
1da177e4 LT |
1361 | build_update_entries(p, tmp, ptr); |
1362 | build_tlb_write_entry(p, l, r, tlb_indexed); | |
e30ec452 TS |
1363 | uasm_l_leave(l, *p); |
1364 | uasm_i_eret(p); /* return from trap */ | |
1da177e4 | 1365 | |
875d43e7 | 1366 | #ifdef CONFIG_64BIT |
1ec56329 | 1367 | build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); |
1da177e4 LT |
1368 | #endif |
1369 | } | |
1370 | ||
234fcd14 | 1371 | static void __cpuinit build_r4000_tlb_load_handler(void) |
1da177e4 LT |
1372 | { |
1373 | u32 *p = handle_tlbl; | |
e30ec452 TS |
1374 | struct uasm_label *l = labels; |
1375 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1376 | |
1377 | memset(handle_tlbl, 0, sizeof(handle_tlbl)); | |
1378 | memset(labels, 0, sizeof(labels)); | |
1379 | memset(relocs, 0, sizeof(relocs)); | |
1380 | ||
1381 | if (bcm1250_m3_war()) { | |
3d45285d RB |
1382 | unsigned int segbits = 44; |
1383 | ||
1384 | uasm_i_dmfc0(&p, K0, C0_BADVADDR); | |
1385 | uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | |
e30ec452 | 1386 | uasm_i_xor(&p, K0, K0, K1); |
3be6022c DD |
1387 | uasm_i_dsrl_safe(&p, K1, K0, 62); |
1388 | uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); | |
1389 | uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); | |
3d45285d | 1390 | uasm_i_or(&p, K0, K0, K1); |
e30ec452 TS |
1391 | uasm_il_bnez(&p, &r, K0, label_leave); |
1392 | /* No need for uasm_i_nop */ | |
1da177e4 LT |
1393 | } |
1394 | ||
1395 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); | |
bd1437e4 | 1396 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); |
8df5beac MR |
1397 | if (m4kc_tlbp_war()) |
1398 | build_tlb_probe_entry(&p); | |
6dd9344c DD |
1399 | |
1400 | if (kernel_uses_smartmips_rixi) { | |
1401 | /* | |
1402 | * If the page is not _PAGE_VALID, RI or XI could not | |
1403 | * have triggered it. Skip the expensive test.. | |
1404 | */ | |
1405 | uasm_i_andi(&p, K0, K0, _PAGE_VALID); | |
1406 | uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1); | |
1407 | uasm_i_nop(&p); | |
1408 | ||
1409 | uasm_i_tlbr(&p); | |
1410 | /* Examine entrylo 0 or 1 based on ptr. */ | |
1411 | uasm_i_andi(&p, K0, K1, sizeof(pte_t)); | |
1412 | uasm_i_beqz(&p, K0, 8); | |
1413 | ||
1414 | UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/ | |
1415 | UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ | |
1416 | /* | |
1417 | * If the entryLo (now in K0) is valid (bit 1), RI or | |
1418 | * XI must have triggered it. | |
1419 | */ | |
1420 | uasm_i_andi(&p, K0, K0, 2); | |
1421 | uasm_il_bnez(&p, &r, K0, label_nopage_tlbl); | |
1422 | ||
1423 | uasm_l_tlbl_goaround1(&l, p); | |
1424 | /* Reload the PTE value */ | |
1425 | iPTE_LW(&p, K0, K1); | |
1426 | } | |
1da177e4 LT |
1427 | build_make_valid(&p, &r, K0, K1); |
1428 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); | |
1429 | ||
fd062c84 DD |
1430 | #ifdef CONFIG_HUGETLB_PAGE |
1431 | /* | |
1432 | * This is the entry point when build_r4000_tlbchange_handler_head | |
1433 | * spots a huge page. | |
1434 | */ | |
1435 | uasm_l_tlb_huge_update(&l, p); | |
1436 | iPTE_LW(&p, K0, K1); | |
1437 | build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); | |
1438 | build_tlb_probe_entry(&p); | |
6dd9344c DD |
1439 | |
1440 | if (kernel_uses_smartmips_rixi) { | |
1441 | /* | |
1442 | * If the page is not _PAGE_VALID, RI or XI could not | |
1443 | * have triggered it. Skip the expensive test. | |
1444 | */ | |
1445 | uasm_i_andi(&p, K0, K0, _PAGE_VALID); | |
1446 | uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); | |
1447 | uasm_i_nop(&p); | |
1448 | ||
1449 | uasm_i_tlbr(&p); | |
1450 | /* Examine entrylo 0 or 1 based on ptr. */ | |
1451 | uasm_i_andi(&p, K0, K1, sizeof(pte_t)); | |
1452 | uasm_i_beqz(&p, K0, 8); | |
1453 | ||
1454 | UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */ | |
1455 | UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */ | |
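		/*
		 * Same delay-slot trick as above: ENTRYLO0 always executes,
		 * ENTRYLO1 only when ptr is odd.
		 */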
1456 | /* | |
1457 | * If the entryLo (now in K0) is valid (bit 1), RI or | |
1458 | * XI must have triggered it. | |
1459 | */ | |
1460 | uasm_i_andi(&p, K0, K0, 2); | |
1461 | uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2); | |
1462 | /* Reload the PTE value */ | |
1463 | iPTE_LW(&p, K0, K1); | |
1464 | ||
1465 | /* | |
1466 | * We clobbered C0_PAGEMASK; restore it. On the other branch | |
1467 | * it is restored in build_huge_tlb_write_entry. | |
1468 | */ | |
1469 | build_restore_pagemask(&p, &r, K0, label_nopage_tlbl); | |
1470 | ||
1471 | uasm_l_tlbl_goaround2(&l, p); | |
1472 | } | |
fd062c84 DD |
1473 | uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); |
1474 | build_huge_handler_tail(&p, &r, &l, K0, K1); | |
1475 | #endif | |
1476 | ||
e30ec452 TS |
1477 | uasm_l_nopage_tlbl(&l, p); |
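	/*
	 * The slowpath is reached with a plain 'j', whose target field only
	 * encodes the low 28 bits of the (word-aligned) destination, hence
	 * the & 0x0fffffff; the handler and tlb_do_page_fault_0 are assumed
	 * to live in the same 256MB segment.
	 */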
1478 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | |
1479 | uasm_i_nop(&p); | |
1da177e4 LT |
1480 | |
1481 | if ((p - handle_tlbl) > FASTPATH_SIZE) | |
1482 | panic("TLB load handler fastpath space exceeded"); | |
1483 | ||
e30ec452 TS |
1484 | uasm_resolve_relocs(relocs, labels); |
1485 | pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | |
1486 | (unsigned int)(p - handle_tlbl)); | |
1da177e4 | 1487 | |
92b1e6a6 | 1488 | dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); |
1da177e4 LT |
1489 | } |
1490 | ||
234fcd14 | 1491 | static void __cpuinit build_r4000_tlb_store_handler(void) |
1da177e4 LT |
1492 | { |
1493 | u32 *p = handle_tlbs; | |
e30ec452 TS |
1494 | struct uasm_label *l = labels; |
1495 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1496 | |
1497 | memset(handle_tlbs, 0, sizeof(handle_tlbs)); | |
1498 | memset(labels, 0, sizeof(labels)); | |
1499 | memset(relocs, 0, sizeof(relocs)); | |
1500 | ||
1501 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); | |
bd1437e4 | 1502 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); |
8df5beac MR |
1503 | if (m4kc_tlbp_war()) |
1504 | build_tlb_probe_entry(&p); | |
1da177e4 LT |
1505 | build_make_write(&p, &r, K0, K1); |
1506 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); | |
1507 | ||
fd062c84 DD |
1508 | #ifdef CONFIG_HUGETLB_PAGE |
1509 | /* | |
1510 | * This is the entry point when | |
1511 | * build_r4000_tlbchange_handler_head spots a huge page. | |
1512 | */ | |
1513 | uasm_l_tlb_huge_update(&l, p); | |
1514 | iPTE_LW(&p, K0, K1); | |
1515 | build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); | |
1516 | build_tlb_probe_entry(&p); | |
1517 | uasm_i_ori(&p, K0, K0, | |
1518 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); | |
1519 | build_huge_handler_tail(&p, &r, &l, K0, K1); | |
1520 | #endif | |
1521 | ||
e30ec452 TS |
1522 | uasm_l_nopage_tlbs(&l, p); |
1523 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | |
1524 | uasm_i_nop(&p); | |
1da177e4 LT |
1525 | |
1526 | if ((p - handle_tlbs) > FASTPATH_SIZE) | |
1527 | panic("TLB store handler fastpath space exceeded"); | |
1528 | ||
e30ec452 TS |
1529 | uasm_resolve_relocs(relocs, labels); |
1530 | pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | |
1531 | (unsigned int)(p - handle_tlbs)); | |
1da177e4 | 1532 | |
92b1e6a6 | 1533 | dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); |
1da177e4 LT |
1534 | } |
1535 | ||
234fcd14 | 1536 | static void __cpuinit build_r4000_tlb_modify_handler(void) |
1da177e4 LT |
1537 | { |
1538 | u32 *p = handle_tlbm; | |
e30ec452 TS |
1539 | struct uasm_label *l = labels; |
1540 | struct uasm_reloc *r = relocs; | |
1da177e4 LT |
1541 | |
1542 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); | |
1543 | memset(labels, 0, sizeof(labels)); | |
1544 | memset(relocs, 0, sizeof(relocs)); | |
1545 | ||
1546 | build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); | |
bd1437e4 | 1547 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); |
8df5beac MR |
1548 | if (m4kc_tlbp_war()) |
1549 | build_tlb_probe_entry(&p); | |
1da177e4 LT |
1550 | /* Present and writable bits set, set accessed and dirty bits. */ |
1551 | build_make_write(&p, &r, K0, K1); | |
1552 | build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); | |
1553 | ||
fd062c84 DD |
1554 | #ifdef CONFIG_HUGETLB_PAGE |
1555 | /* | |
1556 | * This is the entry point when | |
1557 | * build_r4000_tlbchange_handler_head spots a huge page. | |
1558 | */ | |
1559 | uasm_l_tlb_huge_update(&l, p); | |
1560 | iPTE_LW(&p, K0, K1); | |
1561 | build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); | |
1562 | build_tlb_probe_entry(&p); | |
1563 | uasm_i_ori(&p, K0, K0, | |
1564 | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); | |
1565 | build_huge_handler_tail(&p, &r, &l, K0, K1); | |
1566 | #endif | |
1567 | ||
e30ec452 TS |
1568 | uasm_l_nopage_tlbm(&l, p); |
1569 | uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | |
1570 | uasm_i_nop(&p); | |
1da177e4 LT |
1571 | |
1572 | if ((p - handle_tlbm) > FASTPATH_SIZE) | |
1573 | panic("TLB modify handler fastpath space exceeded"); | |
1574 | ||
e30ec452 TS |
1575 | uasm_resolve_relocs(relocs, labels); |
1576 | pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | |
1577 | (unsigned int)(p - handle_tlbm)); | |
115f2a44 | 1578 | |
92b1e6a6 | 1579 | dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); |
1da177e4 LT |
1580 | } |
1581 | ||
234fcd14 | 1582 | void __cpuinit build_tlb_refill_handler(void) |
1da177e4 LT |
1583 | { |
1584 | /* | |
1585 | * The refill handler is generated per-CPU; multi-node systems | |
1586 | * may keep a local copy of it. The other handlers are only | |
1587 | * needed once. | |
1588 | */ | |
1589 | static int run_once = 0; | |
1590 | ||
1ec56329 DD |
1591 | #ifdef CONFIG_64BIT |
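	/*
	 * A pgd covers PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3 bits of
	 * virtual address (PTRS_PER_PGD == 2^(PGD_ORDER + PAGE_SHIFT - 3)
	 * entries).  If the CPU implements more VA bits than that, the
	 * refill handler has to check for "high segbits" addresses
	 * explicitly rather than walking off the end of the pgd.
	 */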
1592 | check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); | |
1593 | #endif | |
1594 | ||
10cc3529 | 1595 | switch (current_cpu_type()) { |
1da177e4 LT |
1596 | case CPU_R2000: |
1597 | case CPU_R3000: | |
1598 | case CPU_R3000A: | |
1599 | case CPU_R3081E: | |
1600 | case CPU_TX3912: | |
1601 | case CPU_TX3922: | |
1602 | case CPU_TX3927: | |
82622284 | 1603 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
1da177e4 LT |
1604 | build_r3000_tlb_refill_handler(); |
1605 | if (!run_once) { | |
1606 | build_r3000_tlb_load_handler(); | |
1607 | build_r3000_tlb_store_handler(); | |
1608 | build_r3000_tlb_modify_handler(); | |
1609 | run_once++; | |
1610 | } | |
82622284 DD |
1611 | #else |
1612 | panic("No R3000 TLB refill handler"); | |
1613 | #endif | |
1da177e4 LT |
1614 | break; |
1615 | ||
1616 | case CPU_R6000: | |
1617 | case CPU_R6000A: | |
1618 | panic("No R6000 TLB refill handler yet"); | |
1619 | break; | |
1620 | ||
1621 | case CPU_R8000: | |
1622 | panic("No R8000 TLB refill handler yet"); | |
1623 | break; | |
1624 | ||
1625 | default: | |
1626 | build_r4000_tlb_refill_handler(); | |
1627 | if (!run_once) { | |
1628 | build_r4000_tlb_load_handler(); | |
1629 | build_r4000_tlb_store_handler(); | |
1630 | build_r4000_tlb_modify_handler(); | |
1631 | run_once++; | |
1632 | } | |
1633 | } | |
1634 | } | |
1d40cfcd | 1635 | |
234fcd14 | 1636 | void __cpuinit flush_tlb_handlers(void) |
1d40cfcd | 1637 | { |
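	/*
	 * The handlers were synthesized with ordinary stores into the
	 * handle_tlb* arrays; flush them out of the D-cache and invalidate
	 * the I-cache so they can be fetched as exception code.
	 */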
e0cee3ee | 1638 | local_flush_icache_range((unsigned long)handle_tlbl, |
1d40cfcd | 1639 | (unsigned long)handle_tlbl + sizeof(handle_tlbl)); |
e0cee3ee | 1640 | local_flush_icache_range((unsigned long)handle_tlbs, |
1d40cfcd | 1641 | (unsigned long)handle_tlbs + sizeof(handle_tlbs)); |
e0cee3ee | 1642 | local_flush_icache_range((unsigned long)handle_tlbm, |
1d40cfcd RB |
1643 | (unsigned long)handle_tlbm + sizeof(handle_tlbm)); |
1644 | } |