/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kdb.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/irq.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
	struct mips_fpu_struct *ctx, int has_fpu);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);


static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	struct pt_regs regs;

	prepare_frametrace(&regs);
	show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}

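/*
 * Note on the pc16 path above: an EPC with bit 0 set indicates MIPS16
 * mode, so the dump switches to halfword units and prints four hex
 * digits per instruction instead of eight.
 */
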
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x    ", (uint32_t) regs->cp0_status);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char * str, struct pt_regs * regs)
{
	static int die_counter;
	int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

	notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;

	/* XXX For now.  Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, SIGBUS, 0, 0)
	    == NOTIFY_STOP)
		return;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and compute the referenced address.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;
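
	/*
	 * Worked example of the sign extension above: this path only runs
	 * on 32-bit ll/sc-less CPUs, so with a 32-bit long an encoded
	 * offset of 0xfff8 becomes 0xfff80000 after the left shift and
	 * -8 (0xfffffff8) after the arithmetic right shift.
	 */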

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and compute the referenced address.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL)
		return simulate_ll(regs, opcode);
	if ((opcode & OPCODE) == SC)
		return simulate_sc(regs, opcode);

	return -1;			/* Must be something else ... */
}

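/*
 * For reference, the kind of sequence being interpreted here is the
 * classic load-linked/store-conditional update loop:
 *
 *	1:	ll	t0, 0(a0)
 *		addu	t0, t0, a1
 *		sc	t0, 0(a0)
 *		beqz	t0, 1b
 *		 nop
 *
 * On an ll/sc-less CPU every ll and sc in it traps and is emulated by
 * the routines above, with ll_bit/ll_task standing in for the hardware
 * link state.
 */
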
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
	struct thread_info *ti = task_thread_info(current);

	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;
		switch (rd) {
		case 0:		/* CPU number */
			regs->regs[rt] = smp_processor_id();
			return 0;
		case 1:		/* SYNCI length */
			regs->regs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			return 0;
		case 2:		/* Read count register */
			regs->regs[rt] = read_c0_count();
			return 0;
		case 3:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				regs->regs[rt] = 1;
				break;
			default:
				regs->regs[rt] = 2;
			}
			return 0;
		case 29:
			regs->regs[rt] = ti->tp_value;
			return 0;
		default:
			return -1;
		}
	}

	/* Not ours.  */
	return -1;
}
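
/*
 * Hardware register 29 above is the userland thread pointer: TLS access
 * in userspace is typically compiled to "rdhwr $3, $29", which lands
 * here when the CPU lacks RDHWR support and raises a reserved
 * instruction exception instead.
 */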

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
		return 0;

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	siginfo_t info;

	if (notify_die(DIE_FP, "FP exception", regs, SIGFPE, 0, 0)
	    == NOTIFY_STOP)
		return;
	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved FP context again.  */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * Address errors may be deliberately induced by the FPU
		 * emulator to retake control of the CPU after executing the
		 * instruction in the delay slot of an emulated branch.
		 *
		 * Terminate if the exception was recognized as a delay slot
		 * return; otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/*
	 * There is an ancient bug in MIPS assemblers: the break code is
	 * placed starting at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode >= (1 << 10))
		bcode >>= 10;
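
	/*
	 * Worked example: "break 7" correctly assembled has 7 at bit 6,
	 * so bcode reads back as 7 and is used directly.  A buggy
	 * assembler puts the 7 at bit 16 instead, so bcode reads back as
	 * 7 << 10 = 0x1c00, trips the test above and is shifted back
	 * down to 7.  Codes of 1024 and above stay ambiguous either way.
	 */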

	do_trap_or_bp(regs, bcode, "Break");
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	do_trap_or_bp(regs, tcode, "Trap");
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned int opcode = 0;
	int status = -1;

	if (notify_die(DIE_RI, "RI Fault", regs, SIGSEGV, 0, 0)
	    == NOTIFY_STOP)
		return;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		return;

	if (unlikely(get_user(opcode, epc) < 0))
		status = SIGSEGV;

	if (!cpu_has_llsc && status < 0)
		status = simulate_llsc(regs, opcode);

	if (status < 0)
		status = simulate_rdhwr(regs, opcode);

	if (status < 0)
		status = simulate_sync(regs, opcode);

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;	/* Undo skip-over.  */
		force_sig(status, current);
	}
}
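
/*
 * The simulate_*() helpers share a simple convention: a negative return
 * means "not my instruction, try the next emulator", zero means handled,
 * and a positive value is the signal to deliver after undoing the EPC
 * skip-over above.
 */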

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpus_and(tmask, current->cpus_allowed,
				mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	switch (action) {
	default:
		die_if_kernel("Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
		/* Fall through  */

	case CU2_EXCEPTION:
		force_sig(SIGILL, current);
	}

	return NOTIFY_OK;
}

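/*
 * A platform with real CP2 state hooks this chain via the cu2_notifier()
 * helper from <asm/cop2.h>.  A minimal sketch (handler name and body are
 * illustrative only):
 *
 *	static int my_cu2_call(struct notifier_block *nfb,
 *			       unsigned long action, void *data)
 *	{
 *		if (action == CU2_EXCEPTION) {
 *			// enable CP2 / restore its context here
 *			return NOTIFY_STOP;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	cu2_notifier(my_cu2_call, 0);
 *
 * default_cu2_call() above runs last (it is registered with priority
 * 0x80000000 at the end of trap_init()) and turns anything still
 * unhandled into SIGILL.
 */
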
asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int __user *epc;
	unsigned long old_epc;
	unsigned int opcode;
	unsigned int cpid;
	int status;
	unsigned long __maybe_unused flags;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			return;

		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr(regs, opcode);

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			force_sig(status, current);
		}

		return;

	case 1:
		if (used_math())	/* Using the FPU again.  */
			own_fpu(1);
		else {			/* First time FPU user.  */
			init_fpu();
			set_used_math();
		}

		if (!raw_cpu_has_fpu) {
			int sig;
			sig = fpu_emulator_cop1Handler(regs,
						&current->thread.fpu, 0);
			if (sig)
				force_sig(sig, current);
			else
				mt_ase_fp_affinity();
		}

		return;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		return;

	case 3:
		break;
	}

	force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	u32 cause;

	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		printk("Index   : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hardware or software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

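/*
 * Both are kernel command line knobs: booting with "nol1par" and/or
 * "nol2par" forces the corresponding cache parity protection off even
 * when the probe below finds it supported.
 */
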
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC).  EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 */
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
	die("NMI", regs);
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		unsigned long jump_mask = ~((1 << 28) - 1);
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
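
/*
 * For exception 0 (interrupt) with cpu_has_divec, the uasm calls above
 * emit either a direct "j handler; nop" into the dedicated vector at
 * ebase + 0x200 (when the handler lies in the same 256 MB jump region)
 * or a "la k0, handler; jr k0; nop" trampoline when it does not.
 */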

static asmlinkage void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u32 *w;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		 */

		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = (cpu_wait == r4k_wait) ?
			&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
		const int handler_len = &except_vec_vi_end - vec_start;
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */

		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler
		 *
		 * It is the handler's responsibility to save registers if
		 * required (e.g. hi/lo) and return from the exception using
		 * "eret".
		 */
		w = (u32 *)b;
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
		*w = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
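
/*
 * The lui/ori patching above splits the 32-bit handler address into
 * halfwords, rewriting only the immediate fields of the stub's lui and
 * ori instructions: for a handler at, say, 0x8012f680 they end up
 * carrying 0x8012 and 0xf680.  No carry fixup is needed because ori,
 * unlike addiu, zero-extends its immediate.
 */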

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

static int __cpuinitdata noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

void __cpuinit per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

	/*
	 * Only do per_cpu_trap_init() for first TC of each VPE.
	 * Note that this hack assumes that the SMTC init code
	 * assigns TCs consecutively and in ascending order.
	 */

	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
		secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	if (cpu_has_mips_r2) {
		unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;

		if (!noulri && cpu_has_userlocal)
			enable |= (1 << 29);

		write_c0_hwrena(enable);
	}

#ifdef CONFIG_MIPS_MT_SMTC
	if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode  */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 */
	if (cpu_has_mips_r2) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		if (cp0_perfcount_irq == cp0_compare_irq)
			cp0_perfcount_irq = -1;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = cp0_compare_irq;
		cp0_perfcount_irq = -1;
	}

#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
	if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_cache_init();
		tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	} else if (!secondaryTC) {
		/*
		 * First TC in non-boot VPE must do subset of tlb_init()
		 * for MMU control registers.
		 */
		write_c0_pagemask(PM_DEFAULT_MASK);
		write_c0_wired(0);
	}
#endif /* CONFIG_MIPS_MT_SMTC */
}

/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(ebase + offset), addr, size);
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] __cpuinitdata =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;
	unsigned long i;
	int rollback;

	check_wait();
	rollback = (cpu_wait == r4k_wait);

#if defined(CONFIG_KGDB)
	if (kgdb_early_setup)
		return;	/* Already done */
#endif

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
		ebase = CKSEG0;
		if (cpu_has_mips_r2)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but do
	 * it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(0, rollback ? rollback_handle_int : handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}


	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);
	flush_tlb_handlers();

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}
1da177e4 1733}