2 * Low-level system-call handling, trap handlers and context-switching
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
18 #include <linux/sys.h>
19 #include <linux/linkage.h>
21 #include <asm/entry.h>
22 #include <asm/current.h>
23 #include <asm/processor.h>
24 #include <asm/exceptions.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/thread_info.h>
29 #include <asm/unistd.h>
31 #include <linux/errno.h>
32 #include <asm/signal.h>
/* The size of a state save frame: the pt_regs area plus the
 * argument-spill space kept below it. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Declare a global, 4-byte-aligned entry point. */
#define C_ENTRY(name) .globl name; .align 4; name
43 * Various ways of setting and clearing BIP in flags reg.
44 * This is mucky, but necessary using microblaze version that
45 * allows msr ops to write to BIP
47 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
102 andi r11, r11, ~MSR_BIP
110 ori r11, r11, MSR_BIP
118 andi r11, r11, ~MSR_EIP
134 andi r11, r11, ~MSR_IE
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
168 andni r11, r11, (MSR_VMS|MSR_UMS)
174 /* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
179 /* turn on virtual protected mode save */
185 /* turn off virtual protected mode save and user mode save*/
188 rted r0, TOPHYS(1f); \
192 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
193 swi r5, r1, PTO+PT_R5; \
194 swi r6, r1, PTO+PT_R6; \
195 swi r7, r1, PTO+PT_R7; \
196 swi r8, r1, PTO+PT_R8; \
197 swi r9, r1, PTO+PT_R9; \
198 swi r10, r1, PTO+PT_R10; \
199 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
200 swi r12, r1, PTO+PT_R12; \
201 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
202 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
203 swi r15, r1, PTO+PT_R15; /* Save LP */ \
204 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
205 swi r19, r1, PTO+PT_R19; \
206 swi r20, r1, PTO+PT_R20; \
207 swi r21, r1, PTO+PT_R21; \
208 swi r22, r1, PTO+PT_R22; \
209 swi r23, r1, PTO+PT_R23; \
210 swi r24, r1, PTO+PT_R24; \
211 swi r25, r1, PTO+PT_R25; \
212 swi r26, r1, PTO+PT_R26; \
213 swi r27, r1, PTO+PT_R27; \
214 swi r28, r1, PTO+PT_R28; \
215 swi r29, r1, PTO+PT_R29; \
216 swi r30, r1, PTO+PT_R30; \
217 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
218 mfs r11, rmsr; /* save MSR */ \
220 swi r11, r1, PTO+PT_MSR;
/* Restore the general-purpose register state from the pt_regs frame at
 * r1+PTO. The saved MSR is loaded into r11 first; r11 itself is then
 * reloaded with its own saved value further down. r1 (the stack pointer)
 * is restored separately by the caller-specific exit path. */
#define RESTORE_REGS \
lwi r11, r1, PTO+PT_MSR; \
lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
lwi r5, r1, PTO+PT_R5; \
lwi r6, r1, PTO+PT_R6; \
lwi r7, r1, PTO+PT_R7; \
lwi r8, r1, PTO+PT_R8; \
lwi r9, r1, PTO+PT_R9; \
lwi r10, r1, PTO+PT_R10; \
lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
lwi r12, r1, PTO+PT_R12; \
lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
lwi r15, r1, PTO+PT_R15; /* restore LP */ \
lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
lwi r19, r1, PTO+PT_R19; \
lwi r20, r1, PTO+PT_R20; \
lwi r21, r1, PTO+PT_R21; \
lwi r22, r1, PTO+PT_R22; \
lwi r23, r1, PTO+PT_R23; \
lwi r24, r1, PTO+PT_R24; \
lwi r25, r1, PTO+PT_R25; \
lwi r26, r1, PTO+PT_R26; \
lwi r27, r1, PTO+PT_R27; \
lwi r28, r1, PTO+PT_R28; \
lwi r29, r1, PTO+PT_R29; \
lwi r30, r1, PTO+PT_R30; \
lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
258 * System calls are handled here.
261 * Syscall number in r12, args in r5-r10
264 * Trap entered via brki instruction, so BIP bit is set, and interrupts
265 * are masked. This is nice, means we don't have to CLI before state save
/* System-call entry (brki vector). On entry: syscall number in r12,
 * args in r5-r10, BIP set so interrupts are masked. We save state on
 * the kernel stack, dispatch through sys_call_table, and return via
 * ret_from_trap. */
C_ENTRY(_user_exception):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
addi r14, r14, 4 /* return address is 4 bytes after the brki */
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
beqi r11, 1f; /* Jump ahead if coming from user */
/* Kernel-mode state save. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
addi r11, r0, 1; /* Was in kernel-mode. */
swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
nop; /* Fill delay slot */
/* User-mode state save. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
addik r1, r1, THREAD_SIZE;
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
la r15, r0, ret_from_trap-8
/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
/* See if the system call number is valid. */
addi r11, r12, -__NR_syscalls;
/* Figure out which function to use for this system call. */
/* Note Microblaze barrel shift is optional, so don't rely on it */
add r12, r12, r12; /* convert num -> ptr */
/* Trace syscalls and store counters in r0_ram (per-syscall slot) */
lwi r3, r12, 0x400 + TOPHYS(r0_ram)
swi r3, r12, 0x400 + TOPHYS(r0_ram)
lwi r12, r12, TOPHYS(sys_call_table); /* Function ptr */
/* Make the system call through the function pointer in r12 */
/* The syscall number is invalid, return an error. */
1: VM_ON; /* RETURN() expects virtual mode*/
addi r3, r0, -ENOSYS;
rtsd r15,8; /* looks like a normal subroutine return */
342 /* Entry point used to return from a syscall/trap. */
343 /* We re-enable BIP bit before state restore */
/* Common syscall/trap exit: mask interrupts, then — if returning to
 * user mode — run the scheduler and deliver pending signals before
 * restoring state and returning with rtbd. Syscall result lives in
 * r3/r4 and is preserved across schedule()/do_signal(). */
C_ENTRY(ret_from_trap):
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
/* We're returning to user mode, so check for various conditions that
 * trigger rescheduling. */
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
swi r3, r1, PTO + PT_R3; /* store syscall result */
swi r4, r1, PTO + PT_R4;
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* No signals pending - skip signal delivery */
swi r3, r1, PTO + PT_R3; /* store syscall result */
swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 1; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
/* Return to kernel state. */
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
TRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
406 /* These syscalls need access to the struct pt_regs on the stack, so we
407 implement them in assembly (they're basically all wrappers anyway). */
/* fork(2) wrapper: build the do_fork() argument list from the saved
 * user context and tail-call do_fork. SIGCHLD is the clone flags, the
 * child reuses the parent's stack pointer, and r7 passes the pt_regs.
 * BUG FIX: `add r8. r0, r0' used a period instead of a comma, which is
 * an assembler syntax error — corrected to `add r8, r0, r0'. */
C_ENTRY(sys_fork_wrapper):
addi r5, r0, SIGCHLD /* Arg 0: flags */
lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
la r7, r1, PTO /* Arg 2: parent context */
add r8, r0, r0 /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
add r10, r0, r0; /* Arg 5: (unused) */
brid do_fork /* Do real work (tail-call) */
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall. This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
/* First code a new child runs: call schedule_tail(prev) with the task
 * switch_thread returned (moved into position in the delay slot), zero
 * the child's fork return value, and exit through ret_from_trap. */
C_ENTRY(ret_from_fork):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r3, r5, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
add r3, r0, r0; /* Child's fork call should return 0. */
brid ret_from_trap; /* Do normal trap return */
/* vfork(2) wrapper: tail-call sys_vfork. NOTE(review): argument setup
 * (pt_regs pointer) is expected before the brid — confirm in the full
 * source; those lines are elided in this view. */
C_ENTRY(sys_vfork_wrapper):
brid sys_vfork /* Do real work (tail-call) */
/* clone(2) wrapper: if the caller passed a NULL child stack, reuse the
 * parent's saved SP; pass the pt_regs as parent context and tail-call
 * do_fork. */
C_ENTRY(sys_clone_wrapper):
bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
lwi r6, r1, PTO+PT_R1; /* If so, use parent's stack ptr */
1: la r7, r1, PTO; /* Arg 2: parent context */
add r8, r0, r0; /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
add r10, r0, r0; /* Arg 5: (unused) */
brid do_fork /* Do real work (tail-call) */
/* execve(2) wrapper: append the saved user context (pt_regs) as the
 * 4th argument and tail-call sys_execve. */
C_ENTRY(sys_execve_wrapper):
la r8, r1, PTO; /* add user context as 4th arg */
brid sys_execve; /* Do real work (tail-call).*/
/* rt_sigsuspend(2) wrapper: preserve r3/r4 in the frame around the
 * call, pass pt_regs as the 3rd argument, then exit via ret_from_trap
 * (a fall-through is not possible because of alignment). */
C_ENTRY(sys_rt_sigsuspend_wrapper):
swi r3, r1, PTO+PT_R3; /* save r3, r4 registers into the frame */
swi r4, r1, PTO+PT_R4;
la r7, r1, PTO; /* add user context as 3rd arg */
brlid r15, sys_rt_sigsuspend; /* Do real work.*/
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
bri ret_from_trap /* fall through will not work here due to align */
/* rt_sigreturn(2) wrapper: preserve r3/r4 in the frame around the
 * call, pass pt_regs as the 1st argument, then exit via ret_from_trap
 * (a fall-through is not possible because of alignment). */
C_ENTRY(sys_rt_sigreturn_wrapper):
swi r3, r1, PTO+PT_R3; /* save r3, r4 registers into the frame */
swi r4, r1, PTO+PT_R4;
la r5, r1, PTO; /* add user context as 1st arg */
brlid r15, sys_rt_sigreturn /* Do real work */
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
bri ret_from_trap /* fall through will not work here due to align */
 * HW EXCEPTION routine start
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
set_bip; /*equalize initial state for all possible entries*/\
/* See if already in kernel mode.*/ \
lwi r11, r0, TOPHYS(PER_CPU(KM)); \
beqi r11, 1f; /* Jump ahead if coming from user */\
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
/* store return registers separately because \
 * this macro is used for other exceptions too */ \
swi r3, r1, PTO + PT_R3; \
swi r4, r1, PTO + PT_R4; \
/* PC, before IRQ/trap - this is one instruction above */ \
swi r17, r1, PTO+PT_PC; \
addi r11, r0, 1; /* Was in kernel-mode. */ \
swi r11, r1, PTO+PT_MODE; \
nop; /* Fill delay slot */ \
1: /* User-mode state save. */ \
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
/* store return registers separately because this macro \
 * is used for other exceptions too */ \
swi r3, r1, PTO + PT_R3; \
swi r4, r1, PTO + PT_R4; \
/* PC, before IRQ/trap - this is one instruction above FIXME*/ \
swi r17, r1, PTO+PT_PC; \
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
/* Save away the syscall number. */ \
swi r0, r1, PTO+PT_R0; \
/* HW exception entry: save state and dispatch to the C handler
 * full_exception(regs, ..., fsr). Returns via ret_from_exc. */
C_ENTRY(full_exception_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
/* adjust exception address for privileged instruction
 * for finding where is it */
SAVE_STATE /* Save registers */
/* FIXME this can be store directly in PT_ESR reg.
 * I tested it but there is a fault */
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc - 8
la r5, r1, PTO /* parameter struct pt_regs * regs */
mfs r7, rfsr; /* save FSR */
la r12, r0, full_exception
554 * Unaligned data trap.
556 * Unaligned data trap last on 4k page is handled here.
558 * Trap entered via exception, so EE bit is set, and interrupts
559 * are masked. This is nice, means we don't have to CLI before state save
561 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
/* Unaligned-data exception entry: save state, collect ESR/EAR, and
 * jump to _unaligned_data_exception with interrupts re-enabled.
 * Returns via ret_from_exc. */
C_ENTRY(unaligned_data_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
mfs r3, resr /* ESR */
mfs r4, rear /* EAR */
la r7, r1, PTO /* parameter struct pt_regs * regs */
la r12, r0, _unaligned_data_exception
rtbd r12, 0; /* interrupts enabled */
581 * If the real exception handler (from hw_exception_handler.S) didn't find
582 * the mapping for the process, then we're thrown here to handle such situation.
584 * Trap entered via exceptions, so EE bit is set, and interrupts
585 * are masked. This is nice, means we don't have to CLI before state save
587 * Build a standard exception frame for TLB Access errors. All TLB exceptions
588 * will bail out to this point if they can't resolve the lightweight TLB fault.
590 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
591 * void do_page_fault(struct pt_regs *regs,
592 * unsigned long address,
593 * unsigned long error_code)
/* Data and instruction traps: which one occurred is resolved in fault.c */
/* Data-side TLB-miss fallback: save state and call
 * do_page_fault(regs, EAR address, ESR error code) with interrupts
 * enabled. Returns via ret_from_exc. */
C_ENTRY(page_fault_data_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
la r5, r1, PTO /* parameter struct pt_regs * regs */
mfs r6, rear /* parameter unsigned long address */
mfs r7, resr /* parameter unsigned long error_code */
la r12, r0, do_page_fault
rtbd r12, 0; /* interrupts enabled */
/* Instruction-side TLB-miss fallback: like page_fault_data_trap but
 * with error_code forced to 0. Returns via ret_from_exc. */
C_ENTRY(page_fault_instr_trap):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
SAVE_STATE /* Save registers.*/
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
la r15, r0, ret_from_exc-8
la r5, r1, PTO /* parameter struct pt_regs * regs */
mfs r6, rear /* parameter unsigned long address */
ori r7, r0, 0 /* parameter unsigned long error_code */
la r12, r0, do_page_fault
rtbd r12, 0; /* interrupts enabled */
625 /* Entry point used to return from an exception. */
/* Common exception exit: mask interrupts, then — for a return to user
 * mode — run the scheduler and deliver pending signals (in_syscall=0)
 * before restoring r3/r4 and the rest of the frame. */
C_ENTRY(ret_from_exc):
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
bnei r11, 2f; /* See if returning to kernel mode, */
/* ... if so, skip resched &c. */
/* We're returning to user mode, so check for various conditions that
 trigger rescheduling. */
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* No signals pending - skip signal delivery */
* Handle a signal return; Pending signals should be in r18.
*
* Not all registers are saved by the normal trap/interrupt entry
* points (for instance, call-saved registers (because the normal
* C-compiler calling sequence in the kernel makes sure they're
* preserved), and call-clobbered registers in the case of
* traps), but signal handlers may want to examine or change the
* complete register state. Here we save anything not saved by
* the normal entry sequence, so that it may be safely restored
* (in a possibly modified form) after do_signal returns.
* store return registers separately because this macro is used
* for other exceptions too */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
/* Return to kernel state. */
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
EXC_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
 * HW EXCEPTION routine end
708 * Hardware maskable interrupts.
710 * The stack-pointer (r1) should have already been saved to the memory
711 * location PER_CPU(ENTRY_SP).
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* MS: See if already in kernel mode. */
lwi r11, r0, TOPHYS(PER_CPU(KM));
beqi r11, 1f; /* MS: Jump ahead if coming from user */
/* Kernel-mode state save. */
tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
swi r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* MS: Make room on the stack -> activation record */
addik r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
 * this macro is used for other exceptions too */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
addi r11, r0, 1; /* MS: Was in kernel-mode. */
swi r11, r1, PTO + PT_MODE; /* MS: and save it */
nop; /* MS: Fill delay slot */
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
/* MS: get the saved current */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
lwi r1, r1, TS_THREAD_INFO;
addik r1, r1, THREAD_SIZE;
addik r1, r1, -STATE_SAVE_SIZE;
swi r3, r1, PTO+PT_R3;
swi r4, r1, PTO+PT_R4;
swi r0, r1, PTO + PT_MODE;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1;
/* setup kernel mode to KM */
swi r11, r0, TOPHYS(PER_CPU(KM));
lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
swi r0, r1, PTO + PT_R0;
la r15, r0, irq_call;
irq_call:rtbd r11, 0;
/* MS: we are in virtual mode */
lwi r11, r1, PTO + PT_MODE;
add r11, r0, CURRENT_TASK;
lwi r11, r11, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r11, _TIF_NEED_RESCHED;
bralid r15, schedule;
nop; /* delay slot */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK;
lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqid r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
addi r7, r0, 0; /* Arg 3: int in_syscall */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset (in delay slot) */
/* Finally, return to user state. */
/* Disable interrupts, we are now committed to the state restore */
swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
add r11, r0, CURRENT_TASK;
swi r11, r0, PER_CPU(CURRENT_SAVE);
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
/* MS: Return to kernel state. */
2: VM_OFF /* MS: turn off MMU */
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
IRQ_return: /* MS: Make global symbol for debugging */
832 * We enter dbtrap in "BIP" (breakpoint) mode.
833 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
835 * however, wait to save state first
/* Debug (breakpoint) trap entry: entered in BIP mode with interrupts
 * masked. Saves state, delivers SIGTRAP to the current task via
 * send_sig, then performs the usual resched/signal checks before
 * restoring state and returning through DBTRAP_return. */
C_ENTRY(_debug_exception):
/* BIP bit is set on entry, no interrupts can occur */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
set_bip; /*equalize initial state for all possible entries*/
lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
beqi r11, 1f; /* Jump ahead if coming from user */
/* Kernel-mode state save. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
addi r11, r0, 1; /* Was in kernel-mode. */
swi r11, r1, PTO + PT_MODE;
nop; /* Fill delay slot */
1: /* User-mode state save. */
lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
swi r3, r1, PTO + PT_R3;
swi r4, r1, PTO + PT_R4;
swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
/* Save away the syscall number. */
swi r0, r1, PTO+PT_R0;
addi r5, r0, SIGTRAP /* send the trap signal */
add r6, r0, CURRENT_TASK; /* Arg 2: current task */
addk r7, r0, r0 /* 3rd param zero */
la r11, r0, send_sig;
la r15, r0, dbtrap_call;
dbtrap_call: rtbd r11, 0;
set_bip; /* Ints masked for state restore*/
lwi r11, r1, PTO+PT_MODE;
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* XXX Is PT_DTRACE handling needed here? */
/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* No signals pending - skip signal delivery */
/* Handle a signal return; Pending signals should be in r18. */
/* Not all registers are saved by the normal trap/interrupt entry
 points (for instance, call-saved registers (because the normal
 C-compiler calling sequence in the kernel makes sure they're
 preserved), and call-clobbered registers in the case of
 traps), but signal handlers may want to examine or change the
 complete register state. Here we save anything not saved by
 the normal entry sequence, so that it may be safely restored
 (in a possibly modified form) after do_signal returns. */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
/* Restore user stack pointer. */
/* Return to kernel state. */
lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
lwi r4, r1, PTO+PT_R4;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
DBTRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
971 /* prepare return value */
974 /* save registers in cpu_context */
975 /* use r11 and r12, volatile registers, as temp register */
976 /* give start of cpu_context for previous process */
977 addik r11, r5, TI_CPU_CONTEXT
980 /* skip volatile registers.
981 * they are saved on stack when we jumped to _switch_to() */
982 /* dedicated registers */
989 /* save non-volatile registers */
1000 swi r29, r11, CC_R29
1001 swi r30, r11, CC_R30
1002 /* special purpose registers */
1005 swi r12, r11, CC_MSR
1008 swi r12, r11, CC_EAR
1011 swi r12, r11, CC_ESR
1014 swi r12, r11, CC_FSR
1016 /* update r31, the current */
1017 lwi r31, r6, TI_TASK/* give me pointer to task which will be next */
1018 /* stored it to current_save too */
1019 swi r31, r0, PER_CPU(CURRENT_SAVE)
1021 /* get new process' cpu context and restore */
1022 /* give me start where start context of next task */
1023 addik r11, r6, TI_CPU_CONTEXT
1025 /* non-volatile registers */
1026 lwi r30, r11, CC_R30
1027 lwi r29, r11, CC_R29
1028 lwi r28, r11, CC_R28
1029 lwi r27, r11, CC_R27
1030 lwi r26, r11, CC_R26
1031 lwi r25, r11, CC_R25
1032 lwi r24, r11, CC_R24
1033 lwi r23, r11, CC_R23
1034 lwi r22, r11, CC_R22
1035 lwi r21, r11, CC_R21
1036 lwi r20, r11, CC_R20
1037 lwi r19, r11, CC_R19
1038 /* dedicated registers */
1039 lwi r18, r11, CC_R18
1040 lwi r17, r11, CC_R17
1041 lwi r16, r11, CC_R16
1042 lwi r15, r11, CC_R15
1043 lwi r14, r11, CC_R14
1044 lwi r13, r11, CC_R13
1045 /* skip volatile registers */
1049 /* special purpose registers */
1050 lwi r12, r11, CC_FSR
1053 lwi r12, r11, CC_MSR
1061 brai 0x70; /* Jump back to FS-boot */
1066 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1069 swi r5, r0, 0x254 + TOPHYS(r0_ram)
/* Interrupt/exception vector table. These are compiled and loaded
 * into high memory, then copied into place in mach_early_setup. Each
 * vector is a single brai to the physical address of its handler. */
.section .init.ivt, "ax"
/* this is very important - here is the reset vector */
/* in current MMU branch you don't care what is here - it is
 * used from the bootloader side - but this is correct for FS-BOOT */
brai TOPHYS(_user_exception); /* syscall handler */
brai TOPHYS(_interrupt); /* Interrupt handler */
brai TOPHYS(_break); /* nmi trap handler */
brai TOPHYS(_hw_exception_handler); /* HW exception handler */
brai TOPHYS(_debug_exception); /* debug trap handler*/

/* The syscall function-pointer table lives in .rodata. */
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)