/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
 *
 * linux/arch/sh/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 *                 Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/a.out.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/kexec.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/elf.h>
#include <asm/ubc.h>

static int hlt_counter = 0;

int ubc_usercnt = 0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

void (*pm_idle)(void);

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

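/*
 * pm_idle and pm_power_off are optional hooks that board or power
 * management code may point at platform-specific routines.  If pm_idle
 * is left NULL, cpu_idle() below falls back to default_idle(), which
 * sleeps the CPU unless disable_hlt() has been called.
 *
 * Illustrative only (hypothetical board code, not part of this file):
 *
 *      static void myboard_idle(void)
 *      {
 *              cpu_sleep();
 *      }
 *
 *      pm_idle = myboard_idle;
 *      pm_power_off = myboard_power_off;
 */
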
void disable_hlt(void)
{
        hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

void default_idle(void)
{
        if (!hlt_counter)
                cpu_sleep();
        else
                cpu_relax();
}

void cpu_idle(void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                void (*idle)(void) = pm_idle;

                if (!idle)
                        idle = default_idle;

                while (!need_resched())
                        idle();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

void machine_restart(char * __unused)
{
        /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
        asm volatile("ldc %0, sr\n\t"
                     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}

void machine_halt(void)
{
        local_irq_disable();

        while (1)
                cpu_sleep();
}

void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
}

void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
        print_symbol("PC is at %s\n", instruction_pointer(regs));
        printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
               regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
        printk("TEA : %08x    ", ctrl_inl(MMU_TEA));
#else
        printk("                  ");
#endif
        printk("%s\n", print_tainted());

        printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
               regs->regs[0], regs->regs[1],
               regs->regs[2], regs->regs[3]);
        printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
               regs->regs[4], regs->regs[5],
               regs->regs[6], regs->regs[7]);
        printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
               regs->regs[8], regs->regs[9],
               regs->regs[10], regs->regs[11]);
        printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
               regs->regs[12], regs->regs[13],
               regs->regs[14]);
        printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
               regs->mach, regs->macl, regs->gbr, regs->pr);

        show_trace(NULL, (unsigned long *)regs->regs[15], regs);
}

/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
        "kernel_thread_helper:\n\t"
        "jsr @r5\n\t"
        " nop\n\t"
        "mov.l 1f, r1\n\t"
        "jsr @r1\n\t"
        " mov r0, r4\n\t"
        ".align 2\n\t"
        "1:.long do_exit");

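/*
 * When the new task first runs, it starts in kernel_thread_helper with
 * r4 = arg and r5 = fn, taken from the pt_regs set up in kernel_thread()
 * below.  The helper calls fn(arg) and then passes fn's return value to
 * do_exit() (the "mov r0, r4" in the jsr delay slot).
 *
 * Illustrative only (hypothetical caller, not part of this file):
 *
 *      static int my_worker(void *unused)
 *      {
 *              do_some_work();
 *              return 0;
 *      }
 *
 *      kernel_thread(my_worker, NULL, 0);
 */
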
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{       /* Don't use this in BL=1(cli). Or else, CPU resets! */
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));
        regs.regs[4] = (unsigned long) arg;
        regs.regs[5] = (unsigned long) fn;

        regs.pc = (unsigned long) kernel_thread_helper;
        regs.sr = (1 << 30);

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        if (current->thread.ubc_pc) {
                current->thread.ubc_pc = 0;
                ubc_usercnt -= 1;
        }
}

void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;
        /* Forget lazy FPU state */
        clear_fpu(tsk, task_pt_regs(tsk));
        clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
        /* do nothing */
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;

        fpvalid = !!tsk_used_math(tsk);
        if (fpvalid) {
                unlazy_fpu(tsk, regs);
                memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
        }
#endif

        return fpvalid;
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs ptregs;

        ptregs = *task_pt_regs(tsk);
        elf_core_copy_regs(regs, &ptregs);

        return 1;
}

int
dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
{
        int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
        fpvalid = !!tsk_used_math(tsk);
        if (fpvalid) {
                unlazy_fpu(tsk, task_pt_regs(tsk));
                memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
        }
#endif

        return fpvalid;
}

asmlinkage void ret_from_fork(void);

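/*
 * copy_thread() sets the new task up so that, when __switch_to() first
 * switches to it, it resumes in ret_from_fork with a copy of the parent's
 * trap frame on its own kernel stack.  regs[0] is cleared so the child
 * sees a return value of 0 from fork()/clone(), and with CLONE_SETTLS the
 * new thread's TLS pointer is loaded into GBR.
 */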
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;

        unlazy_fpu(tsk, regs);
        p->thread.fpu = tsk->thread.fpu;
        copy_to_stopped_child_used_math(p);
#endif

        childregs = task_pt_regs(p);
        *childregs = *regs;

        if (user_mode(regs)) {
                childregs->regs[15] = usp;
                ti->addr_limit = USER_DS;
        } else {
                childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
                ti->addr_limit = KERNEL_DS;
        }
        if (clone_flags & CLONE_SETTLS) {
                childregs->gbr = childregs->regs[0];
        }
        childregs->regs[0] = 0; /* Set return value for child */

        p->thread.sp = (unsigned long) childregs;
        p->thread.pc = (unsigned long) ret_from_fork;

        p->thread.ubc_pc = 0;

        return 0;
}

/* Tracing by user break controller. */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
#if defined(CONFIG_CPU_SH4A)
        unsigned long val;

        val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
        val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));

        ctrl_outl(val, UBC_CBR0);
        ctrl_outl(pc, UBC_CAR0);
        ctrl_outl(0x0, UBC_CAMR0);
        ctrl_outl(0x0, UBC_CBCR);

        val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
        ctrl_outl(val, UBC_CRR0);

        /*
         * Read back the UBC register we wrote last, to check that the
         * new settings have actually reached the UBC.
         */
        val = ctrl_inl(UBC_CRR0);

#else /* CONFIG_CPU_SH4A */
        ctrl_outl(pc, UBC_BARA);

#ifdef CONFIG_MMU
        /* We don't have any ASID settings for the SH-2! */
        if (cpu_data->type != CPU_SH7604)
                ctrl_outb(asid, UBC_BASRA);
#endif

        ctrl_outl(0, UBC_BAMRA);

        if (cpu_data->type == CPU_SH7729 || cpu_data->type == CPU_SH7710) {
                ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
                ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
        } else {
                ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
                ctrl_outw(BRCR_PCBA, UBC_BRCR);
        }
#endif /* CONFIG_CPU_SH4A */
}

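/*
 * UBC-based single-stepping: the ptrace single-step code (outside this
 * file) sets thread.ubc_pc to the address the traced task should stop at
 * and bumps ubc_usercnt.  __switch_to() below programs the UBC via
 * ubc_set_tracing() when switching such a task in; the resulting break
 * exception lands in break_point_trap(), which clears the UBC again and
 * delivers SIGTRAP.
 *
 * Illustrative only (hypothetical debugger side, assuming the usual
 * ptrace flow); the tracee then stops with the SIGTRAP raised above:
 *
 *      ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
 *      waitpid(pid, &status, 0);
 */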
/*
 * switch_to(x,y) should switch tasks from x to y.
 */
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
        unlazy_fpu(prev, task_pt_regs(prev));
#endif

#ifdef CONFIG_PREEMPT
        {
                unsigned long flags;
                struct pt_regs *regs;

                local_irq_save(flags);
                regs = task_pt_regs(prev);
                if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
                        int offset = (int)regs->regs[15];

                        /* Reset stack pointer: clear critical region mark */
                        regs->regs[15] = regs->regs[1];
                        if (regs->pc < regs->regs[0])
                                /* Go to rewind point */
                                regs->pc = regs->regs[0] + offset;
                }
                local_irq_restore(flags);
        }
#endif

#ifdef CONFIG_MMU
        /*
         * Restore the kernel mode register
         * k7 (r7_bank1)
         */
        asm volatile("ldc %0, r7_bank"
                     : /* no output */
                     : "r" (task_thread_info(next)));
#endif

        /* If no tasks are using the UBC, we're done */
        if (ubc_usercnt == 0)
                /* If no tasks are using the UBC, we're done */;
        else if (next->thread.ubc_pc && next->mm) {
                int asid = 0;
#ifdef CONFIG_MMU
                asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
#endif
                ubc_set_tracing(asid, next->thread.ubc_pc);
        } else {
#if defined(CONFIG_CPU_SH4A)
                ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
                ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
                ctrl_outw(0, UBC_BBRA);
                ctrl_outw(0, UBC_BBRB);
#endif
        }

        return prev;
}

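/*
 * The syscall wrappers below receive the register frame left on the kernel
 * stack by the syscall entry code as a by-value struct pt_regs argument;
 * RELOC_HIDE(&__regs, 0) recovers a usable pointer to that frame while
 * keeping the compiler from optimising around the address-of-parameter.
 * A new wrapper following the same pattern would look roughly like this
 * (hypothetical sketch, not part of this file):
 *
 *      asmlinkage int sys_foo(unsigned long r4, unsigned long r5,
 *                             unsigned long r6, unsigned long r7,
 *                             struct pt_regs __regs)
 *      {
 *              struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 *              return do_something(regs);
 *      }
 */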
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
                        unsigned long r6, unsigned long r7,
                        struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
#ifdef CONFIG_MMU
        return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
#else
        /* fork almost works, enough to trick you into looking elsewhere :-( */
        return -EINVAL;
#endif
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
                         unsigned long parent_tidptr,
                         unsigned long child_tidptr,
                         struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        if (!newsp)
                newsp = regs->regs[15];
        return do_fork(clone_flags, newsp, regs, 0,
                       (int __user *)parent_tidptr, (int __user *)child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
                         unsigned long r6, unsigned long r7,
                         struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
                       0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
                          char **uenvp, unsigned long r7,
                          struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        int error;
        char *filename;

        filename = getname((char __user *)ufilename);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;

        error = do_execve(filename,
                          (char __user * __user *)uargv,
                          (char __user * __user *)uenvp,
                          regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
out:
        return error;
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long schedule_frame;
        unsigned long pc;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        /*
         * The same comment as on the Alpha applies here, too ...
         */
        pc = thread_saved_pc(p);
        if (in_sched_functions(pc)) {
                schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
                return (unsigned long)((unsigned long *)schedule_frame)[1];
        }
        return pc;
}

asmlinkage void break_point_trap(void)
{
        /* Clear tracing. */
#if defined(CONFIG_CPU_SH4A)
        ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
        ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
        ctrl_outw(0, UBC_BBRA);
        ctrl_outw(0, UBC_BBRB);
#endif
        current->thread.ubc_pc = 0;
        ubc_usercnt -= 1;

        force_sig(SIGTRAP, current);
}

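/*
 * break_point_trap_software() is reached through the trapa vector.  The PC
 * is rewound past the 16-bit trapa instruction; if the trap originated in
 * kernel text and the opcode matches TRAPA_BUG_OPCODE, it is a BUG() and
 * is handed to handle_BUG().  Otherwise it is treated as a user-space
 * software breakpoint and SIGTRAP is delivered.
 */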
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
                                          unsigned long r6, unsigned long r7,
                                          struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

        /* Rewind */
        regs->pc -= 2;

#ifdef CONFIG_BUG
        if (__kernel_text_address(instruction_pointer(regs))) {
                u16 insn = *(u16 *)instruction_pointer(regs);
                if (insn == TRAPA_BUG_OPCODE)
                        handle_BUG(regs);
        }
#endif

        force_sig(SIGTRAP, current);
}