arch/m32r/kernel/ptrace.c (net-next-2.6.git blame view, at commit "ptrace: change signature of arch_ptrace()")
/*
 * linux/arch/m32r/kernel/ptrace.c
 *
 * Copyright (C) 2002  Hirokazu Takata, Takeo Takahashi
 * Copyright (C) 2004  Hirokazu Takata, Kei Sakamoto
 *
 * Original x86 implementation:
 *	By Ross Biro 1/23/92
 *	edited by Linus Torvalds
 *
 * Some code taken from sh version:
 *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Some code taken from arm version:
 *   Copyright (C) 2000 Russell King
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/signal.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>

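/*
 * Overview: this file is the m32r back end of ptrace(2).  It serves
 * PTRACE_PEEKUSR/POKEUSR and PTRACE_GETREGS/SETREGS from the pt_regs saved
 * on the child's kernel stack, and it implements single stepping in
 * software by decoding the instruction at the child's PC, computing the
 * next PC and planting a TRAP1 there (see the debug_trap helpers below).
 */
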
/*
 * This routine will get a word off of the process kernel stack.
 */
static inline unsigned long int
get_stack_long(struct task_struct *task, int offset)
{
	unsigned long *stack;

	stack = (unsigned long *)task_pt_regs(task);

	return stack[offset];
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int
put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
	unsigned long *stack;

	stack = (unsigned long *)task_pt_regs(task);
	stack[offset] = data;

	return 0;
}

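/*
 * reg_offset[] maps instruction register numbers r0-r15 to their pt_regs
 * slots.  The branch-emulation code below indexes it to read the child's
 * register operands when computing the next PC for a single step.
 */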
static int reg_offset[] = {
	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};

/*
 * Read the word at offset "off" into the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *data)
{
	unsigned long tmp;
#ifndef NO_FPU
	struct user * dummy = NULL;
#endif

	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
		__asm__ __volatile__ (
			"mvfc	%0, cr5 \n\t"
			: "=r" (tmp)
		);
		break;
	case PT_CBR: {
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			tmp = ((psw >> 8) & 1);
		}
		break;
	case PT_PSW: {
			unsigned long psw, bbpsw;
			psw = get_stack_long(tsk, PT_PSW);
			bbpsw = get_stack_long(tsk, PT_BBPSW);
			tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
		}
		break;
	case PT_PC:
		tmp = get_stack_long(tsk, PT_BPC);
		break;
	case PT_BPC:
		off = PT_BBPC;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			tmp = get_stack_long(tsk, off);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			if (!tsk_used_math(tsk)) {
				if (off == (long)(&dummy->fpu.fpscr >> 2))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)(&tsk->thread.fpu >> 2))
					[off - (long)&dummy->fpu];
		} else if (off == (long)(&dummy->u_fpvalid >> 2))
			tmp = !!tsk_used_math(tsk);
#endif /* not NO_FPU */
		else
			tmp = 0;
	}

	return put_user(tmp, data);
}

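/*
 * Usage sketch (user space, not part of this file): a tracer reads one word
 * of the child's user area with something like
 *
 *	errno = 0;
 *	val = ptrace(PTRACE_PEEKUSR, pid, (void *)(4 * PT_PC), NULL);
 *
 * which reaches ptrace_read_user() above with a byte offset of 4 * PT_PC
 * and is answered from the saved bpc on the child's kernel stack.  This
 * assumes the PT_* offsets from <asm/ptrace.h> are visible to the tracer.
 */
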
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long data)
{
	int ret = -EIO;
#ifndef NO_FPU
	struct user * dummy = NULL;
#endif

	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
	case PT_BPC:
	case PT_SPI:
		/* Modifying evb, bpc and spi is not allowed;
		 * the write is silently ignored. */
		ret = 0;
		break;
	case PT_PSW:
	case PT_CBR: {
			/* Only the condition bit (cbr) of psw may be modified. */
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			psw = (psw & ~0x100) | ((data & 1) << 8);
			ret = put_stack_long(tsk, PT_PSW, psw);
		}
		break;
	case PT_PC:
		off = PT_BPC;
		data &= ~1;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			ret = put_stack_long(tsk, off, data);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			set_stopped_child_used_math(tsk);
			((long *)&tsk->thread.fpu)
				[off - (long)&dummy->fpu] = data;
			ret = 0;
		} else if (off == (long)(&dummy->u_fpvalid >> 2)) {
			conditional_stopped_child_used_math(data, tsk);
			ret = 0;
		}
#endif /* not NO_FPU */
		break;
	}

	return ret;
}

/*
 * Get all user integer registers.
 */
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}

/*
 * Set all user integer registers.
 */
static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs newregs;
	int ret;

	ret = -EFAULT;
	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
		struct pt_regs *regs = task_pt_regs(tsk);
		*regs = newregs;
		ret = 0;
	}

	return ret;
}

static inline int
check_condition_bit(struct task_struct *child)
{
	return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
}

static int
check_condition_src(unsigned long op, unsigned long regno1,
		    unsigned long regno2, struct task_struct *child)
{
	unsigned long reg1, reg2;

	reg2 = get_stack_long(child, reg_offset[regno2]);

	switch (op) {
	case 0x0: /* BEQ */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 == reg2;
	case 0x1: /* BNE */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 != reg2;
	case 0x8: /* BEQZ */
		return reg2 == 0;
	case 0x9: /* BNEZ */
		return reg2 != 0;
	case 0xa: /* BLTZ */
		return (int)reg2 < 0;
	case 0xb: /* BGEZ */
		return (int)reg2 >= 0;
	case 0xc: /* BLEZ */
		return (int)reg2 <= 0;
	case 0xd: /* BGTZ */
		return (int)reg2 > 0;
	default:
		/* never reached */
		return 0;
	}
}

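/*
 * The helpers above evaluate M32R compare-and-branch conditions (BEQ, BNE,
 * the BxZ forms, and the condition bit used by BC/BNC/JC/JNC) against the
 * traced child's saved registers.  They are used only by the next-PC
 * computation below when a single step lands on a conditional branch.
 */
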
static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op, op2, op3;
	unsigned long disp;
	unsigned long regno;
	int parallel = 0;

	if (insn & 0x00008000)
		parallel = 1;
	if (pc & 3)
		insn &= 0x7fff;	/* right slot */
	else
		insn >>= 16;	/* left slot */

	op = (insn >> 12) & 0xf;
	op2 = (insn >> 8) & 0xf;
	op3 = (insn >> 4) & 0xf;

	if (op == 0x7) {
		switch (op2) {
		case 0xd: /* BNC */
		case 0x9: /* BNCL */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8: /* BCL */
		case 0xc: /* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe: /* BL */
		case 0xf: /* BRA */
			disp = (long)(insn << 24) >> 22;
			*next_pc = (pc & ~0x3) + disp;
			return;
			break;
		}
	} else if (op == 0x1) {
		switch (op2) {
		case 0x0:
			if (op3 == 0xf) { /* TRAP */
#if 1
				/* pass through */
#else
				/* kernel space is not allowed as next_pc */
				unsigned long evb;
				unsigned long trapno;
				trapno = insn & 0xf;
				__asm__ __volatile__ (
					"mvfc %0, cr5\n"
					:"=r"(evb)
					:
				);
				*next_pc = evb + (trapno << 2);
				return;
#endif
			} else if (op3 == 0xd) { /* RTE */
				*next_pc = get_stack_long(child, PT_BPC);
				return;
			}
			break;
		case 0xc: /* JC */
			if (op3 == 0xc && check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xd: /* JNC */
			if (op3 == 0xc && !check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xe: /* JL */
		case 0xf: /* JMP */
			if (op3 == 0xc) { /* JMP */
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		}
	}
	if (parallel)
		*next_pc = pc + 4;
	else
		*next_pc = pc + 2;
}

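/*
 * Displacement decoding used above and below: the displacement field is
 * moved to the top of the word with a left shift and then arithmetically
 * shifted back two bits short, which sign-extends it and multiplies it by
 * four in one step.  So (long)(insn << 24) >> 22 turns the 8-bit field of a
 * short branch into a byte offset, (long)(insn << 8) >> 6 does the same for
 * the 24-bit field and (long)(insn << 16) >> 14 for the 16-bit field.
 * Targets are relative to the word-aligned PC, hence (pc & ~0x3) + disp.
 */
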
static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) {	/* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0xd: /* BNC */
		case 0x9: /* BNCL */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8: /* BCL */
		case 0xc: /* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe: /* BL */
		case 0xf: /* BRA */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) {	/* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0: /* BEQ */
		case 0x1: /* BNE */
		case 0x8: /* BEQZ */
		case 0x9: /* BNEZ */
		case 0xa: /* BLTZ */
		case 0xb: /* BGEZ */
		case 0xc: /* BLEZ */
		case 0xd: /* BGTZ */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	*next_pc = pc + 4;
}

static inline void
compute_next_pc(unsigned long insn, unsigned long pc,
		unsigned long *next_pc, struct task_struct *child)
{
	if (insn & 0x80000000)
		compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
	else
		compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
}

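/*
 * Software single step: a step is emulated by computing the next PC with
 * compute_next_pc(), saving the instruction found there in the child's
 * debug_trap table and overwriting it with a TRAP1.  When the child resumes
 * it immediately hits the trap; withdraw_debug_trap() then restores the
 * original instruction and backs bpc up over the 2-byte trap.
 */
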
static int
register_debug_trap(struct task_struct *child, unsigned long next_pc,
		    unsigned long next_insn, unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	unsigned long addr = next_pc & ~3;

	if (p->nr_trap == MAX_TRAPS) {
		printk("kernel BUG at %s %d: p->nr_trap = %d\n",
		       __FILE__, __LINE__, p->nr_trap);
		return -1;
	}
	p->addr[p->nr_trap] = addr;
	p->insn[p->nr_trap] = next_insn;
	p->nr_trap++;
	if (next_pc & 3) {
		*code = (next_insn & 0xffff0000) | 0x10f1;
		/* xxx --> TRAP1 */
	} else {
		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
			*code = 0x10f17000;
			/* TRAP1 --> NOP */
		} else {
			*code = (next_insn & 0xffff) | 0x10f10000;
			/* TRAP1 --> xxx */
		}
	}
	return 0;
}

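/*
 * Encoding note for register_debug_trap() above: 0x10f1 is the 16-bit
 * TRAP #1 opcode and 0x7000 is a NOP.  If next_pc is in the right halfword
 * of its word, only that half is replaced.  If it is in the left halfword
 * and the high bit of either half is set (a 32-bit instruction, or a right
 * halfword marked for parallel execution), the whole word becomes
 * TRAP1 + NOP; otherwise the original right halfword is kept after TRAP1.
 */
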
static int
unregister_debug_trap(struct task_struct *child, unsigned long addr,
		      unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	/* Search debug trap entry. */
	for (i = 0; i < p->nr_trap; i++) {
		if (p->addr[i] == addr)
			break;
	}
	if (i >= p->nr_trap) {
		/* The trap may have been requested by the debugger itself;
		 * ptrace should do nothing in this case.
		 */
		return 0;
	}

	/* Recover the original instruction code. */
	*code = p->insn[i];

	/* Shift debug trap entries. */
	while (i < p->nr_trap - 1) {
		p->insn[i] = p->insn[i + 1];
		p->addr[i] = p->addr[i + 1];
		i++;
	}
	p->nr_trap--;
	return 1;
}

static void
unregister_all_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	for (i = 0; i < p->nr_trap; i++)
		access_process_vm(child, p->addr[i], &p->insn[i],
				  sizeof(p->insn[i]), 1);
	p->nr_trap = 0;
}

static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)

	_flush_cache_copyback_all();

#else	/* ! CONFIG_CHIP_M32700 */

	/* Invalidate cache */
	__asm__ __volatile__ (
		"ldi	r0, #-1				\n\t"
		"ldi	r1, #0				\n\t"
		"stb	r1, @r0		; cache off	\n\t"
		";					\n\t"
		"ldi	r0, #-2				\n\t"
		"ldi	r1, #1				\n\t"
		"stb	r1, @r0		; cache invalidate \n\t"
		".fillinsn				\n"
		"0:					\n\t"
		"ldb	r1, @r0		; invalidate check \n\t"
		"bnez	r1, 0b				\n\t"
		";					\n\t"
		"ldi	r0, #-1				\n\t"
		"ldi	r1, #1				\n\t"
		"stb	r1, @r0		; cache on	\n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif	/* CONFIG_CHIP_M32700 */
}

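/*
 * Cache maintenance note: the trap opcodes are written into the child's
 * text through access_process_vm(), so the instruction cache must not keep
 * serving the old contents.  On M32700/OPSP the copy-back flush above is
 * used; on other chips the bytes at addresses -1 and -2 are poked to turn
 * the cache off, invalidate it and turn it back on, as the inline assembly
 * comments indicate.
 */
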
/* Embed a debug trap (TRAP1) code */
static int
embed_debug_trap(struct task_struct *child, unsigned long next_pc)
{
	unsigned long next_insn, code;
	unsigned long addr = next_pc & ~3;

	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
	    != sizeof(next_insn)) {
		return -1; /* error */
	}

	/* Set a trap code. */
	if (register_debug_trap(child, next_pc, next_insn, &code)) {
		return -1; /* error */
	}
	if (access_process_vm(child, addr, &code, sizeof(code), 1)
	    != sizeof(code)) {
		return -1; /* error */
	}
	return 0; /* success */
}

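/*
 * Undo a planted TRAP1 after it has fired.  At that point bpc points just
 * past the 2-byte trap, so it is backed up to re-execute from the restored
 * location, and the word-aligned address is used to look the trap up in the
 * child's debug_trap table before the original instruction is written back.
 */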
void
withdraw_debug_trap(struct pt_regs *regs)
{
	unsigned long addr;
	unsigned long code;

	addr = (regs->bpc - 2) & ~3;
	regs->bpc -= 2;
	if (unregister_debug_trap(current, addr, &code)) {
		access_process_vm(current, addr, &code, sizeof(code), 1);
		invalidate_cache();
	}
}

void
init_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;
	p->nr_trap = 0;
	for (i = 0; i < MAX_TRAPS; i++) {
		p->addr[i] = 0;
		p->insn[i] = 0;
	}
}

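/*
 * Single-step entry points.  A tracer typically triggers them with
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
 *
 * the calls into user_enable_single_step()/user_disable_single_step() are
 * made by the generic ptrace core, not by this file.
 */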
void user_enable_single_step(struct task_struct *child)
{
	unsigned long next_pc;
	unsigned long pc, insn;

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	/* Compute next pc. */
	pc = get_stack_long(child, PT_BPC);

	if (access_process_vm(child, pc & ~3, &insn, sizeof(insn), 0)
	    != sizeof(insn))
		return;

	compute_next_pc(insn, pc, &next_pc, child);
	if (next_pc & 0x80000000)
		return;

	if (embed_debug_trap(child, next_pc))
		return;

	invalidate_cache();
}

void user_disable_single_step(struct task_struct *child)
{
	unregister_all_debug_traps(child);
	invalidate_cache();
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
}

long
arch_ptrace(struct task_struct *child, long request,
	    unsigned long addr, unsigned long data)
{
	int ret;

	switch (request) {
	/*
	 * read word at location "addr" in the child process.
	 */
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/*
	 * read the word at location addr in the USER area.
	 */
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr,
				       (unsigned long __user *)data);
		break;

	/*
	 * write the word at location addr.
	 */
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		if (ret == 0 && request == PTRACE_POKETEXT)
			invalidate_cache();
		break;

	/*
	 * write the word at location addr in the USER area.
	 */
	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, (void __user *)data);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, (void __user *)data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

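/*
 * Note on the POKETEXT case above: text written by the tracer (typically a
 * breakpoint opcode) bypasses the normal instruction fetch path, so the
 * cache is invalidated to make sure the child executes the new contents.
 */
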
/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}