/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If the group stop is completed or in progress,
		 * this thread was already counted as stopped.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			__set_task_state(child, TASK_STOPPED);
		else
			signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if ((cred->uid != tcred->euid ||
	     cred->uid != tcred->suid ||
	     cred->uid != tcred->uid  ||
	     cred->gid != tcred->egid ||
	     cred->gid != tcred->sgid ||
	     cred->gid != tcred->gid) &&
	    !capable(CAP_SYS_PTRACE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

int ptrace_attach(struct task_struct *task)
{
	int retval;

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->cred_guard_mutex);
out:
	return retval;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now; in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand)) {
				__wake_up_parent(p, tracer);
				p->exit_signal = -1;
			}
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
		if (!child->exit_state)
			wake_up_process(child);
	}
	write_unlock_irq(&tasklist_lock);

	if (unlikely(dead))
		release_task(child);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

465 | ||
466 | #ifdef PTRACE_SINGLESTEP | |
467 | #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) | |
468 | #else | |
469 | #define is_singlestep(request) 0 | |
470 | #endif | |
471 | ||
472 | #ifdef PTRACE_SINGLEBLOCK | |
473 | #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK) | |
474 | #else | |
475 | #define is_singleblock(request) 0 | |
476 | #endif | |
477 | ||
478 | #ifdef PTRACE_SYSEMU | |
479 | #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP) | |
480 | #else | |
481 | #define is_sysemu_singlestep(request) 0 | |
482 | #endif | |
483 | ||
484 | static int ptrace_resume(struct task_struct *child, long request, long data) | |
485 | { | |
486 | if (!valid_signal(data)) | |
487 | return -EIO; | |
488 | ||
489 | if (request == PTRACE_SYSCALL) | |
490 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
491 | else | |
492 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | |
493 | ||
494 | #ifdef TIF_SYSCALL_EMU | |
495 | if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP) | |
496 | set_tsk_thread_flag(child, TIF_SYSCALL_EMU); | |
497 | else | |
498 | clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); | |
499 | #endif | |
500 | ||
501 | if (is_singleblock(request)) { | |
502 | if (unlikely(!arch_has_block_step())) | |
503 | return -EIO; | |
504 | user_enable_block_step(child); | |
505 | } else if (is_singlestep(request) || is_sysemu_singlestep(request)) { | |
506 | if (unlikely(!arch_has_single_step())) | |
507 | return -EIO; | |
508 | user_enable_single_step(child); | |
509 | } else { | |
510 | user_disable_single_step(child); | |
511 | } | |
512 | ||
513 | child->exit_code = data; | |
514 | wake_up_process(child); | |
515 | ||
516 | return 0; | |
517 | } | |
518 | ||
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

#endif

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;
	siginfo_t siginfo;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user((siginfo_t __user *) data,
						   &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, (siginfo_t __user *) data,
				   sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, (unsigned long __user *) data);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = (struct iovec __user *) data;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */