]> bbs.cooldavid.org Git - net-next-2.6.git/blame - include/linux/ptrace.h
ptrace: change signature of sys_ptrace() and friends
[net-next-2.6.git] / include / linux / ptrace.h
CommitLineData
1da177e4
LT
1#ifndef _LINUX_PTRACE_H
2#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

/* Basic ptrace requests; the values are part of the user-space ABI. */
#define PTRACE_TRACEME		   0	/* tracee: let my parent trace me */
#define PTRACE_PEEKTEXT		   1	/* read a word from tracee text */
#define PTRACE_PEEKDATA		   2	/* read a word from tracee data */
#define PTRACE_PEEKUSR		   3	/* read a word from tracee user area */
#define PTRACE_POKETEXT		   4	/* write a word into tracee text */
#define PTRACE_POKEDATA		   5	/* write a word into tracee data */
#define PTRACE_POKEUSR		   6	/* write a word into tracee user area */
#define PTRACE_CONT		   7	/* restart the stopped tracee */
#define PTRACE_KILL		   8	/* send SIGKILL to the tracee */
#define PTRACE_SINGLESTEP	   9	/* restart for one instruction */

#define PTRACE_ATTACH		  16	/* attach to an existing process */
#define PTRACE_DETACH		  17	/* detach and restart the tracee */

#define PTRACE_SYSCALL		  24	/* restart, stop at syscall entry/exit */

/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201
#define PTRACE_GETSIGINFO	0x4202
#define PTRACE_SETSIGINFO	0x4203
/*
 * Generic ptrace interface that exports the architecture specific regsets
 * using the corresponding NT_* types (which are also used in the core dump).
 * Please note that the NT_PRSTATUS note type in a core dump contains a full
 * 'struct elf_prstatus'. But the user_regset for NT_PRSTATUS contains just the
 * elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'. For all the
 * other user_regset flavors, the user_regset layout and the ELF core dump note
 * payload are exactly the same layout.
 *
 * This interface usage is as follows:
 *	struct iovec iov = { buf, len};
 *
 *	ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov);
 *
 * On the successful completion, iov.len will be updated by the kernel,
 * specifying how much the kernel has written/read to/from the user's iov.buf.
 */
#define PTRACE_GETREGSET	0x4204
#define PTRACE_SETREGSET	0x4205
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001	/* set bit 7 in syscall stops */
#define PTRACE_O_TRACEFORK	0x00000002	/* stop at fork() */
#define PTRACE_O_TRACEVFORK	0x00000004	/* stop at vfork() */
#define PTRACE_O_TRACECLONE	0x00000008	/* stop at clone() */
#define PTRACE_O_TRACEEXEC	0x00000010	/* stop at exec() */
#define PTRACE_O_TRACEVFORKDONE	0x00000020	/* stop when vfork child releases parent */
#define PTRACE_O_TRACEEXIT	0x00000040	/* stop at exit */

/* Mask of all valid PTRACE_O_* option bits (the OR of the above). */
#define PTRACE_O_MASK		0x0000007f

/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6
68
69#include <asm/ptrace.h>
70
71#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple.  When a task is running it owns its own
 * task->ptrace flags.  When a task is stopped, the ptracer owns
 * task->ptrace.
 */

#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD	0x00000004
#define PT_PTRACE_CAP	0x00000008	/* ptracer can follow suid-exec */
#define PT_TRACE_FORK	0x00000010
#define PT_TRACE_VFORK	0x00000020
#define PT_TRACE_CLONE	0x00000040
#define PT_TRACE_EXEC	0x00000080
#define PT_TRACE_VFORK_DONE	0x00000100
#define PT_TRACE_EXIT	0x00000200

/* PT_TRACESYSGOOD plus all PT_TRACE_* event bits. */
#define PT_TRACE_MASK	0x000003f4

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)
99#include <linux/compiler.h> /* For unlikely. */
100#include <linux/sched.h> /* For struct task_struct. */
101
481bed45
CH
102
103extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
6b9c7ed8 104extern int ptrace_traceme(void);
1da177e4
LT
105extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
106extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
107extern int ptrace_attach(struct task_struct *tsk);
108extern int ptrace_detach(struct task_struct *, unsigned int);
109extern void ptrace_disable(struct task_struct *);
110extern int ptrace_check_attach(struct task_struct *task, int kill);
4abf9869
NK
111extern int ptrace_request(struct task_struct *child, long request,
112 unsigned long addr, unsigned long data);
1da177e4
LT
113extern void ptrace_notify(int exit_code);
114extern void __ptrace_link(struct task_struct *child,
115 struct task_struct *new_parent);
116extern void __ptrace_unlink(struct task_struct *child);
39c626ae 117extern void exit_ptrace(struct task_struct *tracer);
006ebb40
SS
118#define PTRACE_MODE_READ 1
119#define PTRACE_MODE_ATTACH 2
120/* Returns 0 on success, -errno on denial. */
121extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
122/* Returns true on success, false on denial. */
123extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
1da177e4 124
53b6f9fb
ON
125static inline int ptrace_reparented(struct task_struct *child)
126{
127 return child->real_parent != child->parent;
128}
c6a47cc2 129
1da177e4
LT
130static inline void ptrace_unlink(struct task_struct *child)
131{
132 if (unlikely(child->ptrace))
133 __ptrace_unlink(child);
134}
135
/* Default word-sized PTRACE_PEEKDATA/POKEDATA helpers for arch_ptrace(). */
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
88ac2921
RM
141/**
142 * task_ptrace - return %PT_* flags that apply to a task
143 * @task: pointer to &task_struct in question
144 *
145 * Returns the %PT_* flags that apply to @task.
146 */
147static inline int task_ptrace(struct task_struct *task)
148{
149 return task->ptrace;
150}
151
152/**
153 * ptrace_event - possibly stop for a ptrace event notification
154 * @mask: %PT_* bit to check in @current->ptrace
155 * @event: %PTRACE_EVENT_* value to report if @mask is set
156 * @message: value for %PTRACE_GETEVENTMSG to return
157 *
158 * This checks the @mask bit to see if ptrace wants stops for this event.
159 * If so we stop, reporting @event and @message to the ptrace parent.
160 *
161 * Returns nonzero if we did a ptrace notification, zero if not.
162 *
163 * Called without locks.
164 */
165static inline int ptrace_event(int mask, int event, unsigned long message)
166{
167 if (mask && likely(!(current->ptrace & mask)))
168 return 0;
169 current->ptrace_message = message;
170 ptrace_notify((event << 8) | SIGTRAP);
171 return 1;
172}
173
09a05394
RM
174/**
175 * ptrace_init_task - initialize ptrace state for a new child
176 * @child: new child task
177 * @ptrace: true if child should be ptrace'd by parent's tracer
178 *
179 * This is called immediately after adding @child to its parent's children
180 * list. @ptrace is false in the normal case, and true to ptrace @child.
181 *
182 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
183 */
184static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
185{
186 INIT_LIST_HEAD(&child->ptrace_entry);
187 INIT_LIST_HEAD(&child->ptraced);
188 child->parent = child->real_parent;
189 child->ptrace = 0;
c6a47cc2 190 if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
09a05394 191 child->ptrace = current->ptrace;
c6a47cc2 192 __ptrace_link(child, current->parent);
09a05394
RM
193 }
194}
195
dae33574
RM
196/**
197 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
198 * @task: task in %EXIT_DEAD state
199 *
200 * Called with write_lock(&tasklist_lock) held.
201 */
202static inline void ptrace_release_task(struct task_struct *task)
203{
204 BUG_ON(!list_empty(&task->ptraced));
205 ptrace_unlink(task);
206 BUG_ON(!list_empty(&task->ptrace_entry));
207}
208
#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */
#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called. */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */
85ec7fd9
ON
304#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
305extern void user_single_step_siginfo(struct task_struct *tsk,
306 struct pt_regs *regs, siginfo_t *info);
307#else
308static inline void user_single_step_siginfo(struct task_struct *tsk,
309 struct pt_regs *regs, siginfo_t *info)
310{
311 memset(info, 0, sizeof(*info));
312 info->si_signo = SIGTRAP;
313}
314#endif
315
#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif
/*
 * NOTE(review): presumably fills in @target's current syscall number,
 * up to @maxargs arguments, stack pointer and program counter —
 * confirm against the lib/syscall.c implementation.
 */
extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);
1da177e4
LT
355#endif
356
357#endif