]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* By Ross Biro 1/23/92 */ |
2 | /* | |
3 | * Pentium III FXSR, SSE support | |
4 | * Gareth Hughes <gareth@valinux.com>, May 2000 | |
eee3af4a MM |
5 | * |
6 | * BTS tracing | |
7 | * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007 | |
1da177e4 LT |
8 | */ |
9 | ||
10 | #include <linux/kernel.h> | |
11 | #include <linux/sched.h> | |
12 | #include <linux/mm.h> | |
13 | #include <linux/smp.h> | |
1da177e4 LT |
14 | #include <linux/errno.h> |
15 | #include <linux/ptrace.h> | |
91e7b707 | 16 | #include <linux/regset.h> |
1da177e4 | 17 | #include <linux/user.h> |
070459d9 | 18 | #include <linux/elf.h> |
1da177e4 LT |
19 | #include <linux/security.h> |
20 | #include <linux/audit.h> | |
21 | #include <linux/seccomp.h> | |
7ed20e1a | 22 | #include <linux/signal.h> |
1da177e4 LT |
23 | |
24 | #include <asm/uaccess.h> | |
25 | #include <asm/pgtable.h> | |
26 | #include <asm/system.h> | |
27 | #include <asm/processor.h> | |
28 | #include <asm/i387.h> | |
29 | #include <asm/debugreg.h> | |
30 | #include <asm/ldt.h> | |
31 | #include <asm/desc.h> | |
2047b08b RM |
32 | #include <asm/prctl.h> |
33 | #include <asm/proto.h> | |
eee3af4a MM |
34 | #include <asm/ds.h> |
35 | ||
070459d9 RM |
36 | #include "tls.h" |
37 | ||
/* Indexes into this arch's user_regset array (register views). */
enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_TLS,
};
eee3af4a | 44 | |
1da177e4 LT |
45 | /* |
46 | * does not yet catch signals sent when the child dies. | |
47 | * in exit.c or in signal.c. | |
48 | */ | |
49 | ||
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))
59 | ||
2047b08b RM |
60 | /* |
61 | * Determines whether a value may be installed in a segment register. | |
62 | */ | |
63 | static inline bool invalid_selector(u16 value) | |
64 | { | |
65 | return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL); | |
66 | } | |
67 | ||
68 | #ifdef CONFIG_X86_32 | |
69 | ||
e39c2891 | 70 | #define FLAG_MASK FLAG_MASK_32 |
1da177e4 | 71 | |
62a97d44 | 72 | static long *pt_regs_access(struct pt_regs *regs, unsigned long regno) |
1da177e4 | 73 | { |
65ea5b03 | 74 | BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); |
06ee1b68 | 75 | regno >>= 2; |
62a97d44 RM |
76 | if (regno > FS) |
77 | --regno; | |
65ea5b03 | 78 | return ®s->bx + regno; |
1da177e4 LT |
79 | } |
80 | ||
06ee1b68 | 81 | static u16 get_segment_reg(struct task_struct *task, unsigned long offset) |
1da177e4 | 82 | { |
06ee1b68 RM |
83 | /* |
84 | * Returning the value truncates it to 16 bits. | |
85 | */ | |
86 | unsigned int retval; | |
87 | if (offset != offsetof(struct user_regs_struct, gs)) | |
88 | retval = *pt_regs_access(task_pt_regs(task), offset); | |
89 | else { | |
90 | retval = task->thread.gs; | |
91 | if (task == current) | |
92 | savesegment(gs, retval); | |
93 | } | |
94 | return retval; | |
95 | } | |
96 | ||
97 | static int set_segment_reg(struct task_struct *task, | |
98 | unsigned long offset, u16 value) | |
99 | { | |
100 | /* | |
101 | * The value argument was already truncated to 16 bits. | |
102 | */ | |
2047b08b | 103 | if (invalid_selector(value)) |
06ee1b68 RM |
104 | return -EIO; |
105 | ||
106 | if (offset != offsetof(struct user_regs_struct, gs)) | |
107 | *pt_regs_access(task_pt_regs(task), offset) = value; | |
108 | else { | |
109 | task->thread.gs = value; | |
110 | if (task == current) | |
5fd4d16b RM |
111 | /* |
112 | * The user-mode %gs is not affected by | |
113 | * kernel entry, so we must update the CPU. | |
114 | */ | |
115 | loadsegment(gs, value); | |
1da177e4 | 116 | } |
06ee1b68 | 117 | |
1da177e4 LT |
118 | return 0; |
119 | } | |
120 | ||
2047b08b RM |
121 | static unsigned long debugreg_addr_limit(struct task_struct *task) |
122 | { | |
123 | return TASK_SIZE - 3; | |
124 | } | |
125 | ||
126 | #else /* CONFIG_X86_64 */ | |
127 | ||
128 | #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) | |
129 | ||
130 | static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset) | |
131 | { | |
132 | BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0); | |
133 | return ®s->r15 + (offset / sizeof(regs->r15)); | |
134 | } | |
135 | ||
136 | static u16 get_segment_reg(struct task_struct *task, unsigned long offset) | |
137 | { | |
138 | /* | |
139 | * Returning the value truncates it to 16 bits. | |
140 | */ | |
141 | unsigned int seg; | |
142 | ||
143 | switch (offset) { | |
144 | case offsetof(struct user_regs_struct, fs): | |
145 | if (task == current) { | |
146 | /* Older gas can't assemble movq %?s,%r?? */ | |
147 | asm("movl %%fs,%0" : "=r" (seg)); | |
148 | return seg; | |
149 | } | |
150 | return task->thread.fsindex; | |
151 | case offsetof(struct user_regs_struct, gs): | |
152 | if (task == current) { | |
153 | asm("movl %%gs,%0" : "=r" (seg)); | |
154 | return seg; | |
155 | } | |
156 | return task->thread.gsindex; | |
157 | case offsetof(struct user_regs_struct, ds): | |
158 | if (task == current) { | |
159 | asm("movl %%ds,%0" : "=r" (seg)); | |
160 | return seg; | |
161 | } | |
162 | return task->thread.ds; | |
163 | case offsetof(struct user_regs_struct, es): | |
164 | if (task == current) { | |
165 | asm("movl %%es,%0" : "=r" (seg)); | |
166 | return seg; | |
167 | } | |
168 | return task->thread.es; | |
169 | ||
170 | case offsetof(struct user_regs_struct, cs): | |
171 | case offsetof(struct user_regs_struct, ss): | |
172 | break; | |
173 | } | |
174 | return *pt_regs_access(task_pt_regs(task), offset); | |
175 | } | |
176 | ||
177 | static int set_segment_reg(struct task_struct *task, | |
178 | unsigned long offset, u16 value) | |
179 | { | |
180 | /* | |
181 | * The value argument was already truncated to 16 bits. | |
182 | */ | |
183 | if (invalid_selector(value)) | |
184 | return -EIO; | |
185 | ||
186 | switch (offset) { | |
187 | case offsetof(struct user_regs_struct,fs): | |
188 | /* | |
189 | * If this is setting fs as for normal 64-bit use but | |
190 | * setting fs_base has implicitly changed it, leave it. | |
191 | */ | |
192 | if ((value == FS_TLS_SEL && task->thread.fsindex == 0 && | |
193 | task->thread.fs != 0) || | |
194 | (value == 0 && task->thread.fsindex == FS_TLS_SEL && | |
195 | task->thread.fs == 0)) | |
196 | break; | |
197 | task->thread.fsindex = value; | |
198 | if (task == current) | |
199 | loadsegment(fs, task->thread.fsindex); | |
200 | break; | |
201 | case offsetof(struct user_regs_struct,gs): | |
202 | /* | |
203 | * If this is setting gs as for normal 64-bit use but | |
204 | * setting gs_base has implicitly changed it, leave it. | |
205 | */ | |
206 | if ((value == GS_TLS_SEL && task->thread.gsindex == 0 && | |
207 | task->thread.gs != 0) || | |
208 | (value == 0 && task->thread.gsindex == GS_TLS_SEL && | |
209 | task->thread.gs == 0)) | |
210 | break; | |
211 | task->thread.gsindex = value; | |
212 | if (task == current) | |
213 | load_gs_index(task->thread.gsindex); | |
214 | break; | |
215 | case offsetof(struct user_regs_struct,ds): | |
216 | task->thread.ds = value; | |
217 | if (task == current) | |
218 | loadsegment(ds, task->thread.ds); | |
219 | break; | |
220 | case offsetof(struct user_regs_struct,es): | |
221 | task->thread.es = value; | |
222 | if (task == current) | |
223 | loadsegment(es, task->thread.es); | |
224 | break; | |
225 | ||
226 | /* | |
227 | * Can't actually change these in 64-bit mode. | |
228 | */ | |
229 | case offsetof(struct user_regs_struct,cs): | |
230 | #ifdef CONFIG_IA32_EMULATION | |
231 | if (test_tsk_thread_flag(task, TIF_IA32)) | |
232 | task_pt_regs(task)->cs = value; | |
2047b08b | 233 | #endif |
cb757c41 | 234 | break; |
2047b08b RM |
235 | case offsetof(struct user_regs_struct,ss): |
236 | #ifdef CONFIG_IA32_EMULATION | |
237 | if (test_tsk_thread_flag(task, TIF_IA32)) | |
238 | task_pt_regs(task)->ss = value; | |
2047b08b | 239 | #endif |
cb757c41 | 240 | break; |
2047b08b RM |
241 | } |
242 | ||
243 | return 0; | |
244 | } | |
245 | ||
246 | static unsigned long debugreg_addr_limit(struct task_struct *task) | |
247 | { | |
248 | #ifdef CONFIG_IA32_EMULATION | |
249 | if (test_tsk_thread_flag(task, TIF_IA32)) | |
250 | return IA32_PAGE_OFFSET - 3; | |
251 | #endif | |
252 | return TASK_SIZE64 - 7; | |
253 | } | |
254 | ||
255 | #endif /* CONFIG_X86_32 */ | |
256 | ||
06ee1b68 | 257 | static unsigned long get_flags(struct task_struct *task) |
1da177e4 | 258 | { |
06ee1b68 RM |
259 | unsigned long retval = task_pt_regs(task)->flags; |
260 | ||
261 | /* | |
262 | * If the debugger set TF, hide it from the readout. | |
263 | */ | |
264 | if (test_tsk_thread_flag(task, TIF_FORCED_TF)) | |
265 | retval &= ~X86_EFLAGS_TF; | |
1da177e4 | 266 | |
1da177e4 LT |
267 | return retval; |
268 | } | |
269 | ||
06ee1b68 RM |
270 | static int set_flags(struct task_struct *task, unsigned long value) |
271 | { | |
272 | struct pt_regs *regs = task_pt_regs(task); | |
273 | ||
274 | /* | |
275 | * If the user value contains TF, mark that | |
276 | * it was not "us" (the debugger) that set it. | |
277 | * If not, make sure it stays set if we had. | |
278 | */ | |
279 | if (value & X86_EFLAGS_TF) | |
280 | clear_tsk_thread_flag(task, TIF_FORCED_TF); | |
281 | else if (test_tsk_thread_flag(task, TIF_FORCED_TF)) | |
282 | value |= X86_EFLAGS_TF; | |
283 | ||
284 | regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK); | |
285 | ||
286 | return 0; | |
287 | } | |
288 | ||
289 | static int putreg(struct task_struct *child, | |
290 | unsigned long offset, unsigned long value) | |
291 | { | |
292 | switch (offset) { | |
293 | case offsetof(struct user_regs_struct, cs): | |
294 | case offsetof(struct user_regs_struct, ds): | |
295 | case offsetof(struct user_regs_struct, es): | |
296 | case offsetof(struct user_regs_struct, fs): | |
297 | case offsetof(struct user_regs_struct, gs): | |
298 | case offsetof(struct user_regs_struct, ss): | |
299 | return set_segment_reg(child, offset, value); | |
300 | ||
301 | case offsetof(struct user_regs_struct, flags): | |
302 | return set_flags(child, value); | |
2047b08b RM |
303 | |
304 | #ifdef CONFIG_X86_64 | |
305 | case offsetof(struct user_regs_struct,fs_base): | |
306 | if (value >= TASK_SIZE_OF(child)) | |
307 | return -EIO; | |
308 | /* | |
309 | * When changing the segment base, use do_arch_prctl | |
310 | * to set either thread.fs or thread.fsindex and the | |
311 | * corresponding GDT slot. | |
312 | */ | |
313 | if (child->thread.fs != value) | |
314 | return do_arch_prctl(child, ARCH_SET_FS, value); | |
315 | return 0; | |
316 | case offsetof(struct user_regs_struct,gs_base): | |
317 | /* | |
318 | * Exactly the same here as the %fs handling above. | |
319 | */ | |
320 | if (value >= TASK_SIZE_OF(child)) | |
321 | return -EIO; | |
322 | if (child->thread.gs != value) | |
323 | return do_arch_prctl(child, ARCH_SET_GS, value); | |
324 | return 0; | |
325 | #endif | |
06ee1b68 RM |
326 | } |
327 | ||
328 | *pt_regs_access(task_pt_regs(child), offset) = value; | |
329 | return 0; | |
330 | } | |
331 | ||
332 | static unsigned long getreg(struct task_struct *task, unsigned long offset) | |
333 | { | |
334 | switch (offset) { | |
335 | case offsetof(struct user_regs_struct, cs): | |
336 | case offsetof(struct user_regs_struct, ds): | |
337 | case offsetof(struct user_regs_struct, es): | |
338 | case offsetof(struct user_regs_struct, fs): | |
339 | case offsetof(struct user_regs_struct, gs): | |
340 | case offsetof(struct user_regs_struct, ss): | |
341 | return get_segment_reg(task, offset); | |
342 | ||
343 | case offsetof(struct user_regs_struct, flags): | |
344 | return get_flags(task); | |
2047b08b RM |
345 | |
346 | #ifdef CONFIG_X86_64 | |
347 | case offsetof(struct user_regs_struct, fs_base): { | |
348 | /* | |
349 | * do_arch_prctl may have used a GDT slot instead of | |
350 | * the MSR. To userland, it appears the same either | |
351 | * way, except the %fs segment selector might not be 0. | |
352 | */ | |
353 | unsigned int seg = task->thread.fsindex; | |
354 | if (task->thread.fs != 0) | |
355 | return task->thread.fs; | |
356 | if (task == current) | |
357 | asm("movl %%fs,%0" : "=r" (seg)); | |
358 | if (seg != FS_TLS_SEL) | |
359 | return 0; | |
360 | return get_desc_base(&task->thread.tls_array[FS_TLS]); | |
361 | } | |
362 | case offsetof(struct user_regs_struct, gs_base): { | |
363 | /* | |
364 | * Exactly the same here as the %fs handling above. | |
365 | */ | |
366 | unsigned int seg = task->thread.gsindex; | |
367 | if (task->thread.gs != 0) | |
368 | return task->thread.gs; | |
369 | if (task == current) | |
370 | asm("movl %%gs,%0" : "=r" (seg)); | |
371 | if (seg != GS_TLS_SEL) | |
372 | return 0; | |
373 | return get_desc_base(&task->thread.tls_array[GS_TLS]); | |
374 | } | |
375 | #endif | |
06ee1b68 RM |
376 | } |
377 | ||
378 | return *pt_regs_access(task_pt_regs(task), offset); | |
379 | } | |
380 | ||
91e7b707 RM |
381 | static int genregs_get(struct task_struct *target, |
382 | const struct user_regset *regset, | |
383 | unsigned int pos, unsigned int count, | |
384 | void *kbuf, void __user *ubuf) | |
385 | { | |
386 | if (kbuf) { | |
387 | unsigned long *k = kbuf; | |
388 | while (count > 0) { | |
389 | *k++ = getreg(target, pos); | |
390 | count -= sizeof(*k); | |
391 | pos += sizeof(*k); | |
392 | } | |
393 | } else { | |
394 | unsigned long __user *u = ubuf; | |
395 | while (count > 0) { | |
396 | if (__put_user(getreg(target, pos), u++)) | |
397 | return -EFAULT; | |
398 | count -= sizeof(*u); | |
399 | pos += sizeof(*u); | |
400 | } | |
401 | } | |
402 | ||
403 | return 0; | |
404 | } | |
405 | ||
406 | static int genregs_set(struct task_struct *target, | |
407 | const struct user_regset *regset, | |
408 | unsigned int pos, unsigned int count, | |
409 | const void *kbuf, const void __user *ubuf) | |
410 | { | |
411 | int ret = 0; | |
412 | if (kbuf) { | |
413 | const unsigned long *k = kbuf; | |
414 | while (count > 0 && !ret) { | |
415 | ret = putreg(target, pos, *k++); | |
416 | count -= sizeof(*k); | |
417 | pos += sizeof(*k); | |
418 | } | |
419 | } else { | |
420 | const unsigned long __user *u = ubuf; | |
421 | while (count > 0 && !ret) { | |
422 | unsigned long word; | |
423 | ret = __get_user(word, u++); | |
424 | if (ret) | |
425 | break; | |
426 | ret = putreg(target, pos, word); | |
427 | count -= sizeof(*u); | |
428 | pos += sizeof(*u); | |
429 | } | |
430 | } | |
431 | return ret; | |
432 | } | |
433 | ||
d9771e8c RM |
434 | /* |
435 | * This function is trivial and will be inlined by the compiler. | |
436 | * Having it separates the implementation details of debug | |
437 | * registers from the interface details of ptrace. | |
438 | */ | |
439 | static unsigned long ptrace_get_debugreg(struct task_struct *child, int n) | |
440 | { | |
0f534093 RM |
441 | switch (n) { |
442 | case 0: return child->thread.debugreg0; | |
443 | case 1: return child->thread.debugreg1; | |
444 | case 2: return child->thread.debugreg2; | |
445 | case 3: return child->thread.debugreg3; | |
446 | case 6: return child->thread.debugreg6; | |
447 | case 7: return child->thread.debugreg7; | |
448 | } | |
449 | return 0; | |
d9771e8c RM |
450 | } |
451 | ||
452 | static int ptrace_set_debugreg(struct task_struct *child, | |
453 | int n, unsigned long data) | |
454 | { | |
0f534093 RM |
455 | int i; |
456 | ||
d9771e8c RM |
457 | if (unlikely(n == 4 || n == 5)) |
458 | return -EIO; | |
459 | ||
2047b08b | 460 | if (n < 4 && unlikely(data >= debugreg_addr_limit(child))) |
d9771e8c RM |
461 | return -EIO; |
462 | ||
0f534093 RM |
463 | switch (n) { |
464 | case 0: child->thread.debugreg0 = data; break; | |
465 | case 1: child->thread.debugreg1 = data; break; | |
466 | case 2: child->thread.debugreg2 = data; break; | |
467 | case 3: child->thread.debugreg3 = data; break; | |
468 | ||
469 | case 6: | |
2047b08b RM |
470 | if ((data & ~0xffffffffUL) != 0) |
471 | return -EIO; | |
0f534093 RM |
472 | child->thread.debugreg6 = data; |
473 | break; | |
474 | ||
475 | case 7: | |
d9771e8c RM |
476 | /* |
477 | * Sanity-check data. Take one half-byte at once with | |
478 | * check = (val >> (16 + 4*i)) & 0xf. It contains the | |
479 | * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits | |
480 | * 2 and 3 are LENi. Given a list of invalid values, | |
481 | * we do mask |= 1 << invalid_value, so that | |
482 | * (mask >> check) & 1 is a correct test for invalid | |
483 | * values. | |
484 | * | |
485 | * R/Wi contains the type of the breakpoint / | |
486 | * watchpoint, LENi contains the length of the watched | |
487 | * data in the watchpoint case. | |
488 | * | |
489 | * The invalid values are: | |
2047b08b | 490 | * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit] |
d9771e8c RM |
491 | * - R/Wi == 0x10 (break on I/O reads or writes), so |
492 | * mask |= 0x4444. | |
493 | * - R/Wi == 0x00 && LENi != 0x00, so we have mask |= | |
494 | * 0x1110. | |
495 | * | |
496 | * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54. | |
497 | * | |
498 | * See the Intel Manual "System Programming Guide", | |
499 | * 15.2.4 | |
500 | * | |
501 | * Note that LENi == 0x10 is defined on x86_64 in long | |
502 | * mode (i.e. even for 32-bit userspace software, but | |
503 | * 64-bit kernel), so the x86_64 mask value is 0x5454. | |
504 | * See the AMD manual no. 24593 (AMD64 System Programming) | |
505 | */ | |
2047b08b RM |
506 | #ifdef CONFIG_X86_32 |
507 | #define DR7_MASK 0x5f54 | |
508 | #else | |
509 | #define DR7_MASK 0x5554 | |
510 | #endif | |
d9771e8c RM |
511 | data &= ~DR_CONTROL_RESERVED; |
512 | for (i = 0; i < 4; i++) | |
2047b08b | 513 | if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1) |
d9771e8c | 514 | return -EIO; |
0f534093 | 515 | child->thread.debugreg7 = data; |
d9771e8c RM |
516 | if (data) |
517 | set_tsk_thread_flag(child, TIF_DEBUG); | |
518 | else | |
519 | clear_tsk_thread_flag(child, TIF_DEBUG); | |
0f534093 | 520 | break; |
d9771e8c RM |
521 | } |
522 | ||
d9771e8c RM |
523 | return 0; |
524 | } | |
525 | ||
a95d67f8 | 526 | static int ptrace_bts_get_size(struct task_struct *child) |
eee3af4a MM |
527 | { |
528 | if (!child->thread.ds_area_msr) | |
529 | return -ENXIO; | |
530 | ||
a95d67f8 | 531 | return ds_get_bts_index((void *)child->thread.ds_area_msr); |
eee3af4a MM |
532 | } |
533 | ||
eee3af4a MM |
534 | static int ptrace_bts_read_record(struct task_struct *child, |
535 | long index, | |
536 | struct bts_struct __user *out) | |
537 | { | |
538 | struct bts_struct ret; | |
539 | int retval; | |
a95d67f8 | 540 | int bts_end; |
e4811f25 | 541 | int bts_index; |
eee3af4a MM |
542 | |
543 | if (!child->thread.ds_area_msr) | |
544 | return -ENXIO; | |
545 | ||
e4811f25 MM |
546 | if (index < 0) |
547 | return -EINVAL; | |
548 | ||
a95d67f8 MM |
549 | bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr); |
550 | if (bts_end <= index) | |
e4811f25 MM |
551 | return -EINVAL; |
552 | ||
553 | /* translate the ptrace bts index into the ds bts index */ | |
554 | bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr); | |
555 | bts_index -= (index + 1); | |
556 | if (bts_index < 0) | |
a95d67f8 | 557 | bts_index += bts_end; |
e4811f25 | 558 | |
eee3af4a | 559 | retval = ds_read_bts((void *)child->thread.ds_area_msr, |
e4811f25 | 560 | bts_index, &ret); |
eee3af4a MM |
561 | if (retval) |
562 | return retval; | |
563 | ||
564 | if (copy_to_user(out, &ret, sizeof(ret))) | |
565 | return -EFAULT; | |
566 | ||
567 | return sizeof(ret); | |
568 | } | |
569 | ||
570 | static int ptrace_bts_write_record(struct task_struct *child, | |
571 | const struct bts_struct *in) | |
572 | { | |
573 | int retval; | |
574 | ||
575 | if (!child->thread.ds_area_msr) | |
576 | return -ENXIO; | |
577 | ||
578 | retval = ds_write_bts((void *)child->thread.ds_area_msr, in); | |
579 | if (retval) | |
580 | return retval; | |
581 | ||
582 | return sizeof(*in); | |
583 | } | |
584 | ||
a95d67f8 | 585 | static int ptrace_bts_clear(struct task_struct *child) |
eee3af4a | 586 | { |
a95d67f8 MM |
587 | if (!child->thread.ds_area_msr) |
588 | return -ENXIO; | |
eee3af4a | 589 | |
a95d67f8 MM |
590 | return ds_clear((void *)child->thread.ds_area_msr); |
591 | } | |
592 | ||
593 | static int ptrace_bts_drain(struct task_struct *child, | |
594 | struct bts_struct __user *out) | |
595 | { | |
596 | int end, i; | |
597 | void *ds = (void *)child->thread.ds_area_msr; | |
598 | ||
599 | if (!ds) | |
eee3af4a MM |
600 | return -ENXIO; |
601 | ||
a95d67f8 MM |
602 | end = ds_get_bts_index(ds); |
603 | if (end <= 0) | |
604 | return end; | |
605 | ||
606 | for (i = 0; i < end; i++, out++) { | |
607 | struct bts_struct ret; | |
608 | int retval; | |
609 | ||
610 | retval = ds_read_bts(ds, i, &ret); | |
611 | if (retval < 0) | |
612 | return retval; | |
613 | ||
614 | if (copy_to_user(out, &ret, sizeof(ret))) | |
615 | return -EFAULT; | |
616 | } | |
617 | ||
618 | ds_clear(ds); | |
619 | ||
620 | return i; | |
621 | } | |
622 | ||
623 | static int ptrace_bts_config(struct task_struct *child, | |
624 | const struct ptrace_bts_config __user *ucfg) | |
625 | { | |
626 | struct ptrace_bts_config cfg; | |
627 | unsigned long debugctl_mask; | |
628 | int bts_size, ret; | |
629 | void *ds; | |
630 | ||
631 | if (copy_from_user(&cfg, ucfg, sizeof(cfg))) | |
632 | return -EFAULT; | |
633 | ||
634 | bts_size = 0; | |
635 | ds = (void *)child->thread.ds_area_msr; | |
636 | if (ds) { | |
637 | bts_size = ds_get_bts_size(ds); | |
638 | if (bts_size < 0) | |
639 | return bts_size; | |
640 | } | |
641 | ||
642 | if (bts_size != cfg.size) { | |
643 | ret = ds_free((void **)&child->thread.ds_area_msr); | |
644 | if (ret < 0) | |
645 | return ret; | |
646 | ||
647 | if (cfg.size > 0) | |
648 | ret = ds_allocate((void **)&child->thread.ds_area_msr, | |
649 | cfg.size); | |
650 | ds = (void *)child->thread.ds_area_msr; | |
651 | if (ds) | |
652 | set_tsk_thread_flag(child, TIF_DS_AREA_MSR); | |
653 | else | |
654 | clear_tsk_thread_flag(child, TIF_DS_AREA_MSR); | |
655 | ||
656 | if (ret < 0) | |
657 | return ret; | |
658 | ||
659 | bts_size = ds_get_bts_size(ds); | |
660 | if (bts_size <= 0) | |
661 | return bts_size; | |
662 | } | |
663 | ||
664 | if (ds) { | |
665 | if (cfg.flags & PTRACE_BTS_O_SIGNAL) { | |
666 | ret = ds_set_overflow(ds, DS_O_SIGNAL); | |
667 | } else { | |
668 | ret = ds_set_overflow(ds, DS_O_WRAP); | |
669 | } | |
670 | if (ret < 0) | |
671 | return ret; | |
672 | } | |
673 | ||
674 | debugctl_mask = ds_debugctl_mask(); | |
675 | if (ds && (cfg.flags & PTRACE_BTS_O_TRACE)) { | |
eee3af4a MM |
676 | child->thread.debugctlmsr |= debugctl_mask; |
677 | set_tsk_thread_flag(child, TIF_DEBUGCTLMSR); | |
678 | } else { | |
679 | /* there is no way for us to check whether we 'own' | |
680 | * the respective bits in the DEBUGCTL MSR, we're | |
681 | * about to clear */ | |
682 | child->thread.debugctlmsr &= ~debugctl_mask; | |
683 | ||
684 | if (!child->thread.debugctlmsr) | |
685 | clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); | |
686 | } | |
687 | ||
a95d67f8 | 688 | if (ds && (cfg.flags & PTRACE_BTS_O_SCHED)) |
eee3af4a MM |
689 | set_tsk_thread_flag(child, TIF_BTS_TRACE_TS); |
690 | else | |
691 | clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); | |
692 | ||
693 | return 0; | |
694 | } | |
695 | ||
a95d67f8 MM |
696 | static int ptrace_bts_status(struct task_struct *child, |
697 | struct ptrace_bts_config __user *ucfg) | |
eee3af4a | 698 | { |
a95d67f8 MM |
699 | void *ds = (void *)child->thread.ds_area_msr; |
700 | struct ptrace_bts_config cfg; | |
eee3af4a | 701 | |
a95d67f8 | 702 | memset(&cfg, 0, sizeof(cfg)); |
eee3af4a | 703 | |
a95d67f8 MM |
704 | if (ds) { |
705 | cfg.size = ds_get_bts_size(ds); | |
eee3af4a | 706 | |
a95d67f8 MM |
707 | if (ds_get_overflow(ds) == DS_O_SIGNAL) |
708 | cfg.flags |= PTRACE_BTS_O_SIGNAL; | |
eee3af4a | 709 | |
a95d67f8 MM |
710 | if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) && |
711 | child->thread.debugctlmsr & ds_debugctl_mask()) | |
712 | cfg.flags |= PTRACE_BTS_O_TRACE; | |
eee3af4a | 713 | |
a95d67f8 MM |
714 | if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS)) |
715 | cfg.flags |= PTRACE_BTS_O_SCHED; | |
eee3af4a MM |
716 | } |
717 | ||
a95d67f8 MM |
718 | if (copy_to_user(ucfg, &cfg, sizeof(cfg))) |
719 | return -EFAULT; | |
eee3af4a | 720 | |
a95d67f8 | 721 | return sizeof(cfg); |
eee3af4a MM |
722 | } |
723 | ||
724 | void ptrace_bts_take_timestamp(struct task_struct *tsk, | |
725 | enum bts_qualifier qualifier) | |
726 | { | |
727 | struct bts_struct rec = { | |
728 | .qualifier = qualifier, | |
3c68904f | 729 | .variant.jiffies = jiffies |
eee3af4a MM |
730 | }; |
731 | ||
eee3af4a MM |
732 | ptrace_bts_write_record(tsk, &rec); |
733 | } | |
734 | ||
1da177e4 LT |
735 | /* |
736 | * Called by kernel/ptrace.c when detaching.. | |
737 | * | |
738 | * Make sure the single step bit is not set. | |
739 | */ | |
740 | void ptrace_disable(struct task_struct *child) | |
9e714bed | 741 | { |
7f232343 | 742 | user_disable_single_step(child); |
e9c86c78 | 743 | #ifdef TIF_SYSCALL_EMU |
ab1c23c2 | 744 | clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); |
e9c86c78 | 745 | #endif |
eee3af4a MM |
746 | ptrace_bts_config(child, /* options = */ 0); |
747 | if (child->thread.ds_area_msr) { | |
748 | ds_free((void **)&child->thread.ds_area_msr); | |
749 | clear_tsk_thread_flag(child, TIF_DS_AREA_MSR); | |
750 | } | |
1da177e4 LT |
751 | } |
752 | ||
5a4646a4 RM |
753 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION |
754 | static const struct user_regset_view user_x86_32_view; /* Initialized below. */ | |
755 | #endif | |
756 | ||
481bed45 | 757 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) |
1da177e4 | 758 | { |
5a4646a4 | 759 | int ret; |
1da177e4 LT |
760 | unsigned long __user *datap = (unsigned long __user *)data; |
761 | ||
1da177e4 LT |
762 | switch (request) { |
763 | /* when I and D space are separate, these will need to be fixed. */ | |
9e714bed | 764 | case PTRACE_PEEKTEXT: /* read word at location addr. */ |
76647323 AD |
765 | case PTRACE_PEEKDATA: |
766 | ret = generic_ptrace_peekdata(child, addr, data); | |
1da177e4 | 767 | break; |
1da177e4 LT |
768 | |
769 | /* read the word at location addr in the USER area. */ | |
770 | case PTRACE_PEEKUSR: { | |
771 | unsigned long tmp; | |
772 | ||
773 | ret = -EIO; | |
e9c86c78 RM |
774 | if ((addr & (sizeof(data) - 1)) || addr < 0 || |
775 | addr >= sizeof(struct user)) | |
1da177e4 LT |
776 | break; |
777 | ||
778 | tmp = 0; /* Default return condition */ | |
e9c86c78 | 779 | if (addr < sizeof(struct user_regs_struct)) |
1da177e4 | 780 | tmp = getreg(child, addr); |
e9c86c78 RM |
781 | else if (addr >= offsetof(struct user, u_debugreg[0]) && |
782 | addr <= offsetof(struct user, u_debugreg[7])) { | |
783 | addr -= offsetof(struct user, u_debugreg[0]); | |
784 | tmp = ptrace_get_debugreg(child, addr / sizeof(data)); | |
1da177e4 LT |
785 | } |
786 | ret = put_user(tmp, datap); | |
787 | break; | |
788 | } | |
789 | ||
790 | /* when I and D space are separate, this will have to be fixed. */ | |
791 | case PTRACE_POKETEXT: /* write the word at location addr. */ | |
792 | case PTRACE_POKEDATA: | |
f284ce72 | 793 | ret = generic_ptrace_pokedata(child, addr, data); |
1da177e4 LT |
794 | break; |
795 | ||
796 | case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ | |
797 | ret = -EIO; | |
e9c86c78 RM |
798 | if ((addr & (sizeof(data) - 1)) || addr < 0 || |
799 | addr >= sizeof(struct user)) | |
1da177e4 LT |
800 | break; |
801 | ||
e9c86c78 | 802 | if (addr < sizeof(struct user_regs_struct)) |
1da177e4 | 803 | ret = putreg(child, addr, data); |
e9c86c78 RM |
804 | else if (addr >= offsetof(struct user, u_debugreg[0]) && |
805 | addr <= offsetof(struct user, u_debugreg[7])) { | |
806 | addr -= offsetof(struct user, u_debugreg[0]); | |
807 | ret = ptrace_set_debugreg(child, | |
808 | addr / sizeof(data), data); | |
1da177e4 | 809 | } |
e9c86c78 | 810 | break; |
1da177e4 | 811 | |
5a4646a4 RM |
812 | case PTRACE_GETREGS: /* Get all gp regs from the child. */ |
813 | return copy_regset_to_user(child, | |
814 | task_user_regset_view(current), | |
815 | REGSET_GENERAL, | |
816 | 0, sizeof(struct user_regs_struct), | |
817 | datap); | |
818 | ||
819 | case PTRACE_SETREGS: /* Set all gp regs in the child. */ | |
820 | return copy_regset_from_user(child, | |
821 | task_user_regset_view(current), | |
822 | REGSET_GENERAL, | |
823 | 0, sizeof(struct user_regs_struct), | |
824 | datap); | |
825 | ||
826 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | |
827 | return copy_regset_to_user(child, | |
828 | task_user_regset_view(current), | |
829 | REGSET_FP, | |
830 | 0, sizeof(struct user_i387_struct), | |
831 | datap); | |
832 | ||
833 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | |
834 | return copy_regset_from_user(child, | |
835 | task_user_regset_view(current), | |
836 | REGSET_FP, | |
837 | 0, sizeof(struct user_i387_struct), | |
838 | datap); | |
1da177e4 | 839 | |
e9c86c78 | 840 | #ifdef CONFIG_X86_32 |
5a4646a4 RM |
841 | case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ |
842 | return copy_regset_to_user(child, &user_x86_32_view, | |
843 | REGSET_XFP, | |
844 | 0, sizeof(struct user_fxsr_struct), | |
845 | datap); | |
846 | ||
847 | case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ | |
848 | return copy_regset_from_user(child, &user_x86_32_view, | |
849 | REGSET_XFP, | |
850 | 0, sizeof(struct user_fxsr_struct), | |
851 | datap); | |
e9c86c78 | 852 | #endif |
1da177e4 | 853 | |
e9c86c78 | 854 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION |
1da177e4 | 855 | case PTRACE_GET_THREAD_AREA: |
efd1ca52 RM |
856 | if (addr < 0) |
857 | return -EIO; | |
858 | ret = do_get_thread_area(child, addr, | |
859 | (struct user_desc __user *) data); | |
1da177e4 LT |
860 | break; |
861 | ||
862 | case PTRACE_SET_THREAD_AREA: | |
efd1ca52 RM |
863 | if (addr < 0) |
864 | return -EIO; | |
865 | ret = do_set_thread_area(child, addr, | |
866 | (struct user_desc __user *) data, 0); | |
1da177e4 | 867 | break; |
e9c86c78 RM |
868 | #endif |
869 | ||
870 | #ifdef CONFIG_X86_64 | |
871 | /* normal 64bit interface to access TLS data. | |
872 | Works just like arch_prctl, except that the arguments | |
873 | are reversed. */ | |
874 | case PTRACE_ARCH_PRCTL: | |
875 | ret = do_arch_prctl(child, data, addr); | |
876 | break; | |
877 | #endif | |
1da177e4 | 878 | |
a95d67f8 MM |
879 | case PTRACE_BTS_CONFIG: |
880 | ret = ptrace_bts_config | |
881 | (child, (struct ptrace_bts_config __user *)addr); | |
eee3af4a MM |
882 | break; |
883 | ||
a95d67f8 MM |
884 | case PTRACE_BTS_STATUS: |
885 | ret = ptrace_bts_status | |
886 | (child, (struct ptrace_bts_config __user *)addr); | |
eee3af4a MM |
887 | break; |
888 | ||
a95d67f8 MM |
889 | case PTRACE_BTS_SIZE: |
890 | ret = ptrace_bts_get_size(child); | |
eee3af4a MM |
891 | break; |
892 | ||
a95d67f8 | 893 | case PTRACE_BTS_GET: |
eee3af4a | 894 | ret = ptrace_bts_read_record |
a95d67f8 | 895 | (child, data, (struct bts_struct __user *) addr); |
eee3af4a MM |
896 | break; |
897 | ||
a95d67f8 MM |
898 | case PTRACE_BTS_CLEAR: |
899 | ret = ptrace_bts_clear(child); | |
eee3af4a MM |
900 | break; |
901 | ||
a95d67f8 MM |
902 | case PTRACE_BTS_DRAIN: |
903 | ret = ptrace_bts_drain | |
904 | (child, (struct bts_struct __user *) addr); | |
eee3af4a MM |
905 | break; |
906 | ||
1da177e4 LT |
907 | default: |
908 | ret = ptrace_request(child, request, addr, data); | |
909 | break; | |
910 | } | |
d9771e8c | 911 | |
1da177e4 LT |
912 | return ret; |
913 | } | |
914 | ||
cb757c41 RM |
915 | #ifdef CONFIG_IA32_EMULATION |
916 | ||
099cd6e9 RM |
917 | #include <linux/compat.h> |
918 | #include <linux/syscalls.h> | |
919 | #include <asm/ia32.h> | |
cb757c41 RM |
920 | #include <asm/user32.h> |
921 | ||
/*
 * Case-generator macros mapping an offset in the ia32 'struct user32'
 * onto the corresponding native pt_regs field / segment register.
 */
#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		regs->q = value; break

#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		return set_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs), \
				       value); \
		break

/*
 * Store @value into the register of traced task @child identified by
 * @regno, an offset into the ia32 'struct user32' layout.
 *
 * Returns 0 on success, -EIO for an out-of-range or misaligned offset.
 * Segment registers and eflags go through set_segment_reg()/set_flags()
 * (defined earlier in this file); debug registers through
 * ptrace_set_debugreg().  Writes to other (dummy) fields are silently
 * accepted and discarded.
 */
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		/* u_debugreg[] entries are 4 bytes each in the ia32 layout. */
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
980 | ||
/*
 * Read-side counterparts of the macros above: map an ia32 'struct
 * user32' offset to a fetch of the native pt_regs field / segment.
 */
#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		*val = regs->q; break

#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		*val = get_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs)); \
		break

/*
 * Fetch into *@val the register of traced task @child identified by
 * @regno, an offset into the ia32 'struct user32' layout.
 *
 * Returns 0 on success, -EIO for an out-of-range or misaligned offset.
 * Dummy fields of the virtual user structure read back as zero.
 */
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		/* u_debugreg[] entries are 4 bytes each in the ia32 layout. */
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
1041 | ||
91e7b707 RM |
1042 | static int genregs32_get(struct task_struct *target, |
1043 | const struct user_regset *regset, | |
1044 | unsigned int pos, unsigned int count, | |
1045 | void *kbuf, void __user *ubuf) | |
1046 | { | |
1047 | if (kbuf) { | |
1048 | compat_ulong_t *k = kbuf; | |
1049 | while (count > 0) { | |
1050 | getreg32(target, pos, k++); | |
1051 | count -= sizeof(*k); | |
1052 | pos += sizeof(*k); | |
1053 | } | |
1054 | } else { | |
1055 | compat_ulong_t __user *u = ubuf; | |
1056 | while (count > 0) { | |
1057 | compat_ulong_t word; | |
1058 | getreg32(target, pos, &word); | |
1059 | if (__put_user(word, u++)) | |
1060 | return -EFAULT; | |
1061 | count -= sizeof(*u); | |
1062 | pos += sizeof(*u); | |
1063 | } | |
1064 | } | |
1065 | ||
1066 | return 0; | |
1067 | } | |
1068 | ||
1069 | static int genregs32_set(struct task_struct *target, | |
1070 | const struct user_regset *regset, | |
1071 | unsigned int pos, unsigned int count, | |
1072 | const void *kbuf, const void __user *ubuf) | |
1073 | { | |
1074 | int ret = 0; | |
1075 | if (kbuf) { | |
1076 | const compat_ulong_t *k = kbuf; | |
1077 | while (count > 0 && !ret) { | |
1078 | ret = putreg(target, pos, *k++); | |
1079 | count -= sizeof(*k); | |
1080 | pos += sizeof(*k); | |
1081 | } | |
1082 | } else { | |
1083 | const compat_ulong_t __user *u = ubuf; | |
1084 | while (count > 0 && !ret) { | |
1085 | compat_ulong_t word; | |
1086 | ret = __get_user(word, u++); | |
1087 | if (ret) | |
1088 | break; | |
1089 | ret = putreg(target, pos, word); | |
1090 | count -= sizeof(*u); | |
1091 | pos += sizeof(*u); | |
1092 | } | |
1093 | } | |
1094 | return ret; | |
1095 | } | |
1096 | ||
/*
 * Handle PTRACE_GETSIGINFO / PTRACE_SETSIGINFO from a 32-bit tracer.
 *
 * sys_ptrace() works on the native siginfo_t, so the compat_siginfo_t
 * at user address @data is converted and bounced through a scratch
 * user-space buffer obtained from compat_alloc_user_space().
 *
 * Returns 0 on success or a negative error from the conversion,
 * the copy, or sys_ptrace() itself.
 */
static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
{
	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
	compat_siginfo_t __user *si32 = compat_ptr(data);
	siginfo_t ssi;
	int ret;

	if (request == PTRACE_SETSIGINFO) {
		/* Convert the 32-bit siginfo and stage the native form
		 * where sys_ptrace() will read it. */
		memset(&ssi, 0, sizeof(siginfo_t));
		ret = copy_siginfo_from_user32(&ssi, si32);
		if (ret)
			return ret;
		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
			return -EFAULT;
	}
	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
	if (ret)
		return ret;
	if (request == PTRACE_GETSIGINFO) {
		/* Convert the native result back into the tracer's
		 * 32-bit format. */
		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
			return -EFAULT;
		ret = copy_siginfo_to_user32(si32, &ssi);
	}
	return ret;
}
1122 | ||
1123 | asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) | |
1124 | { | |
1125 | struct task_struct *child; | |
1126 | struct pt_regs *childregs; | |
1127 | void __user *datap = compat_ptr(data); | |
1128 | int ret; | |
1129 | __u32 val; | |
1130 | ||
1131 | switch (request) { | |
1132 | case PTRACE_TRACEME: | |
1133 | case PTRACE_ATTACH: | |
1134 | case PTRACE_KILL: | |
1135 | case PTRACE_CONT: | |
1136 | case PTRACE_SINGLESTEP: | |
1137 | case PTRACE_SINGLEBLOCK: | |
1138 | case PTRACE_DETACH: | |
1139 | case PTRACE_SYSCALL: | |
1140 | case PTRACE_OLDSETOPTIONS: | |
1141 | case PTRACE_SETOPTIONS: | |
1142 | case PTRACE_SET_THREAD_AREA: | |
1143 | case PTRACE_GET_THREAD_AREA: | |
eee3af4a MM |
1144 | case PTRACE_BTS_CONFIG: |
1145 | case PTRACE_BTS_STATUS: | |
a95d67f8 MM |
1146 | case PTRACE_BTS_SIZE: |
1147 | case PTRACE_BTS_GET: | |
1148 | case PTRACE_BTS_CLEAR: | |
1149 | case PTRACE_BTS_DRAIN: | |
099cd6e9 RM |
1150 | return sys_ptrace(request, pid, addr, data); |
1151 | ||
1152 | default: | |
1153 | return -EINVAL; | |
1154 | ||
1155 | case PTRACE_PEEKTEXT: | |
1156 | case PTRACE_PEEKDATA: | |
1157 | case PTRACE_POKEDATA: | |
1158 | case PTRACE_POKETEXT: | |
1159 | case PTRACE_POKEUSR: | |
1160 | case PTRACE_PEEKUSR: | |
1161 | case PTRACE_GETREGS: | |
1162 | case PTRACE_SETREGS: | |
1163 | case PTRACE_SETFPREGS: | |
1164 | case PTRACE_GETFPREGS: | |
1165 | case PTRACE_SETFPXREGS: | |
1166 | case PTRACE_GETFPXREGS: | |
1167 | case PTRACE_GETEVENTMSG: | |
1168 | break; | |
1169 | ||
1170 | case PTRACE_SETSIGINFO: | |
1171 | case PTRACE_GETSIGINFO: | |
1172 | return ptrace32_siginfo(request, pid, addr, data); | |
1173 | } | |
1174 | ||
1175 | child = ptrace_get_task_struct(pid); | |
1176 | if (IS_ERR(child)) | |
1177 | return PTR_ERR(child); | |
1178 | ||
1179 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | |
1180 | if (ret < 0) | |
1181 | goto out; | |
1182 | ||
1183 | childregs = task_pt_regs(child); | |
1184 | ||
1185 | switch (request) { | |
1186 | case PTRACE_PEEKDATA: | |
1187 | case PTRACE_PEEKTEXT: | |
1188 | ret = 0; | |
1189 | if (access_process_vm(child, addr, &val, sizeof(u32), 0) != | |
1190 | sizeof(u32)) | |
1191 | ret = -EIO; | |
1192 | else | |
1193 | ret = put_user(val, (unsigned int __user *)datap); | |
1194 | break; | |
1195 | ||
1196 | case PTRACE_POKEDATA: | |
1197 | case PTRACE_POKETEXT: | |
1198 | ret = 0; | |
1199 | if (access_process_vm(child, addr, &data, sizeof(u32), 1) != | |
1200 | sizeof(u32)) | |
1201 | ret = -EIO; | |
1202 | break; | |
1203 | ||
1204 | case PTRACE_PEEKUSR: | |
1205 | ret = getreg32(child, addr, &val); | |
1206 | if (ret == 0) | |
1207 | ret = put_user(val, (__u32 __user *)datap); | |
1208 | break; | |
1209 | ||
1210 | case PTRACE_POKEUSR: | |
1211 | ret = putreg32(child, addr, data); | |
1212 | break; | |
1213 | ||
5a4646a4 RM |
1214 | case PTRACE_GETREGS: /* Get all gp regs from the child. */ |
1215 | return copy_regset_to_user(child, &user_x86_32_view, | |
1216 | REGSET_GENERAL, | |
1217 | 0, sizeof(struct user_regs_struct32), | |
1218 | datap); | |
1219 | ||
1220 | case PTRACE_SETREGS: /* Set all gp regs in the child. */ | |
1221 | return copy_regset_from_user(child, &user_x86_32_view, | |
1222 | REGSET_GENERAL, 0, | |
1223 | sizeof(struct user_regs_struct32), | |
1224 | datap); | |
1225 | ||
1226 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | |
1227 | return copy_regset_to_user(child, &user_x86_32_view, | |
1228 | REGSET_FP, 0, | |
1229 | sizeof(struct user_i387_ia32_struct), | |
1230 | datap); | |
1231 | ||
1232 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | |
1233 | return copy_regset_from_user( | |
1234 | child, &user_x86_32_view, REGSET_FP, | |
1235 | 0, sizeof(struct user_i387_ia32_struct), datap); | |
1236 | ||
1237 | case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ | |
1238 | return copy_regset_to_user(child, &user_x86_32_view, | |
1239 | REGSET_XFP, 0, | |
1240 | sizeof(struct user32_fxsr_struct), | |
1241 | datap); | |
1242 | ||
1243 | case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ | |
1244 | return copy_regset_from_user(child, &user_x86_32_view, | |
1245 | REGSET_XFP, 0, | |
1246 | sizeof(struct user32_fxsr_struct), | |
1247 | datap); | |
099cd6e9 RM |
1248 | |
1249 | case PTRACE_GETEVENTMSG: | |
1250 | ret = put_user(child->ptrace_message, | |
1251 | (unsigned int __user *)compat_ptr(data)); | |
1252 | break; | |
1253 | ||
1254 | default: | |
1255 | BUG(); | |
1256 | } | |
1257 | ||
1258 | out: | |
1259 | put_task_struct(child); | |
1260 | return ret; | |
1261 | } | |
1262 | ||
cb757c41 RM |
1263 | #endif /* CONFIG_IA32_EMULATION */ |
1264 | ||
070459d9 RM |
#ifdef CONFIG_X86_64

/*
 * Native 64-bit regsets: general registers (NT_PRSTATUS) and FPU state
 * (NT_PRFPREG), both in 8-byte units.
 */
static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

/*
 * On a 32-bit kernel the "32" names are simply the native ones; these
 * aliases let the x86_32_regsets table below be shared with the
 * IA32-emulation build.
 */
#define user_regs_struct32 user_regs_struct
#define genregs32_get genregs_get
#define genregs32_set genregs_set

#endif	/* CONFIG_X86_64 */
1294 | ||
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * The i386-format regsets, used both by native 32-bit kernels and by
 * IA32 emulation on 64-bit kernels.
 */
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		/*
		 * The i386 NT_PRFPREG note is the fsave image
		 * (struct user_i387_ia32_struct), matching what
		 * PTRACE_GETFPREGS copies in sys32_ptrace() -- not the
		 * larger 64-bit user_i387_struct.
		 */
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		/* FXSAVE image, as copied for PTRACE_GETFPXREGS. */
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif
1330 | ||
/*
 * Return the user_regset_view matching @task's ABI: the i386 view for
 * 32-bit tasks (always, on a 32-bit kernel; when TIF_IA32 is set, under
 * IA32 emulation), otherwise the x86_64 view.
 *
 * The preprocessor interleaving below makes the 'if' guard the 32-bit
 * return only in the IA32-emulation build; on pure 32-bit or pure
 * 64-bit kernels exactly one unconditional return survives.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
1343 | ||
86976cd8 RM |
1344 | #ifdef CONFIG_X86_32 |
1345 | ||
1da177e4 LT |
1346 | void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) |
1347 | { | |
1348 | struct siginfo info; | |
1349 | ||
1350 | tsk->thread.trap_no = 1; | |
1351 | tsk->thread.error_code = error_code; | |
1352 | ||
1353 | memset(&info, 0, sizeof(info)); | |
1354 | info.si_signo = SIGTRAP; | |
1355 | info.si_code = TRAP_BRKPT; | |
1356 | ||
65ea5b03 PA |
1357 | /* User-mode ip? */ |
1358 | info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL; | |
1da177e4 | 1359 | |
27b46d76 | 1360 | /* Send us the fake SIGTRAP */ |
1da177e4 LT |
1361 | force_sig_info(SIGTRAP, &info, tsk); |
1362 | } | |
1363 | ||
/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * Called on syscall entry (@entryexit == 0) and exit (@entryexit != 0).
 * Handles, in order: seccomp, audit exit logging, the singlestep fake
 * SIGTRAP, the ptrace syscall-stop notification, and audit entry
 * logging.  Returns non-zero only on the sysemu path (ret = is_sysemu
 * below), in which case orig_ax is clobbered so the syscall itself is
 * skipped/not restarted.
 */
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
	/*
	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
	 * interception
	 */
	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
	int ret = 0;

	/* do the secure computing check first */
	if (!entryexit)
		secure_computing(regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (entryexit)
			audit_syscall_exit(AUDITSC_RESULT(regs->ax),
					   regs->ax);
		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
		 * not used, entry.S will call us only on syscall exit, not
		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
		 * calling send_sigtrap() on syscall entry.
		 *
		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
		 * is_singlestep is false, despite his name, so we will still do
		 * the correct thing.
		 */
		else if (is_singlestep)
			goto out;
	}

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
	 * here. We have to check this and return */
	if (is_sysemu && entryexit)
		return 0;

	/* Fake a debug trap */
	if (is_singlestep)
		send_sigtrap(current, regs, 0);

	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
		goto out;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	/* Note that the debugger could change the result of test_thread_flag!*/
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	ret = is_sysemu;
out:
	/* Audit entry logging happens last so the tracer's register
	 * edits during the stop above are what gets recorded. */
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
				    regs->bx, regs->cx, regs->dx, regs->si);
	if (ret == 0)
		return 0;

	regs->orig_ax = -1; /* force skip of syscall restarting */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
	return 1;
}
86976cd8 RM |
1443 | |
1444 | #else /* CONFIG_X86_64 */ | |
1445 | ||
1446 | static void syscall_trace(struct pt_regs *regs) | |
1447 | { | |
1448 | ||
1449 | #if 0 | |
1450 | printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n", | |
1451 | current->comm, | |
1452 | regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0), | |
1453 | current_thread_info()->flags, current->ptrace); | |
1454 | #endif | |
1455 | ||
1456 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | |
1457 | ? 0x80 : 0)); | |
1458 | /* | |
1459 | * this isn't the same as continuing with a signal, but it will do | |
1460 | * for normal use. strace only continues with a signal if the | |
1461 | * stopping signal is not SIGTRAP. -brl | |
1462 | */ | |
1463 | if (current->exit_code) { | |
1464 | send_sig(current->exit_code, current, 1); | |
1465 | current->exit_code = 0; | |
1466 | } | |
1467 | } | |
1468 | ||
/*
 * 64-bit syscall entry hook: seccomp check first, then the ptrace
 * syscall stop, then audit logging with the argument registers of the
 * task's ABI (ia32 tasks pass args in bx/cx/dx/si, native 64-bit in
 * di/si/dx/r10).
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);

	if (unlikely(current->audit_context)) {
		if (test_thread_flag(TIF_IA32)) {
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
		} else {
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
		}
	}
}
1492 | ||
1493 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) | |
1494 | { | |
1495 | if (unlikely(current->audit_context)) | |
1496 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | |
1497 | ||
1498 | if ((test_thread_flag(TIF_SYSCALL_TRACE) | |
1499 | || test_thread_flag(TIF_SINGLESTEP)) | |
1500 | && (current->ptrace & PT_PTRACED)) | |
1501 | syscall_trace(regs); | |
1502 | } | |
1503 | ||
1504 | #endif /* CONFIG_X86_32 */ |