/*
 *  Kernel Probes (KProbes)
 *  arch/x86_64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <kenistoj@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
#include <asm/uaccess.h>

void jprobe_return_end(void);
static void __kprobes arch_copy_kprobe(struct kprobe *p);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static __always_inline int is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf)
		return 1;
	return 0;
}
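
/*
 * Example: "iretq" carrying a REX.W prefix encodes as 0x48 0xcf.  The
 * final check above steps past any REX prefix (0x40-0x4f) before
 * testing for 0xcf, so the prefixed form is classified the same way
 * as a bare iret.
 */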

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86_64. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		return -ENOMEM;
	}
	arch_copy_kprobe(p);
	return 0;
}
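
/*
 * Usage sketch (illustrative only, not part of this file): a client
 * fills in kprobe.addr and the handlers, then register_kprobe() invokes
 * arch_prepare_kprobe() above to allocate the instruction slot.
 * "probed_addr" is a placeholder for an address the caller resolves.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("probe hit at %p, rip=%lx\n", p->addr, regs->rip);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.pre_handler = my_pre,
 *	};
 *
 *	my_kp.addr = (kprobe_opcode_t *) probed_addr;
 *	register_kprobe(&my_kp);
 */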

/*
 * Determine if the instruction uses the %rip-relative addressing mode.
 * If it does, return the address of the 32-bit displacement word.
 * If not, return null.
 */
static s32 __kprobes *is_riprel(u8 *insn)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 64))
	static const u64 onebyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)|	/* 00 */
		W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)|	/* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)|	/* 20 */
		W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0),	/* 30 */
		W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)|	/* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)|	/* 50 */
		W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)|	/* 60 */
		W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),	/* 70 */
		W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)|	/* 80 */
		W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)|	/* 90 */
		W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)|	/* a0 */
		W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),	/* b0 */
		W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)|	/* c0 */
		W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)|	/* d0 */
		W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)|	/* e0 */
		W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)	/* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
	static const u64 twobyte_has_modrm[256 / 64] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)|	/* 0f */
		W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)|	/* 1f */
		W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)|	/* 2f */
		W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),	/* 3f */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)|	/* 4f */
		W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)|	/* 5f */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)|	/* 6f */
		W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1),	/* 7f */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)|	/* 8f */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)|	/* 9f */
		W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)|	/* af */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1),	/* bf */
		W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)|	/* cf */
		W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)|	/* df */
		W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)|	/* ef */
		W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)	/* ff */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
	int need_modrm;

	/* Skip legacy instruction prefixes.  */
	while (1) {
		switch (*insn) {
		case 0x66:
		case 0x67:
		case 0x2e:
		case 0x3e:
		case 0x26:
		case 0x64:
		case 0x65:
		case 0x36:
		case 0xf0:
		case 0xf3:
		case 0xf2:
			++insn;
			continue;
		}
		break;
	}

	/* Skip REX instruction prefix.  */
	if ((*insn & 0xf0) == 0x40)
		++insn;

	if (*insn == 0x0f) {	/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn, twobyte_has_modrm);
	} else {		/* One-byte opcode.  */
		need_modrm = test_bit(*insn, onebyte_has_modrm);
	}

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			return (s32 *) ++insn;
		}
	}

	/* No %rip-relative addressing mode here.  */
	return NULL;
}
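
/*
 * Worked example (illustrative): "mov 0x1234(%rip),%rax" encodes as
 * 48 8b 05 34 12 00 00.  is_riprel() skips the REX prefix 0x48, finds
 * that the one-byte opcode 0x8b has a ModRM byte per the table above,
 * reads ModRM 0x05 (mod=00, r/m=101, i.e. %rip+disp32), and returns
 * a pointer to the 4-byte displacement that follows.
 */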

static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	s32 *ripdisp;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
	ripdisp = is_riprel(p->ainsn.insn);
	if (ripdisp) {
		/*
		 * The copied instruction uses the %rip-relative
		 * addressing mode.  Adjust the displacement for the
		 * difference between the original location of this
		 * instruction and the location of the copy that will
		 * actually be run.  The tricky bit here is making sure
		 * that the sign extension happens correctly in this
		 * calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the
		 * %rip value and yield the same 64-bit result that the
		 * sign-extension of the original signed 32-bit
		 * displacement would have given.
		 */
		s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) disp != disp);	/* Sanity check.  */
		*ripdisp = disp;
	}
	p->opcode = *p->addr;
}
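
/*
 * Numeric sketch of the fixup above (made-up addresses): an original
 * instruction at A with displacement d targets A + insn_len + d.  For
 * the copy at C, the new displacement becomes A + d - C, so that
 * C + insn_len + (A + d - C) resolves to the very same target.  The
 * BUG_ON() catches the case where A - C no longer fits in a signed
 * 32-bit displacement.
 */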

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn);
	mutex_unlock(&kprobe_mutex);
}
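
/*
 * Note: BREAKPOINT_INSTRUCTION is the one-byte int3 opcode (0xcc), so
 * arming a probe is a single byte store plus an icache flush, and
 * disarming restores the first byte that arch_copy_kprobe() saved in
 * p->opcode.
 */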

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_rflags &= ~IF_MASK;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single-step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->rip = (unsigned long)p->addr;
	else
		regs->rip = (unsigned long)p->ainsn.insn;
}
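
/*
 * prepare_singlestep() leans on the hardware trap flag: with TF set,
 * the CPU raises a debug exception after exactly one instruction,
 * which reaches post_kprobe_handler() via the DIE_DEBUG notifier
 * below.  IF is cleared so no interrupt can arrive between the step
 * and the fixup.
 */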

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)regs->rsp;
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *) *sara;

		/* Replace the return addr with trampoline addr */
		*sara = (unsigned long) &kretprobe_trampoline;

		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
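
/*
 * Usage sketch (illustrative only, not part of this file): a kretprobe
 * wraps a kprobe placed at function entry; this hook then swaps the
 * return address on the stack for the trampoline.  "probed_func" is a
 * placeholder resolved by the caller.
 *
 *	static int my_ret(struct kretprobe_instance *ri,
 *			  struct pt_regs *regs)
 *	{
 *		printk("returning %lx to %p\n", regs->rax, ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret,
 *		.maxactive = 16,	// concurrent instances to track
 *	};
 *
 *	my_rp.kp.addr = (kprobe_opcode_t *) probed_func;
 *	register_kretprobe(&my_rp);
 */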

int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_rflags;
				goto no_kprobe;
			} else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
				/* TODO: Provide re-entrancy from
				 * post_kprobes_handler() and avoid exception
				 * stack corruption while single-stepping on
				 * the instruction of the new probe.
				 */
				arch_disarm_kprobe(p);
				regs->rip = (unsigned long)p->addr;
				reset_current_kprobe();
				ret = 1;
			} else {
				/* We have reentered kprobe_handler() because
				 * another probe was hit while within the
				 * handler.  Save the original kprobe
				 * variables and just single-step on the
				 * instruction of the new probe, without
				 * calling any user handlers.
				 */
				save_previous_kprobe(kcb);
				set_current_kprobe(p, regs, kcb);
				kprobes_inc_nmissed_count(p);
				prepare_singlestep(p, regs);
				kcb->kprobe_status = KPROBE_REENTER;
				return 1;
			}
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate.
			 */
				regs->rip = (unsigned long)addr;
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->rip = (unsigned long)addr;
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
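
/*
 * Taken together, kcb->kprobe_status moves through a small state
 * machine: KPROBE_HIT_ACTIVE once the int3 is claimed (pre_handler
 * runs), KPROBE_HIT_SS while the copied instruction is single-stepped,
 * KPROBE_HIT_SSDONE when the post_handler is invoked, and
 * KPROBE_REENTER when a second probe fires from within a handler.
 */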

/*
 * For function-return probes, arch_init_kprobes() (below) establishes
 * a probepoint here.  When a retprobed function returns, this probe is
 * hit and trampoline_probe_handler() runs, calling the kretprobe's
 * handler.
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile (  ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"nop\n");
}
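
/*
 * The trampoline lives inside a dummy C function so it gets a proper
 * symbol in .text; the nop is the instruction that trampoline_p below
 * probes.  Nothing ever calls kretprobe_trampoline_holder() itself;
 * retprobed functions "return" straight to the kretprobe_trampoline
 * label.
 */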

/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task, either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->rip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
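
/*
 * Example (illustrative): if a retprobed f() calls a retprobed g(), the
 * per-task list holds the g-instance ahead of the f-instance.  When g()
 * returns through the trampoline, the loop above runs g's handler,
 * recycles its instance, and breaks at g's real return address, leaving
 * the f-instance in place for f()'s own return.
 */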

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new rip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)regs->rsp;
	unsigned long next_rip = 0;
	unsigned long copy_rip = (unsigned long)p->ainsn.insn;
	unsigned long orig_rip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* skip the REX prefix */
	if (*insn >= 0x40 && *insn <= 0x4f)
		insn++;

	switch (*insn) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_rflags;
		break;
	case 0xc3:		/* ret/lret */
	case 0xcb:
	case 0xc2:
	case 0xca:
		regs->eflags &= ~TF_MASK;
		/* rip is already adjusted, no more changes required */
		return;
	case 0xe8:		/* call relative - Fix return addr */
		*tos = orig_rip + (*tos - copy_rip);
		break;
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; rip is correct. */
			next_rip = regs->rip;
			*tos = orig_rip + (*tos - copy_rip);
		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* rip is correct. */
			next_rip = regs->rip;
		}
		break;
	case 0xea:		/* jmp absolute -- rip is correct */
		next_rip = regs->rip;
		break;
	default:
		break;
	}

	regs->eflags &= ~TF_MASK;
	if (next_rip) {
		regs->rip = next_rip;
	} else {
		regs->rip = orig_rip + (regs->rip - copy_rip);
	}
}
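
/*
 * Example of the default fixup (made-up addresses): a 2-byte probed
 * instruction at orig_rip = 0xffffffff80200000 is stepped from its
 * copy at copy_rip = 0xffffffff80320000.  Afterwards regs->rip is
 * 0xffffffff80320002, and the final assignment rewrites it to
 * orig_rip + 2 = 0xffffffff80200002, exactly as if the original
 * instruction had executed in place.
 */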

int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_rflags;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *fixup;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe so that the rip points back to the probe
		 * address, and allow the page fault handler to
		 * continue as a normal page fault.
		 */
		regs->rip = (unsigned long)cur->addr;
		regs->eflags |= kcb->kprobe_old_rflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also account for
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return 1;
		}

		/*
		 * fixup() could not handle it.
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
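
/*
 * Example (illustrative): if a pre_handler dereferences a bad user
 * pointer via __get_user(), the fault arrives here in KPROBE_HIT_ACTIVE
 * state; the exception-table lookup above then redirects rip just as it
 * would for the same fault outside a probe handler.
 */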

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
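
/*
 * This notifier is hooked into the die-notifier chain by the generic
 * kprobes core (register_die_notifier()), so int3 and debug traps are
 * offered to the handlers above before the kernel's default handling
 * runs.
 */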

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_rsp = (long *) regs->rsp;
	addr = (unsigned long)(kcb->jprobe_saved_rsp);
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	regs->rip = (unsigned long)(jp->entry);
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("	xchg  %%rbx,%%rsp	\n"
		      "	int3			\n"
		      "	.globl jprobe_return_end\n"
		      "	jprobe_return_end:	\n"
		      "	nop			\n"::"b"
		      (kcb->jprobe_saved_rsp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->rip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if ((long *)regs->rsp != kcb->jprobe_saved_rsp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_rsp,
					 struct pt_regs, rsp);
			printk("current rsp %p does not match saved rsp %p\n",
			       (long *)regs->rsp, kcb->jprobe_saved_rsp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
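
/*
 * Usage sketch for jprobes (illustrative only, not part of this file):
 * the entry handler mirrors the probed function's signature, sees its
 * arguments via the register/stack image saved in setjmp_pre_handler(),
 * and must finish with jprobe_return().  The two-argument signature
 * here is a made-up stand-in.
 *
 *	static long my_entry(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk("arg0=%lx arg1=%lx\n", arg0, arg1);
 *		jprobe_return();	// never falls through
 *		return 0;		// unreachable
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_entry,
 *	};
 *
 *	// my_jp.kp.addr resolved by the caller, then:
 *	register_jprobe(&my_jp);
 */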

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}