/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
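/*
 * stack_addr() yields the kernel stack pointer at the time of the trap;
 * for kretprobes this is where the return address of the probed function
 * sits.
 */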
#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 */
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
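/*
 * Lookup happens via test_bit() in can_boost() below: two W() rows are
 * OR-ed into each u32 word, so bit (opcode & 0x1f) of word (opcode >> 5)
 * records whether the two-byte opcode 0x0f <opcode> is boostable.
 */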
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
static void __kprobes set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) * jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_OPCODE;
}
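/*
 * The jump written above is the 5-byte "jmp rel32" form: one opcode byte
 * plus a signed 32-bit displacement taken relative to the end of the
 * jump instruction itself, hence the "+ 5" in the calculation.
 */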
/*
 * Check for the REX prefix which can only exist on X86_64.
 * X86_32 always returns 0.
 */
static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
{
#ifdef CONFIG_X86_64
	if ((*insn & 0xf0) == 0x40)
		return 1;
#endif
	return 0;
}
/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
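/*
 * "Boosting" means the copied instruction in the insn slot is followed
 * by a jump back into the original instruction stream (see set_jmp_op()
 * use in resume_execution() below), so the copy can run to completion
 * without taking the single-step debug trap.
 */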
/* Recover the probed instruction at addr for further analysis. */
static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	if (!kp)
		return -EINVAL;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped
	 * at a different place, so fix_riprel() tweaks the displacement
	 * of that instruction. In that case, we can't recover the
	 * instruction from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which is overwritten by int3. And
	 * since the instruction at kp->addr is not modified by kprobes
	 * except for the first byte, we can recover the original
	 * instruction from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return 0;
}
/* Dummy buffer for kallsyms_lookup */
static char __dummy_buf[KSYM_NAME_LEN];

/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
	int ret;
	unsigned long addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				/*
				 * Another debugging subsystem might insert
				 * this breakpoint. In that case, we can't
				 * recover it.
				 */
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		addr += insn.length;
	}

	return (addr == paddr);
}
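/*
 * Because x86 instructions are variable-length, the only way to know
 * whether paddr is a real instruction boundary is to decode forward from
 * a known boundary -- the start of the containing symbol -- until the
 * decoder either lands exactly on paddr or walks past it.
 */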
/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	/*
	 * on X86_64, 0x40-0x4f are REX prefixes, so we need to look
	 * at the next byte instead.. but of course not recurse infinitely
	 */
	if (is_REX_prefix(insn))
		return is_IF_modifier(++insn);

	return 0;
}
/*
 * Adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Only applicable to 64-bit x86.
 */
static void __kprobes fix_riprel(struct kprobe *p)
{
#ifdef CONFIG_X86_64
	struct insn insn;
	kernel_insn_init(&insn, p->ainsn.insn);

	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) p->addr + (s64) insn.displacement.value -
			  (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
		disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
}
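/*
 * For instance (addresses purely illustrative): a "lea 0x10(%rip),%rax"
 * probed at original address A and copied to slot C must have its
 * displacement rewritten to 0x10 + (A - C), so the copy still resolves
 * to the same absolute target A + insn_len + 0x10.
 */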
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	fix_riprel(p);

	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
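/*
 * If the probed instruction itself modifies IF, the IF bit is dropped
 * from kprobe_saved_flags so that post_kprobe_handler() does not
 * overwrite the interrupt-flag state the instruction just established.
 */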
static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(0);
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(current->thread.debugctlmsr);
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
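/*
 * The stolen return address is kept in ri->ret_addr; trampoline_handler()
 * below hands it back once the kretprobe handler has run, so the probed
 * function still returns to its real caller.
 */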
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	}

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (kprobe_running()) {
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
#ifdef CONFIG_X86_64
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax. */		\
	"	subq $24, %rsp\n"		\
	"	pushq %rdi\n	pushq %rsi\n	pushq %rdx\n	pushq %rcx\n" \
	"	pushq %rax\n	pushq %r8\n	pushq %r9\n	pushq %r10\n" \
	"	pushq %r11\n	pushq %rbx\n	pushq %rbp\n	pushq %r12\n" \
	"	pushq %r13\n	pushq %r14\n	pushq %r15\n"
#define RESTORE_REGS_STRING			\
	"	popq %r15\n	popq %r14\n	popq %r13\n	popq %r12\n" \
	"	popq %rbp\n	popq %rbx\n	popq %r11\n	popq %r10\n" \
	"	popq %r9\n	popq %r8\n	popq %rax\n	popq %rcx\n" \
	"	popq %rdx\n	popq %rsi\n	popq %rdi\n" \
	/* Skip orig_ax, ip, cs */		\
	"	addq $24, %rsp\n"
#else
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax and gs. */	\
	"	subl $16, %esp\n"		\
	"	pushl %fs\n	pushl %es\n	pushl %ds\n	pushl %eax\n" \
	"	pushl %ebp\n	pushl %edi\n	pushl %esi\n	pushl %edx\n" \
	"	pushl %ecx\n	pushl %ebx\n"
#define RESTORE_REGS_STRING			\
	"	popl %ebx\n	popl %ecx\n	popl %edx\n	popl %esi\n" \
	"	popl %edi\n	popl %ebp\n	popl %eax\n" \
	/* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
	"	addl $24, %esp\n"
#endif
/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt. We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction. We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip the REX prefix */
	if (is_REX_prefix(insn))
		insn++;

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable.
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable.
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the ip points back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs)) {
			/*
			 * Reset the BS bit in dr6 (pointed to by args->err)
			 * to denote completion of processing.
			 */
			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
			ret = NOTIFY_STOP;
		}
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
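/*
 * jprobe_return() above swaps the stack pointer back to the value saved
 * at jprobe entry and executes an int3; the resulting trap is routed to
 * kprobe_handler(), whose break_handler path lands in
 * longjmp_break_handler() below to restore the saved register state.
 */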
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
int __init arch_init_kprobes(void)
{
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
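/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * module exercises this arch code through the generic API in
 * kernel/kprobes.c. The symbol name and handler below are placeholders.
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit at %p, ip=%lx\n",
 *		       p->addr, regs->ip);
 *		return 0;
 *	}
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= pre,
 *	};
 *
 *	register_kprobe(&kp);	...	unregister_kprobe(&kp);
 */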