/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

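/*
 * Illustrative note (not a real call site): on most architectures the
 * macro above simply reduces to
 *
 *	addr = (kprobe_opcode_t *)kallsyms_lookup_name("symbol");
 *
 * while architectures with function descriptors supply their own
 * definition that resolves the function's actual entry point.
 */
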
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

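/*
 * Lifecycle sketch of a slot (a summary of the code below, not extra
 * machinery): get_insn_slot() hands out a SLOT_CLEAN entry and marks it
 * SLOT_USED.  free_insn_slot() with 'dirty' set marks the slot SLOT_DIRTY
 * and defers the real release until collect_garbage_slots() has used
 * check_safety() to confirm that no preempted thread can still be
 * single-stepping from it, after which the slot returns to SLOT_CLEAN.
 */
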
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/* Return 1 if this slot's page became fully unused, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

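/*
 * Illustrative sketch (handler_a/handler_b and the probed symbol are
 * hypothetical): a second kprobe registered at an address that already
 * holds one takes this aggregate path, and aggr_pre_handler() then invokes
 * every pre_handler on the manager kprobe's list:
 *
 *	static struct kprobe kp1 = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_a,
 *	};
 *	static struct kprobe kp2 = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_b,
 *	};
 *
 *	register_kprobe(&kp1);
 *	register_kprobe(&kp2);	// folded into the "manager kprobe"
 */
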
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address.  That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled) {
		if (atomic_add_return(1, &kprobe_count) ==
		    (ARCH_INACTIVE_KPROBE_COUNT + 1))
			register_page_fault_notifier(&kprobe_page_fault_nb);

		arch_arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}

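/*
 * Usage sketch (not part of this file; the handler name and the probed
 * symbol "do_fork" are illustrative choices): a module arms a probe by
 * filling in a struct kprobe and calling register_kprobe(), and must pair
 * it with unregister_kprobe() before unloading.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	// let the probed instruction execute
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init probe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit probe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 */
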
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

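/*
 * Usage sketch (illustrative; jdo_fork and the do_fork signature shown are
 * assumptions about the probed kernel): a jprobe's entry mirrors the probed
 * function's signature so the arguments arrive by value, and it must finish
 * by calling jprobe_return().
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// restores the saved context
 *		return 0;		// never reached
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry	= jdo_fork,
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_jprobe(&jp);
 */
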
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp,
			(unsigned long)__builtin_return_address(0));
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

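/*
 * Usage sketch (illustrative; the probed symbol and the i386 register used
 * for the return value are assumptions): the handler runs as the probed
 * function returns, with maxactive bounding how many returns can be
 * tracked concurrently.
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		// the return value lives in an arch-specific register,
 *		// e.g. regs->eax on i386
 *		printk(KERN_INFO "sys_open returned %ld\n", regs->eax);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp		= { .symbol_name = "sys_open" },
 *		.maxactive	= 20,
 *	};
 *
 *	register_kretprobe(&my_kretprobe);
 */
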
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	/*
	 * Re-register the page fault notifier only if there are any
	 * active probes at the time of enabling kprobes globally
	 */
	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
		register_page_fault_notifier(&kprobe_page_fault_nb);

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();

	mutex_lock(&kprobe_mutex);
	/* Unconditionally unregister the page_fault notifier */
	unregister_page_fault_notifier(&kprobe_page_fault_nb);

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =  read_enabled_file_bool,
	.write = write_enabled_file_bool,
};

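/*
 * Usage sketch (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug): writing '0'/'n'/'N' or '1'/'y'/'Y' to
 * <debugfs>/kprobes/enabled calls disable_all_kprobes() or
 * enable_all_kprobes() respectively, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 */
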
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);