/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
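
/*
 * Illustrative use of kprobe_lookup_name() (a sketch only; "do_fork" is
 * just an example symbol):
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		...the symbol was not found, addr is NULL...
 */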

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we want to prohibit kprobes from probing are
 * marked __kprobes. But there are cases where such functions already
 * belong to a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{NULL}	/* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
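/*
 * Example arithmetic for the define above (illustrative): on x86, where
 * MAX_INSN_SIZE is 16 and kprobe_opcode_t is a single byte, a 4KB page
 * yields 4096 / (16 * 1) = 256 instruction slots.
 */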

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

/*
 * Check that it is safe to release instruction slots: on a preemptible
 * kernel a task could have been preempted inside a slot, so freeze
 * everything and verify that no other task is still running.
 */
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk(KERN_ERR "Check failed: %s is running\n",
				       p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}
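
/*
 * Typical slot lifecycle as driven from arch code (an illustrative
 * sketch; the exact call sites vary by architecture):
 *
 *	p->ainsn.insn = get_insn_slot();	(in arch_prepare_kprobe)
 *	...single-step out of line from p->ainsn.insn...
 *	free_insn_slot(p->ainsn.insn, dirty);	(in arch_remove_kprobe)
 */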

/* Return 1 if the slot's page has become empty, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no task is preempted in the middle of a garbage slot */
	if (check_safety())
		return -EAGAIN;

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe with text_mutex held */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex held */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

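/*
 * When several kprobes are registered at the same address, they are
 * chained behind a single "aggregate" kprobe that takes their place in
 * the hash table. Roughly:
 *
 *	kprobe_table[hash] --> aggr_kprobe (pre_handler == aggr_pre_handler)
 *	                           |
 *	                           +-> ->list: kp1 <-> kp2 <-> ...
 */
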
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove the rp instance off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
				   struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
					   unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
				     unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() adds to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about kprobes which have already gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new one. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at an address
		 * that previously held a probe in a module vaddr area
		 * which has since been freed, so the old instruction
		 * slot has already been released. We need a new slot
		 * for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot
		 * again, and set the disabled flag because the probe is
		 * not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe; return 1 if it succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * Check the address against the kprobe_blacklist and fail any
	 * probe registration that falls inside a prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify an address relative to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
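
/*
 * For example (illustrative only), these two registrations resolve to
 * the same probe address:
 *
 *	struct kprobe kp1 = { .symbol_name = "do_fork", .offset = 0x10 };
 *	struct kprobe kp2 = { .addr = (kprobe_opcode_t *)(do_fork_addr + 0x10) };
 *
 * where do_fork_addr stands for whatever kallsyms_lookup_name("do_fork")
 * returns. Setting both .symbol_name and .addr is rejected above.
 */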

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/* Check if we are probing a module. */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has already freed its .init.text, we can't
		 * insert a kprobe there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
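
/*
 * Minimal usage sketch (illustrative, not part of this file): a module
 * that reports entry to do_fork. Handler names here are hypothetical.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	(let the probed instruction run)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	...in module init:	register_kprobe(&my_kp);
 *	...in module exit:	unregister_kprobe(&my_kp);
 */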

/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/*
 * Unregister a kprobe without scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * This is the only probe on the hash list. Disarm it only
		 * if kprobes are globally enabled and the probe is neither
		 * disabled nor gone - otherwise, the breakpoint has already
		 * been removed, and we save on flushing the icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* TODO: Verify that the probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
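
/*
 * Jprobe usage sketch (illustrative; names are hypothetical). The jprobe
 * handler mirrors the probed function's signature and must end with
 * jprobe_return():
 *
 *	static long my_do_fork(unsigned long clone_flags,
 *			       unsigned long stack_start,
 *			       struct pt_regs *regs, unsigned long stack_size,
 *			       int __user *parent_tidptr,
 *			       int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	(never returns normally)
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_do_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 */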

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for the maximum number of kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish the function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
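
/*
 * Kretprobe usage sketch (illustrative; the handler name is hypothetical):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "%s returned %lu\n", ri->rp->kp.symbol_name,
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	(bound on concurrent activations)
 *	};
 *
 * After unregistering, my_rp.nmissed reports how often the probed
 * function was entered with no free instance available.
 */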

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Mark the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here we can remove the insn_slot safely, because no thread will
	 * call the original probed function (which is about to be freed)
	 * any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier callback, checking for kprobes in the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section is freed. We need to kill
	 * any kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed and never written back, so
				 * disarming the breakpoint isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Look up and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we need to determine the range
	 * of addresses that belong to the listed functions, since a
	 * kprobe need not necessarily be at the beginning of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
					      &size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* look up the function addresses from their names */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk(KERN_WARNING "kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
			   p->addr, kprobe_type, sym, offset,
			   (modname ? modname : " "),
			   (kprobe_gone(p) ? "[GONE]" : ""),
			   ((kprobe_disabled(p) && !kprobe_gone(p)) ?
			    "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p %s %p %s%s\n",
			   p->addr, kprobe_type, p->addr,
			   (kprobe_gone(p) ? "[GONE]" : ""),
			   ((kprobe_disabled(p) && !kprobe_gone(p)) ?
			    "[DISABLED]" : ""));
}
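
/*
 * Sample lines from /sys/kernel/debug/kprobes/list as produced above
 * (addresses are illustrative):
 *
 *	c015d71a k vfs_read+0x0
 *	c011a316 r do_fork+0x0  [DISABLED]
 *
 * The second column is r (kretprobe), j (jprobe) or k (plain kprobe).
 */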

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
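
/*
 * For example (sketch), a registered probe can be parked and resumed
 * without paying the full unregister/register cost:
 *
 *	disable_kprobe(&my_kp);		(breakpoint disarmed, probe kept)
 *	...
 *	enable_kprobe(&my_kp);		(breakpoint re-armed)
 *
 * where my_kp is a kprobe previously registered with register_kprobe().
 */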

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when it
 * becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
				      char __user *user_buf, size_t count,
				      loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read = read_enabled_file_bool,
	.write = write_enabled_file_bool,
};
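
/*
 * With debugfs mounted, the "enabled" file created below lets root
 * toggle all kprobes at once (interactive sketch):
 *
 *	# cat /sys/kernel/debug/kprobes/enabled
 *	1
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm everything)
 */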

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);