/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;
-static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];
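
The per-bucket locks above are taken by hashing the probed task, so unrelated kretprobe instances do not contend on a single global lock. A minimal sketch of the lookup-and-lock pattern, modeled on the kretprobe_hash_lock() helpers in kernel/kprobes.c of this era (kretprobe_inst_table and KPROBE_HASH_BITS are the kernel's own names):

/* Sketch: pick the bucket for a task, return its head, take its lock. */
static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);

	*head = &kretprobe_inst_table[hash];
	spin_lock_irqsave(kretprobe_table_lock_ptr(hash), *flags);
}
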
#ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		return;

kprobes_allow_optimization = true;
- mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
- mutex_unlock(&text_mutex);
printk(KERN_INFO "Kprobes globally optimized\n");
}
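
The "called with kprobe_mutex locked" rule is enforced by the caller; in this era the caller is the kprobes-optimization sysctl handler. A condensed sketch of that convention, modeled on proc_kprobes_optimization_handler() in kernel/kprobes.c (error handling trimmed); holding the mutex across both the flag update and the table walk keeps the two consistent:

/* Sketch: the sysctl handler owns kprobe_mutex across the proc write
 * and the (un)optimize pass, so kprobe_table cannot change underneath
 * optimize_all_kprobes()/unoptimize_all_kprobes(). */
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer,
				      size_t *length, loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_mutex);

	return ret;
}
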
+/* This should be called with kprobe_mutex locked */
static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = false;
+	jump_label_lock();	/* a mutex: take it before disabling preemption */
	preempt_disable();
if (!kernel_text_address((unsigned long) p->addr) ||
in_kprobes_functions((unsigned long) p->addr) ||
ftrace_text_reserved(p->addr, p->addr) ||
jump_label_text_reserved(p->addr, p->addr)) {
preempt_enable();
+ jump_label_unlock();
return -EINVAL;
}
+ jump_label_unlock();
/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
INIT_LIST_HEAD(&p->list);
mutex_lock(&kprobe_mutex);
+ jump_label_lock(); /* needed to call jump_label_text_reserved() */
+
get_online_cpus(); /* For avoiding text_mutex deadlock. */
mutex_lock(&text_mutex);
out:
mutex_unlock(&text_mutex);
put_online_cpus();
+ jump_label_unlock();
mutex_unlock(&kprobe_mutex);
	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
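
Taken together, the hunks above fix register_kprobe()'s lock nesting: jump_label_lock() wraps a mutex, so it must be acquired before preemption is disabled, and in the second site it nests inside kprobe_mutex with text_mutex taken last, after get_online_cpus() pins hotplug. A schematic of the acquire/release discipline (the wrapper function is hypothetical, for illustration only):

/* Sketch: lock nesting used by the second half of register_kprobe().
 * Acquire order; release is the exact reverse. */
static void register_kprobe_locking_sketch(void)
{
	mutex_lock(&kprobe_mutex);
	jump_label_lock();	/* needed around jump_label_text_reserved() */
	get_online_cpus();	/* avoids text_mutex vs. hotplug deadlock */
	mutex_lock(&text_mutex);

	/* ... insert the probe and patch the text ... */

	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();
	mutex_unlock(&kprobe_mutex);
}
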
static const struct file_operations fops_kp = {
.read = read_enabled_file_bool,
.write = write_enabled_file_bool,
+ .llseek = default_llseek,
};
static int __kprobes debugfs_kprobe_init(void)
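
The hunk stops at the debugfs_kprobe_init() signature. The explicit .llseek added above preserves the traditional seekable behavior now that file_operations are expected to name their llseek handler outright. For context, a condensed sketch of how fops_kp is wired up, modeled on debugfs_kprobe_init() in kernel/kprobes.c (the real function also creates a "list" file; error handling kept minimal):

/* Sketch: wiring fops_kp to <debugfs>/kprobes/enabled. The read/write
 * handlers consult kprobes_all_disarmed rather than the data pointer. */
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}
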