jump label: Fix deadlock b/w jump_label_mutex vs. text_mutex
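
The deadlock named in the subject line looks like an ABBA ordering: register_kprobe() acquires text_mutex and then, per the comment added in the hunks below, needs jump_label_text_reserved(), which takes the jump label mutex, while the jump label update path nests the two locks the other way around. The fix visible in the hunks is to take jump_label_lock() before text_mutex. The following is a minimal sketch, not kernel code verbatim, of the nesting the patched register_kprobe() ends up with; the lock and helper names come from the hunks, and error paths and the actual probe insertion are elided:

        mutex_lock(&kprobe_mutex);
        jump_label_lock();      /* before text_mutex, matching the jump label side */
        get_online_cpus();      /* for avoiding text_mutex deadlock (see hunk below) */
        mutex_lock(&text_mutex);
        /* ... verify the address and insert the probe ... */
        mutex_unlock(&text_mutex);
        put_online_cpus();
        jump_label_unlock();
        mutex_unlock(&kprobe_mutex);

The optimize_all_kprobes()/unoptimize_all_kprobes() hunks drop their direct text_mutex locking and instead document that the caller must hold kprobe_mutex; presumably the actual text patching is left to the optimizer under its own text_mutex pass, which would also be why kprobe_mutex is now described as protecting optimizing_list in addition to kprobe_table.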
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 56a891914273319e0e0a69ca911d7824db1341f8..9437e14f36bd406697397882b7907e2b41dfaa5d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -74,7 +74,8 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobes_all_disarmed;
 
-static DEFINE_MUTEX(kprobe_mutex);     /* Protects kprobe_table */
+/* This protects kprobe_table and optimizing_list */
+static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
        spinlock_t lock ____cacheline_aligned_in_smp;
@@ -595,6 +596,7 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 }
 
 #ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
 static void __kprobes optimize_all_kprobes(void)
 {
        struct hlist_head *head;
@@ -607,17 +609,16 @@ static void __kprobes optimize_all_kprobes(void)
                return;
 
        kprobes_allow_optimization = true;
-       mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
-       mutex_unlock(&text_mutex);
        printk(KERN_INFO "Kprobes globally optimized\n");
 }
 
+/* This should be called with kprobe_mutex locked */
 static void __kprobes unoptimize_all_kprobes(void)
 {
        struct hlist_head *head;
@@ -1145,13 +1146,16 @@ int __kprobes register_kprobe(struct kprobe *p)
                return ret;
 
        preempt_disable();
+       jump_label_lock();
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr) ||
            ftrace_text_reserved(p->addr, p->addr) ||
            jump_label_text_reserved(p->addr, p->addr)) {
                preempt_enable();
+               jump_label_unlock();
                return -EINVAL;
        }
+       jump_label_unlock();
 
        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
        p->flags &= KPROBE_FLAG_DISABLED;
@@ -1186,6 +1190,8 @@ int __kprobes register_kprobe(struct kprobe *p)
        INIT_LIST_HEAD(&p->list);
        mutex_lock(&kprobe_mutex);
 
+       jump_label_lock(); /* needed to call jump_label_text_reserved() */
+
        get_online_cpus();      /* For avoiding text_mutex deadlock. */
        mutex_lock(&text_mutex);
 
@@ -1213,6 +1219,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 out:
        mutex_unlock(&text_mutex);
        put_online_cpus();
+       jump_label_unlock();
        mutex_unlock(&kprobe_mutex);
 
        if (probed_mod)