perf events: Clean up pid passing
author    Matt Helsley <matthltc@us.ibm.com>
          Mon, 13 Sep 2010 20:01:20 +0000 (13:01 -0700)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 15 Sep 2010 08:44:00 +0000 (10:44 +0200)
The kernel perf event creation path shouldn't use find_task_by_vpid(),
because a vpid only has meaning within a specific pid namespace.
find_task_by_vpid() resolves the vpid in current's pid namespace, which
isn't always the correct namespace in all the places
perf_event_create_kernel_counter() (and thus find_get_context()) is
called from.
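
For reference, a paraphrased sketch of why that lookup is namespace-relative
(this is the usual shape of the helper, not code taken from this patch):

        /*
         * Paraphrased sketch: the vpid is looked up in current's pid
         * namespace, so the same number can name different tasks
         * depending on which task is asking.
         */
        struct task_struct *find_task_by_vpid(pid_t vnr)
        {
                return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
        }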

The goal is to clean up pid namespace handling and prevent bugs like:

https://bugzilla.kernel.org/show_bug.cgi?id=17281

Instead of using pids, switch find_get_context() to take task_struct
pointers directly. The syscall is now responsible for resolving the pid
to a task struct, which moves the pid namespace resolution into the
syscall, much like every other syscall that takes pid parameters.
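
With the new signature a kernel-side caller passes the task_struct itself
(or NULL for a per-CPU event) instead of a pid. A minimal sketch, assuming
a hardware-cycles attribute; the wrapper name and handler are illustrative
and not part of this patch:

        #include <linux/perf_event.h>
        #include <linux/sched.h>

        static struct perf_event *
        create_cycles_counter_for(struct task_struct *task, int cpu,
                                  perf_overflow_handler_t handler)
        {
                struct perf_event_attr attr = {
                        .type          = PERF_TYPE_HARDWARE,
                        .config        = PERF_COUNT_HW_CPU_CYCLES,
                        .size          = sizeof(attr),
                        .sample_period = 100000,
                };

                /*
                 * task == NULL selects a per-CPU counter (cpu != -1);
                 * a non-NULL task profiles that task directly, with no
                 * vpid resolution inside perf.
                 */
                return perf_event_create_kernel_counter(&attr, cpu, task,
                                                        handler);
        }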

Signed-off-by: Matt Helsley <matthltc@us.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robin Green <greenrd@greenrd.org>
Cc: Prasad <prasad@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
LKML-Reference: <a134e5e392ab0204961fd1a62c84a222bf5874a9.1284407763.git.matthltc@us.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/arm/oprofile/common.c
include/linux/perf_event.h
kernel/hw_breakpoint.c
kernel/perf_event.c
kernel/watchdog.c

diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 0691176899ffc24f0a176d154a34f5b7200c6047..aad63e611b36a90bd156c8e1a0caed99cd2182be 100644
@@ -96,7 +96,7 @@ static int op_create_counter(int cpu, int event)
                return ret;
 
        pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
-                                                 cpu, -1,
+                                                 cpu, NULL,
                                                  op_overflow_handler);
 
        if (IS_ERR(pevent)) {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 93bf53aa50e5fa33766c1cf640310f5518656d8a..39d8860b268434c28d20437954cb633539c5bd7c 100644
@@ -902,7 +902,7 @@ extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
-                               pid_t pid,
+                               struct task_struct *task,
                                perf_overflow_handler_t callback);
 extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 6122f02cfedf9915d8642e35354ce806e85154b0..3b714e839c1053ec102f99bf8d924424bdf09381 100644
@@ -433,8 +433,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
 {
-       return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
-                                               triggered);
+       return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
@@ -516,7 +515,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
-               bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
+               bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);
 
                *pevent = bp;
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3f5309db72f1597a64353d5b5e29ff921bcc0af7..86f394e15d53f54d8ea04ea7ed249d7bcc0f9ebc 100644
@@ -2053,15 +2053,14 @@ errout:
 }
 
 static struct perf_event_context *
-find_get_context(struct pmu *pmu, pid_t pid, int cpu)
+find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 {
        struct perf_event_context *ctx;
        struct perf_cpu_context *cpuctx;
-       struct task_struct *task;
        unsigned long flags;
        int ctxn, err;
 
-       if (pid == -1 && cpu != -1) {
+       if (!task && cpu != -1) {
                /* Must be root to operate on a CPU event: */
                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);
@@ -2084,10 +2083,6 @@ find_get_context(struct pmu *pmu, pid_t pid, int cpu)
                return ctx;
        }
 
-       task = find_lively_task_by_vpid(pid);
-       if (IS_ERR(task))
-               return (void*)task;
-
        err = -EINVAL;
        ctxn = pmu->task_ctx_nr;
        if (ctxn < 0)
@@ -5527,6 +5522,7 @@ SYSCALL_DEFINE5(perf_event_open,
        struct perf_event_context *ctx;
        struct file *event_file = NULL;
        struct file *group_file = NULL;
+       struct task_struct *task = NULL;
        struct pmu *pmu;
        int event_fd;
        int fput_needed = 0;
@@ -5581,10 +5577,13 @@ SYSCALL_DEFINE5(perf_event_open,
        if ((pmu->task_ctx_nr == perf_sw_context) && group_leader)
                pmu = group_leader->pmu;
 
+       if (pid != -1)
+               task = find_lively_task_by_vpid(pid);
+
        /*
         * Get the target context (task or percpu):
         */
-       ctx = find_get_context(pmu, pid, cpu);
+       ctx = find_get_context(pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err_group_fd;
@@ -5666,11 +5665,11 @@ err_fd:
  *
  * @attr: attributes of the counter to create
  * @cpu: cpu in which the counter is bound
- * @pid: task to profile
+ * @task: task to profile (NULL for percpu)
  */
 struct perf_event *
 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
-                                pid_t pid,
+                                struct task_struct *task,
                                 perf_overflow_handler_t overflow_handler)
 {
        struct perf_event_context *ctx;
@@ -5687,7 +5686,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
                goto err;
        }
 
-       ctx = find_get_context(event->pmu, pid, cpu);
+       ctx = find_get_context(event->pmu, task, cpu);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto err_free;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 89eadbb9cefe48fd24455c71c3ea5afaa094a655..dc8e16824b51b18c8c113f0d670d70cb58ede740 100644
@@ -358,7 +358,7 @@ static int watchdog_nmi_enable(int cpu)
        /* Try to register using hardware perf events */
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period();
-       event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
+       event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;