/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"
/*
 * Shared per-cpu scratch buffers used when building profiling events.
 * Two are kept: one for ordinary context and one for NMI context --
 * presumably because an NMI can interrupt a user of the regular
 * buffer, so it needs its own (see the enable/disable paths below).
 * They are published and unpublished with rcu_assign_pointer() and
 * only freed after synchronize_sched().
 */
struct perf_trace_buf *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);

struct perf_trace_buf *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
/*
 * Enable perf profiling for @event.
 *
 * profile_count appears to be biased at -1 while the event is unused
 * (initialized elsewhere -- verify against the struct definition), so
 * only the first enabler sees atomic_inc_return() == 0 and performs
 * the real work; later enablers merely take a reference.  This pairs
 * with the atomic_add_negative(-1, ...) test on the disable side.
 *
 * The first profiled event system-wide (total_profile_count == 0)
 * also allocates the shared per-cpu buffers and publishes them with
 * rcu_assign_pointer().
 *
 * Returns 0 on success, -ENOMEM if buffer allocation fails, or the
 * error from event->profile_enable().  Caller holds event_mutex
 * (see ftrace_profile_enable()), which serializes total_profile_count.
 */
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	struct perf_trace_buf *buf;
	int ret = -ENOMEM;

	/* Already enabled: just account the extra reference */
	if (atomic_inc_return(&event->profile_count))
		return 0;

	if (!total_profile_count) {
		/* First profiled event anywhere: set up the buffers */
		buf = alloc_percpu(struct perf_trace_buf);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		/* Separate buffer for NMI context */
		buf = alloc_percpu(struct perf_trace_buf);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

	/* profile_enable() failed: fall through into the cleanup path */
fail_buf_nmi:
	/* Only tear the buffers down if this call published them */
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	atomic_dec(&event->profile_count);

	return ret;
}
ac199db0
PZ
62int ftrace_profile_enable(int event_id)
63{
64 struct ftrace_event_call *event;
20c8928a 65 int ret = -EINVAL;
ac199db0 66
20c8928a 67 mutex_lock(&event_mutex);
a59fd602 68 list_for_each_entry(event, &ftrace_events, list) {
558e6547
LZ
69 if (event->id == event_id && event->profile_enable &&
70 try_module_get(event->mod)) {
e5e25cf4 71 ret = ftrace_profile_enable_event(event);
20c8928a
LZ
72 break;
73 }
ac199db0 74 }
20c8928a 75 mutex_unlock(&event_mutex);
ac199db0 76
20c8928a 77 return ret;
ac199db0
PZ
78}
79
/*
 * Drop one profiling reference on @event.
 *
 * atomic_add_negative(-1, ...) is the counterpart of the biased
 * atomic_inc_return() in ftrace_profile_enable_event(): only the
 * last disabler drives profile_count negative and does the real
 * teardown; earlier callers just release their reference.
 *
 * When this was the last profiled event system-wide, the shared
 * per-cpu buffers are unpublished and freed, but only after
 * synchronize_sched() guarantees no profiling path still holds a
 * reference obtained under rcu.
 *
 * Caller holds event_mutex (see ftrace_profile_disable()), which
 * serializes total_profile_count.
 */
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	struct perf_trace_buf *buf, *nmi_buf;

	/* Other users remain: nothing more to do */
	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		/* Unpublish both buffers before freeing them */
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure all in-flight profiling users have finished
		 * before releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
ac199db0
PZ
107void ftrace_profile_disable(int event_id)
108{
109 struct ftrace_event_call *event;
110
20c8928a 111 mutex_lock(&event_mutex);
a59fd602 112 list_for_each_entry(event, &ftrace_events, list) {
20c8928a 113 if (event->id == event_id) {
e5e25cf4 114 ftrace_profile_disable_event(event);
558e6547 115 module_put(event->mod);
20c8928a
LZ
116 break;
117 }
ac199db0 118 }
20c8928a 119 mutex_unlock(&event_mutex);
ac199db0 120}