net-next-2.6.git: kernel/trace/trace_event_perf.c
perf, trace: Optimize tracepoints by removing IRQ-disable from perf/tracepoint intera...
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

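/*
 * perf_arch_fetch_caller_regs() is provided by the architecture; it is
 * re-exported from here (best guess at the rationale) so that
 * perf_fetch_caller_regs(), which is inlined into modular trace event
 * code, can reach it.
 */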
EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

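/*
 * One scratch buffer per recursion context: task, softirq, hardirq and
 * NMI.  perf_swevent_get_recursion_context() returns an index into this
 * array, so a tracepoint firing from an interrupt cannot clobber the
 * buffer of the context it interrupted.
 */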
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;
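/*
 * sizeof(perf_trace_t) == PERF_MAX_TRACE_SIZE, so alloc_percpu() on this
 * type reserves exactly PERF_MAX_TRACE_SIZE bytes per CPU, aligned to
 * unsigned long.
 */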

/* Count the events in use (per event id, not per instance) */
static int      total_ref_count;

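/*
 * Take a perf reference on @event, enabling it on first use.  The very
 * first event going into perf also allocates the per-context scratch
 * buffers.  @data is remembered in event->perf_data while there is a
 * single user; with more than one user no unique owner exists, so it is
 * cleared.  On failure everything is rolled back.
 */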
static int perf_trace_event_enable(struct ftrace_event_call *event, void *data)
{
        int ret = -ENOMEM;

        if (event->perf_refcount++ > 0) {
                event->perf_data = NULL;
                return 0;
        }

        if (!total_ref_count) {
                char *buf;
                int i;

                for (i = 0; i < 4; i++) {
                        buf = (char *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail_buf;

                        rcu_assign_pointer(perf_trace_buf[i], buf);
                }
        }

        ret = event->perf_event_enable(event);
        if (!ret) {
                event->perf_data = data;
                total_ref_count++;
                return 0;
        }

fail_buf:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < 4; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
        event->perf_refcount--;

        return ret;
}

int perf_trace_enable(int event_id, void *data)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->perf_event_enable &&
                    try_module_get(event->mod)) {
                        ret = perf_trace_event_enable(event, data);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

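/*
 * Mirror of perf_trace_event_enable(): drop the reference, and when the
 * last perf user of the last event goes away, unpublish the scratch
 * buffers and free them once every in-flight user is done.
 */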
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
        if (--event->perf_refcount > 0)
                return;

        event->perf_event_disable(event);

        if (!--total_ref_count) {
                char *buf[4];
                int i;

                for (i = 0; i < 4; i++) {
                        buf[i] = perf_trace_buf[i];
                        rcu_assign_pointer(perf_trace_buf[i], NULL);
                }

                /*
                 * Ensure every event currently profiling has finished
                 * before releasing the buffers
                 */
                synchronize_sched();

                for (i = 0; i < 4; i++)
                        free_percpu(buf[i]);
        }
}

void perf_trace_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        perf_trace_event_disable(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}
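
/*
 * Illustrative pairing (the calling code lives in the perf core, not in
 * this file): for a PERF_TYPE_TRACEPOINT counter, attr.config carries
 * the trace event id, so a caller would look roughly like:
 *
 *      err = perf_trace_enable(event->attr.config, event);
 *      ...
 *      perf_trace_disable(event->attr.config);
 *
 * Passing the perf event itself as @data is an assumption here, based on
 * event->perf_data being consumed by the enable path above.
 */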
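/*
 * Hand out per-cpu scratch space for building one trace event record of
 * @size bytes.  On success *rctxp holds the recursion context index,
 * which the caller must release with perf_swevent_put_recursion_context()
 * after submitting the record.  The buffer is looked up with
 * rcu_dereference_sched(), so callers run with preemption disabled.
 */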
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       struct pt_regs *regs, int *rctxp)
{
        struct trace_entry *entry;
        char *trace_buf, *raw_data;
        int pc;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        pc = preempt_count();

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                goto err_recursion;

        trace_buf = rcu_dereference_sched(perf_trace_buf[*rctxp]);
        if (!trace_buf)
                goto err;

        raw_data = per_cpu_ptr(trace_buf, smp_processor_id());

        /* Zero the dead bytes from alignment to avoid leaking stack data to userspace */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        tracing_generic_entry_update(entry, regs->flags, pc);
        entry->type = type;

        return raw_data;
err:
        perf_swevent_put_recursion_context(*rctxp);
err_recursion:
        return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
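
/*
 * Usage sketch (illustrative only; the real callers are generated by the
 * trace event macros, and "my_entry", "size", "regs" and the field names
 * are placeholders):
 *
 *      int rctx;
 *      struct my_entry *entry;
 *
 *      entry = perf_trace_buf_prepare(size, event_call->id, regs, &rctx);
 *      if (!entry)
 *              return;
 *      entry->field = value;
 *      ... hand entry/size/regs to perf (e.g. perf_tp_event()) ...
 *      perf_swevent_put_recursion_context(rctx);
 */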