/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
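
/*
 * One per-cpu scratch buffer per perf software event recursion context
 * (task, softirq, hardirq, NMI), published and unpublished with RCU so
 * that perf_trace_buf_prepare() never sees a buffer that is being torn
 * down.
 */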
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
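
/*
 * Enable perf profiling for one trace event.  Enables are reference
 * counted per event; the first enabled event overall also allocates the
 * shared per-cpu buffers.
 */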
static int perf_trace_event_enable(struct ftrace_event_call *event, void *data)
{
	char *buf;
	int i;
	int ret = -ENOMEM;

	/* Already enabled: the single perf_data slot cannot be shared */
	if (event->perf_refcount++ > 0) {
		event->perf_data = NULL;
		return 0;
	}

	/* First event overall: allocate one per-cpu buffer per context */
	if (!total_ref_count) {
		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail_buf;

			rcu_assign_pointer(perf_trace_buf[i], buf);
		}
	}

	ret = event->perf_event_enable(event);
	if (!ret) {
		event->perf_data = data;
		total_ref_count++;
		return 0;
	}

fail_buf:
	if (!total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
	event->perf_refcount--;

	return ret;
}
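
/*
 * Find the event by id under event_mutex, pin the module that provides it
 * and enable perf profiling for it.  perf_trace_disable() undoes both.
 */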
int perf_trace_enable(int event_id, void *data)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->perf_event_enable &&
		    try_module_get(event->mod)) {
			ret = perf_trace_event_enable(event, data);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
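
/*
 * Disable perf profiling for one trace event.  When the last profiled
 * event goes away, unpublish the per-cpu buffers and free them only after
 * every in-flight user has finished.
 */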
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	if (--event->perf_refcount > 0)
		return;

	event->perf_event_disable(event);

	if (!--total_ref_count) {
		char *buf[4];
		int i;

		for (i = 0; i < 4; i++) {
			buf[i] = perf_trace_buf[i];
			rcu_assign_pointer(perf_trace_buf[i], NULL);
		}

		/*
		 * Ensure every event in profiling has finished before
		 * releasing the buffers
		 */
		synchronize_sched();

		for (i = 0; i < 4; i++)
			free_percpu(buf[i]);
	}
}
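
/*
 * Counterpart of perf_trace_enable(): look the event up by id, drop its
 * perf reference and release the module reference taken at enable time.
 */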
void perf_trace_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			perf_trace_event_disable(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
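
/*
 * Reserve trace buffer space for one event: pick the per-cpu buffer that
 * matches the current recursion context, zero the alignment padding and
 * fill in the generic entry header.  Returns the raw buffer or NULL; on
 * success *rctxp holds the recursion context the caller must put once the
 * event has been submitted.
 */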
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	trace_buf = rcu_dereference_sched(perf_trace_buf[*rctxp]);
	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, smp_processor_id());

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, regs->flags, pc);
	entry->type = type;

	return raw_data;

err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
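
/*
 * Illustrative sketch (not part of this file) of how a TRACE_EVENT()
 * generated handler is expected to drive the helper above; the exact
 * submit-side helpers live in the generated code and in trace.h:
 *
 *	int rctx;
 *	struct trace_entry *entry;
 *
 *	entry = perf_trace_buf_prepare(size, event_call->id, regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in the event specific fields behind the header, then hand
 *	the record to perf and drop rctx (for example via
 *	perf_swevent_put_recursion_context()) on the submit path.
 */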