/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

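/*
 * Per-cpu scratch buffers used to assemble trace entries before they
 * are handed to perf: one for normal contexts and a separate one for
 * NMI context, since an NMI can interrupt a user of the first buffer.
 */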
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

/*
 * Force the buffer to be aligned to unsigned long to avoid misaligned
 * accesses.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

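/*
 * total_ref_count and the buffer pointers above are only updated
 * under event_mutex (see perf_trace_enable() and perf_trace_disable()
 * below), so the update side needs no further locking.
 */
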
static int perf_trace_event_enable(struct ftrace_event_call *event, void *data)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->perf_refcount++ > 0) {
		event->perf_data = NULL;
		return 0;
	}

	if (!total_ref_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;
		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->perf_event_enable(event);
	if (!ret) {
		event->perf_data = data;
		total_ref_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_ref_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->perf_refcount--;
	return ret;
}

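/*
 * Look up an event by id and enable perf profiling on it. The module
 * that owns the event is pinned for as long as profiling is active.
 */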
int perf_trace_enable(int event_id, void *data)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->perf_event_enable &&
		    try_module_get(event->mod)) {
			ret = perf_trace_event_enable(event, data);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

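/*
 * Drop one profiling reference on @event; when the last global
 * reference goes away, unpublish the buffers and free them only once
 * every in-flight user has finished (see synchronize_sched() below).
 */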
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->perf_refcount > 0)
		return;

	event->perf_event_disable(event);

	if (!--total_ref_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure all events currently profiling have finished
		 * before releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

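/* Counterpart of perf_trace_enable(): also drops the module pin. */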
void perf_trace_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			perf_trace_event_disable(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}

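/*
 * Reserve per-cpu scratch space for a trace entry of @size bytes and
 * fill in the generic entry header. Returns the buffer, or NULL if
 * the buffers are gone or the recursion check fails. On success,
 * interrupts are left disabled and a recursion context is held; the
 * caller must undo both once the event has been submitted.
 */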
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	/* Protect the per-cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	/* Protect against recursion */
	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference_sched(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from alignment to avoid leaking stack to userspace */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

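/*
 * Typical usage, as a sketch only (modeled on the perf probes
 * generated by include/trace/ftrace.h; perf_trace_buf_submit() is the
 * counterpart that reports the entry to perf and undoes the recursion
 * context and irq state taken above; "my_entry", "field", "value",
 * "addr", "count" and "regs" are placeholders):
 *
 *	struct my_entry *entry;
 *	unsigned long irq_flags;
 *	int rctx;
 *	int size = ALIGN(sizeof(*entry), sizeof(u64));
 *
 *	entry = perf_trace_buf_prepare(size, event_call->id,
 *				       &rctx, &irq_flags);
 *	if (!entry)
 *		return;
 *
 *	entry->field = value;
 *	perf_trace_buf_submit(entry, size, rctx, addr, count,
 *			      irq_flags, regs);
 */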