/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

/* Per-cpu pt_regs storage used by perf tracing; exported for modules. */
DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

/*
 * Shared per-cpu scratch buffers: one for normal context and one for
 * NMI context.  Published and torn down with rcu_assign_pointer() in
 * perf_trace_event_enable()/perf_trace_event_disable().
 */
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

/* Array type sized for the largest raw record perf tracing may emit. */
typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

/*
 * Enable perf profiling for @event.
 *
 * The first user overall (total_ref_count == 0) also allocates and
 * publishes the shared per-cpu buffers (normal and NMI context).
 * Returns 0 on success or a negative errno on failure.
 */
static int perf_trace_event_enable(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	/* Already enabled for perf: just take another reference. */
	if (event->perf_refcount++ > 0)
		return 0;

	if (!total_ref_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->perf_event_enable(event);
	if (!ret) {
		total_ref_count++;
		return 0;
	}

	/* perf_event_enable() failed: fall through into the unwind path. */
fail_buf_nmi:
	/* Free the buffers only if no other enabled event still uses them. */
	if (!total_ref_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->perf_refcount--;

	return ret;
}
63
97d5a220 64int perf_trace_enable(int event_id)
ac199db0
PZ
65{
66 struct ftrace_event_call *event;
20c8928a 67 int ret = -EINVAL;
ac199db0 68
20c8928a 69 mutex_lock(&event_mutex);
a59fd602 70 list_for_each_entry(event, &ftrace_events, list) {
97d5a220 71 if (event->id == event_id && event->perf_event_enable &&
558e6547 72 try_module_get(event->mod)) {
97d5a220 73 ret = perf_trace_event_enable(event);
20c8928a
LZ
74 break;
75 }
ac199db0 76 }
20c8928a 77 mutex_unlock(&event_mutex);
ac199db0 78
20c8928a 79 return ret;
ac199db0
PZ
80}
81
/*
 * Drop one perf reference on @event and disable it when the count hits
 * zero.  When the last user overall goes away, the shared per-cpu
 * buffers are unpublished (RCU) and freed once in-flight users finish.
 */
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	/* Other perf users of this event remain: nothing more to do. */
	if (--event->perf_refcount > 0)
		return;

	event->perf_event_disable(event);

	if (!--total_ref_count) {
		/* Unpublish both buffers before waiting for readers. */
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every events in profiling have finished before
		 * releasing the buffers
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
108
97d5a220 109void perf_trace_disable(int event_id)
ac199db0
PZ
110{
111 struct ftrace_event_call *event;
112
20c8928a 113 mutex_lock(&event_mutex);
a59fd602 114 list_for_each_entry(event, &ftrace_events, list) {
20c8928a 115 if (event->id == event_id) {
97d5a220 116 perf_trace_event_disable(event);
558e6547 117 module_put(event->mod);
20c8928a
LZ
118 break;
119 }
ac199db0 120 }
20c8928a 121 mutex_unlock(&event_mutex);
ac199db0 122}
430ad5a6 123
97d5a220
FW
/*
 * Reserve space in the per-cpu perf trace buffer and fill in the common
 * trace entry header.
 *
 * @size:	bytes needed; the trailing u64-aligned slack is zeroed
 * @type:	trace event type id stored in the entry header
 * @rctxp:	out: recursion context the caller must later release
 * @irq_flags:	out: saved irq flags the caller must later restore
 *
 * Returns the per-cpu raw buffer, or NULL on recursion or when the
 * buffers are not allocated.  On success the caller runs with irqs
 * disabled (and the recursion context held) until it submits/discards.
 */
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
					 int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	/* NMI context has its own buffer so it can't corrupt the other. */
	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);