/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
#include "trace.h"

#ifdef CONFIG_BRANCH_TRACER

static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
static struct trace_array *branch_tracer;

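/*
 * Record one annotated branch event in the branch tracer's ring buffer.
 * The per-cpu "disabled" counter guards against recursion: if a branch
 * taken inside this path fires the probe again, the nested call bails
 * out early.
 */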
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;

	pc = preempt_count();
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_BRANCH;

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	raw_local_irq_restore(flags);
}

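/*
 * Cheap wrapper: skip the ring buffer work entirely while the branch
 * tracer is not enabled.
 */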
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}

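/*
 * Enable/disable keep a simple reference count under
 * branch_tracing_mutex.  The probe only checks branch_tracing_enabled,
 * so branch_tracer must be visible before the count goes non-zero
 * (hence the smp_wmb() below).
 */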
int enable_branch_tracing(struct trace_array *tr)
{
	int ret = 0;

	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return ret;
}

void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}

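/*
 * The "branch" tracer plugin: selecting it resets the per-cpu buffers
 * and turns the branch probe on; switching to another tracer turns it
 * back off.
 */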
struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif
};

__init static int init_branch_trace(void)
{
	return register_tracer(&branch_trace);
}

device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

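/*
 * Hook called from the likely()/unlikely() annotations when branch
 * profiling is compiled in: bump the per-callsite counters and, if the
 * branch tracer is active, log the event as well.
 */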
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

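/*
 * A [start, stop) range of ftrace_branch_data records, handed to the
 * seq_file code through inode->i_private.  "hit" selects the header
 * printed by t_show(): hit/miss for the all-branches profile,
 * correct/incorrect for the annotated one.
 */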
struct ftrace_pointer {
	void		*start;
	void		*stop;
	int		hit;
};

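/*
 * seq_file iterator over a branch profile.  The magic cursor value
 * (void *)1 stands for the table header; the first real record is
 * f->start and iteration stops at f->stop.
 */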
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	const struct ftrace_pointer *f = m->private;
	struct ftrace_branch_data *p = v;

	(*pos)++;

	if (v == (void *)1)
		return f->start;

	++p;

	if ((void *)p >= (void *)f->stop)
		return NULL;

	return p;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = (void *)1;
	loff_t l = 0;

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
}

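/*
 * Print one profile line: both counters, the second counter as a
 * percentage of the total ('X' when the callsite was never reached),
 * and the function, file and line of the callsite.
 */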
static int t_show(struct seq_file *m, void *v)
{
	const struct ftrace_pointer *fp = m->private;
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	if (v == (void *)1) {
		if (fp->hit)
			seq_printf(m, "   miss      hit    %% ");
		else
			seq_printf(m, " correct incorrect  %% ");
		seq_printf(m, "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
		return 0;
	}

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlaid on correct, and hit on incorrect.
	 */
	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}

static struct seq_operations tracing_likely_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

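/*
 * debugfs open: wire up the seq_file iterator and stash the
 * ftrace_pointer range that was stored in the inode's i_private.
 */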
static int tracing_branch_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &tracing_likely_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = (void *)inode->i_private;
	}

	return ret;
}

static const struct file_operations tracing_branch_fops = {
	.open		= tracing_branch_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

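/*
 * The start/stop symbols below are provided by the linker script and
 * bound the arrays of ftrace_branch_data records that the profiling
 * macros place in their own sections.
 */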
#ifdef CONFIG_PROFILE_ALL_BRANCHES
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static const struct ftrace_pointer ftrace_branch_pos = {
	.start			= __start_branch_profile,
	.stop			= __stop_branch_profile,
	.hit			= 1,
};

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static const struct ftrace_pointer ftrace_annotated_branch_pos = {
	.start			= __start_annotated_branch_profile,
	.stop			= __stop_annotated_branch_profile,
};

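/*
 * Create the debugfs entries: profile_annotated_branch always, plus
 * profile_branch when all-branch profiling is configured.
 */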
static __init int ftrace_branch_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
				    (void *)&ftrace_annotated_branch_pos,
				    &tracing_branch_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'profile_annotated_branch' entry\n");

#ifdef CONFIG_PROFILE_ALL_BRANCHES
	entry = debugfs_create_file("profile_branch", 0444, d_tracer,
				    (void *)&ftrace_branch_pos,
				    &tracing_branch_fops);
	if (!entry)
		pr_warning("Could not create debugfs"
			   " 'profile_branch' entry\n");
#endif

	return 0;
}

device_initcall(ftrace_branch_init);