/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

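/*
 * ftrace_cpu_disabled is a per-cpu "stop writing" flag: the ring
 * buffer write paths below (e.g. trace_function) bail out while it
 * is non-zero on the local cpu. It is raised around reads and
 * resets of the ring buffer so the tracer cannot recurse into a
 * buffer that is being iterated or torn down.
 */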
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_t __read_mostly		tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu_mask(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(char *buf);

static int __init set_ftrace(char *str)
{
	tracing_set_tracer(str);
	return 1;
}
__setup("ftrace", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

cycle_t ftrace_now(int cpu)
{
	u64 ts = ring_buffer_time_stamp(cpu);
	ring_buffer_normalize_time_stamp(cpu, &ts);
	return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag. Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/* function tracing enabled */
int				ftrace_function_enabled;

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, it is
 * boot-time and run-time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify allocating buffers
 * to read userspace tracer names. We keep track of the longest
 * tracer name registered.
 */
static int			max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;
	int ret;

	if (!str)
		return 0;
	ret = strict_strtoul(str, 0, &buf_size);
	/* nr_entries can not be zero */
	if (ret < 0 || buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
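/*
 * For example, booting with "trace_buf_size=1048576" requests a 1MB
 * buffer; the size is rounded to a multiple of the page size.
 */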

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"ftrace_printk",
	"ftrace_preempt",
#ifdef CONFIG_BRANCH_TRACER
	"branch",
#endif
	NULL
};
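/*
 * These strings back the debugfs "trace_options" file. An option is
 * set by writing its name and cleared by writing it with a "no"
 * prefix, e.g.:
 *
 *	echo stacktrace   > /debugfs/tracing/trace_options
 *	echo nostacktrace > /debugfs/tracing/trace_options
 */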

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}

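/*
 * A typical use from a tracer's output path (a sketch; "field" and
 * the format string are illustrative):
 *
 *	if (!trace_seq_printf(s, "ip: %lx\n", field->ip))
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 * A zero return means the line did not fit in the remaining page
 * and the caller should report a partial line.
 */
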
/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

#define MAX_MEMHEX_BYTES	8
#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)

static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

static void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
	s->readpos = 0;
}

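/*
 * Copy the not-yet-read part of @s into the user buffer @ubuf, at
 * most @cnt bytes, and advance the read position so the sequence
 * can be drained over several reads.
 */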
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret)
		return -EFAULT;

	/* advance by what was actually copied, not by all that remained */
	s->readpos += cnt;
	return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	ftrace_disable_cpu();
	ring_buffer_reset(tr->buffer);
	ftrace_enable_cpu();

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ring_buffer_reset(max_tr.buffer);
	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_tracing_cpu(i) {
			tracing_reset(tr, i);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

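/*
 * A minimal use of register_tracer() above looks like this (a sketch;
 * my_tracer_init and my_tracer_reset are hypothetical callbacks):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mine",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	register_tracer(&my_tracer);
 */
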
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count)
		goto out;

	if (trace_stop_count < 0) {
		/* Someone screwed up their debugging */
		WARN_ON_ONCE(1);
		trace_stop_count = 0;
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

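/*
 * Remember the comm of a task in a small fixed-size table keyed by
 * pid, so output code can print a name instead of a bare pid. When
 * the table is full, the oldest slot is recycled and the evicted
 * pid's mapping is invalidated.
 */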
static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

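/*
 * Fill in the fields shared by every trace entry: the pid of the
 * current task, the saved preempt count, and a flags byte encoding
 * the irq-off, hardirq/softirq and need-resched state at the time
 * of the event.
 */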
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long irq_flags;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_FN;
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

#ifdef CONFIG_FUNCTION_RET_TRACER
static void __trace_function_return(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct ftrace_retfunc *trace,
				unsigned long flags,
				int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_ret_entry *entry;
	unsigned long irq_flags;

	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_FN_RET;
	entry->ip = trace->func;
	entry->parent_ip = trace->ret;
	entry->rettime = trace->rettime;
	entry->calltime = trace->calltime;
	ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}
#endif

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void ftrace_trace_stack(struct trace_array *tr,
			       struct trace_array_cpu *data,
			       unsigned long flags,
			       int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	unsigned long irq_flags;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_STACK;

	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ring_buffer_event *event;
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct special_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, pc);
	entry->ent.type = TRACE_SPECIAL;
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, irq_flags, 4, pc);

	trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_CTX;
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 5, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_WAKE;
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 6, pc);

	trace_wake_up();
}

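/*
 * ftrace_special() can be called from anywhere in the kernel to drop
 * an ad-hoc marker with three arbitrary values into the trace; it is
 * recorded as a TRACE_SPECIAL entry and printed as "# arg1 arg2 arg3".
 */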
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_TRACER
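/*
 * Two variants of the function-trace callback follow: one that only
 * disables preemption (used when the "ftrace_preempt" trace option
 * is set) and one that disables interrupts outright so it is safe
 * before the recursion protection runs. tracing_start_function_trace()
 * selects between them.
 */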
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	raw_local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_RET_TRACER
void trace_function_return(struct ftrace_retfunc *trace)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_function_return(tr, data, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	raw_local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_RET_TRACER */

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	register_ftrace_function(&trace_ops);
	ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

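/*
 * The per-cpu ring buffers are merged here: peek at the next entry
 * of every cpu and hand back the one with the smallest timestamp,
 * so the iterator sees a single time-ordered stream of events.
 */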
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter, iter->cpu);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace) {
		mutex_unlock(&trace_types_lock);
		return NULL;
	}

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		for_each_tracing_cpu(cpu) {
			ring_buffer_iter_reset(iter->buffer_iter[cpu]);
		}

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
	mutex_unlock(&trace_types_lock);
}

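/*
 * When a function is kretprobed, the return address recorded in the
 * trace points at the kretprobe trampoline rather than the real
 * caller, so substitute a recognizable placeholder for that symbol.
 */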
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}
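/*
 * With the headers above, a line of output from print_trace_fmt()
 * below looks roughly like (a sketch):
 *
 *	bash-4251  [001]  1029.123456: do_sys_open <-sys_open
 */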

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total;
	unsigned long entries;
	const char *name = "preemption";

	if (type)
		name = type->name;

	entries = ring_buffer_entries(iter->tr->buffer);
	total = entries +
		ring_buffer_overruns(iter->tr->buffer);

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%3d", cpu);
	trace_seq_printf(s, "%c%c",
			 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
			  (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
			 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq) {
		trace_seq_putc(s, 'H');
	} else {
		if (hardirq) {
			trace_seq_putc(s, 'h');
		} else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
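/* Maps a task state bit to a letter: 'R' running, 'S' sleeping, etc. */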

/*
 * The message is supposed to contain an ending newline.
 * If the printing stops prematurely, try to add a newline of our own.
 */
void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
{
	struct trace_entry *ent;
	struct trace_field_cont *cont;
	bool ok = true;

	ent = peek_next_entry(iter, iter->cpu, NULL);
	if (!ent || ent->type != TRACE_CONT) {
		trace_seq_putc(s, '\n');
		return;
	}

	do {
		cont = (struct trace_field_cont *)ent;
		if (ok)
			ok = (trace_seq_printf(s, "%s", cont->buf) > 0);

		ftrace_disable_cpu();

		if (iter->buffer_iter[iter->cpu])
			ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
		else
			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);

		ftrace_enable_cpu();

		ent = peek_next_entry(iter, iter->cpu, NULL);
	} while (ent && ent->type == TRACE_CONT);

	if (!ok)
		trace_seq_putc(s, '\n');
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (cpu_isset(iter->cpu, iter->started))
		return;

	cpu_set(iter->cpu, iter->started);
	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}

static enum print_line_t
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry;
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	u64 next_ts;
	char *comm;
	int S, T;
	int i;
	unsigned state;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	test_cpu_buff_start(iter);

	next_entry = find_next_entry(iter, NULL, &next_ts);
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);
	abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(iter->ts),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, field->parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';

		state = field->prev_state ?
			__ffs(field->prev_state) + 1 : 0;
		S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
		comm = trace_find_cmdline(field->next_pid);
		trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
				 field->prev_pid,
				 field->prev_prio,
				 S, entry->type == TRACE_CTX ? "==>" : "  +",
				 field->next_cpu,
				 field->next_pid,
				 field->next_prio,
				 T, comm);
		break;
	}
	case TRACE_SPECIAL: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "# %ld %ld %ld\n",
				 field->arg1,
				 field->arg2,
				 field->arg3);
		break;
	}
	case TRACE_STACK: {
		struct stack_entry *field;

		trace_assign_type(field, entry);

		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i)
				trace_seq_puts(s, " <= ");
			seq_print_ip_sym(s, field->caller[i], sym_flags);
		}
		trace_seq_puts(s, "\n");
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_printf(s, ": %s", field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	case TRACE_BRANCH: {
		struct trace_branch *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "[%s] %s:%s:%d\n",
				 field->correct ? " ok  " : " MISS ",
				 field->func,
				 field->file,
				 field->line);
		break;
	}
	default:
		trace_seq_printf(s, "Unknown type %d\n", entry->type);
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int ret;
	int S, T;
	int i;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	test_cpu_buff_start(iter);

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(iter->ts);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		ret = seq_print_ip_sym(s, field->ip, sym_flags);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
						field->parent_ip) {
			ret = trace_seq_printf(s, " <-");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = seq_print_ip_sym(s,
					       field->parent_ip,
					       sym_flags);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		ret = trace_seq_printf(s, "\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
				       field->prev_pid,
				       field->prev_prio,
				       S,
				       entry->type == TRACE_CTX ? "==>" : "  +",
				       field->next_cpu,
				       field->next_pid,
				       field->next_prio,
				       T);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_SPECIAL: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				       field->arg1,
				       field->arg2,
				       field->arg3);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_STACK: {
		struct stack_entry *field;

		trace_assign_type(field, entry);

		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i) {
				ret = trace_seq_puts(s, " <= ");
				if (!ret)
					return TRACE_TYPE_PARTIAL_LINE;
			}
			ret = seq_print_ip_sym(s, field->caller[i],
					       sym_flags);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		ret = trace_seq_puts(s, "\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		seq_print_ip_sym(s, field->ip, sym_flags);
		trace_seq_printf(s, ": %s", field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	case TRACE_FN_RET: {
		return print_return_function(iter);
	}
	case TRACE_BRANCH: {
		struct trace_branch *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "[%s] %s:%s:%d\n",
				 field->correct ? " ok  " : " MISS ",
				 field->func,
				 field->file,
				 field->line);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	int ret;
	int S, T;

	entry = iter->ent;

	if (entry->type == TRACE_CONT)
		return TRACE_TYPE_HANDLED;

	ret = trace_seq_printf(s, "%d %d %llu ",
		entry->pid, iter->cpu, iter->ts);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (entry->type) {
	case TRACE_FN: {
		struct ftrace_entry *field;

		trace_assign_type(field, entry);

		/* ip and parent_ip are unsigned long, so use %lx */
		ret = trace_seq_printf(s, "%lx %lx\n",
				       field->ip,
				       field->parent_ip);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_CTX:
	case TRACE_WAKE: {
		struct ctx_switch_entry *field;

		trace_assign_type(field, entry);

		S = field->prev_state < sizeof(state_to_char) ?
			state_to_char[field->prev_state] : 'X';
		T = field->next_state < sizeof(state_to_char) ?
			state_to_char[field->next_state] : 'X';
		if (entry->type == TRACE_WAKE)
			S = '+';
		ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
				       field->prev_pid,
				       field->prev_prio,
				       S,
				       field->next_cpu,
				       field->next_pid,
				       field->next_prio,
				       T);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_STACK: {
		struct special_entry *field;

		trace_assign_type(field, entry);

		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				       field->arg1,
				       field->arg2,
				       field->arg3);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	case TRACE_PRINT: {
		struct print_entry *field;

		trace_assign_type(field, entry);

		trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
		if (entry->flags & TRACE_FLAG_CONT)
			trace_seq_print_cont(s, iter);
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}

cb0f12aa
IM
1903#define SEQ_PUT_FIELD_RET(s, x) \
1904do { \
1905 if (!trace_seq_putmem(s, &(x), sizeof(x))) \
1906 return 0; \
1907} while (0)
1908
5e3ca0ec
IM
1909#define SEQ_PUT_HEX_FIELD_RET(s, x) \
1910do { \
ad0a3b68 1911 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
5e3ca0ec
IM
1912 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
1913 return 0; \
1914} while (0)
1915
2c4f035f 1916static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
1917{
1918 struct trace_seq *s = &iter->seq;
1919 unsigned char newline = '\n';
1920 struct trace_entry *entry;
bac524d3 1921 int S, T;
5e3ca0ec
IM
1922
1923 entry = iter->ent;
dd0e545f
SR
1924
1925 if (entry->type == TRACE_CONT)
2c4f035f 1926 return TRACE_TYPE_HANDLED;
dd0e545f 1927
777e208d 1928 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
5e3ca0ec 1929 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
3928a8a2 1930 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
5e3ca0ec
IM
1931
1932 switch (entry->type) {
777e208d 1933 case TRACE_FN: {
7104f300
SR
1934 struct ftrace_entry *field;
1935
1936 trace_assign_type(field, entry);
777e208d
SR
1937
1938 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1939 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
5e3ca0ec 1940 break;
777e208d 1941 }
5e3ca0ec 1942 case TRACE_CTX:
777e208d 1943 case TRACE_WAKE: {
7104f300
SR
1944 struct ctx_switch_entry *field;
1945
1946 trace_assign_type(field, entry);
777e208d
SR
1947
1948 S = field->prev_state < sizeof(state_to_char) ?
1949 state_to_char[field->prev_state] : 'X';
1950 T = field->next_state < sizeof(state_to_char) ?
1951 state_to_char[field->next_state] : 'X';
57422797
IM
1952 if (entry->type == TRACE_WAKE)
1953 S = '+';
777e208d
SR
1954 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1955 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
5e3ca0ec 1956 SEQ_PUT_HEX_FIELD_RET(s, S);
777e208d
SR
1957 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1958 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1959 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
bac524d3 1960 SEQ_PUT_HEX_FIELD_RET(s, T);
5e3ca0ec 1961 break;
777e208d 1962 }
5e3ca0ec 1963 case TRACE_SPECIAL:
777e208d 1964 case TRACE_STACK: {
7104f300
SR
1965 struct special_entry *field;
1966
1967 trace_assign_type(field, entry);
777e208d
SR
1968
1969 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
1970 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
1971 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
5e3ca0ec
IM
1972 break;
1973 }
777e208d 1974 }
5e3ca0ec
IM
1975 SEQ_PUT_FIELD_RET(s, newline);
1976
2c4f035f 1977 return TRACE_TYPE_HANDLED;
5e3ca0ec
IM
1978}
1979
2c4f035f 1980static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
1981{
1982 struct trace_seq *s = &iter->seq;
1983 struct trace_entry *entry;
1984
1985 entry = iter->ent;
dd0e545f
SR
1986
1987 if (entry->type == TRACE_CONT)
2c4f035f 1988 return TRACE_TYPE_HANDLED;
dd0e545f 1989
777e208d 1990 SEQ_PUT_FIELD_RET(s, entry->pid);
072ba498 1991 SEQ_PUT_FIELD_RET(s, entry->cpu);
3928a8a2 1992 SEQ_PUT_FIELD_RET(s, iter->ts);
cb0f12aa
IM
1993
1994 switch (entry->type) {
777e208d 1995 case TRACE_FN: {
7104f300
SR
1996 struct ftrace_entry *field;
1997
1998 trace_assign_type(field, entry);
777e208d
SR
1999
2000 SEQ_PUT_FIELD_RET(s, field->ip);
2001 SEQ_PUT_FIELD_RET(s, field->parent_ip);
cb0f12aa 2002 break;
777e208d
SR
2003 }
2004 case TRACE_CTX: {
7104f300
SR
2005 struct ctx_switch_entry *field;
2006
2007 trace_assign_type(field, entry);
777e208d
SR
2008
2009 SEQ_PUT_FIELD_RET(s, field->prev_pid);
2010 SEQ_PUT_FIELD_RET(s, field->prev_prio);
2011 SEQ_PUT_FIELD_RET(s, field->prev_state);
2012 SEQ_PUT_FIELD_RET(s, field->next_pid);
2013 SEQ_PUT_FIELD_RET(s, field->next_prio);
2014 SEQ_PUT_FIELD_RET(s, field->next_state);
cb0f12aa 2015 break;
777e208d 2016 }
f0a920d5 2017 case TRACE_SPECIAL:
777e208d 2018 case TRACE_STACK: {
7104f300
SR
2019 struct special_entry *field;
2020
2021 trace_assign_type(field, entry);
777e208d
SR
2022
2023 SEQ_PUT_FIELD_RET(s, field->arg1);
2024 SEQ_PUT_FIELD_RET(s, field->arg2);
2025 SEQ_PUT_FIELD_RET(s, field->arg3);
f0a920d5 2026 break;
cb0f12aa 2027 }
777e208d 2028 }
cb0f12aa
IM
2029	return TRACE_TYPE_HANDLED;
2030}
2031
bc0c38d1
SR
2032static int trace_empty(struct trace_iterator *iter)
2033{
bc0c38d1
SR
2034 int cpu;
2035
ab46428c 2036 for_each_tracing_cpu(cpu) {
d769041f
SR
2037 if (iter->buffer_iter[cpu]) {
2038 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2039 return 0;
2040 } else {
2041 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2042 return 0;
2043 }
bc0c38d1 2044 }
d769041f 2045
797d3712 2046 return 1;
bc0c38d1
SR
2047}
2048
2c4f035f 2049static enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2050{
2c4f035f
FW
2051 enum print_line_t ret;
2052
2053 if (iter->trace && iter->trace->print_line) {
2054 ret = iter->trace->print_line(iter);
2055 if (ret != TRACE_TYPE_UNHANDLED)
2056 return ret;
2057 }
72829bc3 2058
cb0f12aa
IM
2059 if (trace_flags & TRACE_ITER_BIN)
2060 return print_bin_fmt(iter);
2061
5e3ca0ec
IM
2062 if (trace_flags & TRACE_ITER_HEX)
2063 return print_hex_fmt(iter);
2064
f9896bf3
IM
2065 if (trace_flags & TRACE_ITER_RAW)
2066 return print_raw_fmt(iter);
2067
2068 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2069 return print_lat_fmt(iter, iter->idx, iter->cpu);
2070
2071 return print_trace_fmt(iter);
2072}
2073
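/*
 * A note on the dispatch order implemented above (a summary, not new
 * behavior): a tracer's own print_line() callback is tried first;
 * otherwise the bin, hex and raw trace_options win in that order, then
 * the latency format, and finally the default formatter. Assuming the
 * option names mirror the TRACE_ITER_* flags, a sketch:
 *
 *   # echo hex > /debug/tracing/trace_options
 *   # cat /debug/tracing/trace
 */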
bc0c38d1
SR
2074static int s_show(struct seq_file *m, void *v)
2075{
2076 struct trace_iterator *iter = v;
2077
2078 if (iter->ent == NULL) {
2079 if (iter->tr) {
2080 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2081 seq_puts(m, "#\n");
2082 }
2083 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2084 /* print nothing if the buffers are empty */
2085 if (trace_empty(iter))
2086 return 0;
2087 print_trace_header(m, iter);
2088 if (!(trace_flags & TRACE_ITER_VERBOSE))
2089 print_lat_help_header(m);
2090 } else {
2091 if (!(trace_flags & TRACE_ITER_VERBOSE))
2092 print_func_help_header(m);
2093 }
2094 } else {
f9896bf3 2095 print_trace_line(iter);
214023c3 2096 trace_print_seq(m, &iter->seq);
bc0c38d1
SR
2097 }
2098
2099 return 0;
2100}
2101
2102static struct seq_operations tracer_seq_ops = {
4bf39a94
IM
2103 .start = s_start,
2104 .next = s_next,
2105 .stop = s_stop,
2106 .show = s_show,
bc0c38d1
SR
2107};
2108
e309b41d 2109static struct trace_iterator *
bc0c38d1
SR
2110__tracing_open(struct inode *inode, struct file *file, int *ret)
2111{
2112 struct trace_iterator *iter;
3928a8a2
SR
2113 struct seq_file *m;
2114 int cpu;
bc0c38d1 2115
60a11774
SR
2116 if (tracing_disabled) {
2117 *ret = -ENODEV;
2118 return NULL;
2119 }
2120
bc0c38d1
SR
2121 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2122 if (!iter) {
2123 *ret = -ENOMEM;
2124 goto out;
2125 }
2126
2127 mutex_lock(&trace_types_lock);
2128 if (current_trace && current_trace->print_max)
2129 iter->tr = &max_tr;
2130 else
2131 iter->tr = inode->i_private;
2132 iter->trace = current_trace;
2133 iter->pos = -1;
2134
3928a8a2 2135 for_each_tracing_cpu(cpu) {
d769041f 2136
3928a8a2
SR
2137 iter->buffer_iter[cpu] =
2138 ring_buffer_read_start(iter->tr->buffer, cpu);
d769041f 2139
3928a8a2
SR
2140 if (!iter->buffer_iter[cpu])
2141 goto fail_buffer;
2142 }
2143
bc0c38d1
SR
2144 /* TODO stop tracer */
2145 *ret = seq_open(file, &tracer_seq_ops);
3928a8a2
SR
2146 if (*ret)
2147 goto fail_buffer;
bc0c38d1 2148
3928a8a2
SR
2149 m = file->private_data;
2150 m->private = iter;
bc0c38d1 2151
3928a8a2 2152 /* stop the trace while dumping */
9036990d 2153 tracing_stop();
3928a8a2
SR
2154
2155 if (iter->trace && iter->trace->open)
2156 iter->trace->open(iter);
2157
bc0c38d1
SR
2158 mutex_unlock(&trace_types_lock);
2159
2160 out:
2161 return iter;
3928a8a2
SR
2162
2163 fail_buffer:
2164 for_each_tracing_cpu(cpu) {
2165 if (iter->buffer_iter[cpu])
2166 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2167 }
2168 mutex_unlock(&trace_types_lock);
2169
2170 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
2171}
2172
2173int tracing_open_generic(struct inode *inode, struct file *filp)
2174{
60a11774
SR
2175 if (tracing_disabled)
2176 return -ENODEV;
2177
bc0c38d1
SR
2178 filp->private_data = inode->i_private;
2179 return 0;
2180}
2181
2182int tracing_release(struct inode *inode, struct file *file)
2183{
2184 struct seq_file *m = (struct seq_file *)file->private_data;
2185 struct trace_iterator *iter = m->private;
3928a8a2 2186 int cpu;
bc0c38d1
SR
2187
2188 mutex_lock(&trace_types_lock);
3928a8a2
SR
2189 for_each_tracing_cpu(cpu) {
2190 if (iter->buffer_iter[cpu])
2191 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2192 }
2193
bc0c38d1
SR
2194 if (iter->trace && iter->trace->close)
2195 iter->trace->close(iter);
2196
2197	/* re-enable tracing if it was previously enabled */
9036990d 2198 tracing_start();
bc0c38d1
SR
2199 mutex_unlock(&trace_types_lock);
2200
2201 seq_release(inode, file);
2202 kfree(iter);
2203 return 0;
2204}
2205
2206static int tracing_open(struct inode *inode, struct file *file)
2207{
2208 int ret;
2209
2210 __tracing_open(inode, file, &ret);
2211
2212 return ret;
2213}
2214
2215static int tracing_lt_open(struct inode *inode, struct file *file)
2216{
2217 struct trace_iterator *iter;
2218 int ret;
2219
2220 iter = __tracing_open(inode, file, &ret);
2221
2222 if (!ret)
2223 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2224
2225 return ret;
2226}
2227
2228
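/*
 * The "trace" and "latency_trace" files share __tracing_open(); the
 * latter only sets TRACE_FILE_LAT_FMT, which switches s_show() to the
 * latency header and format. Opening either file stops tracing until
 * release. A usage sketch:
 *
 *   # cat /debug/tracing/latency_trace
 */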
e309b41d 2229static void *
bc0c38d1
SR
2230t_next(struct seq_file *m, void *v, loff_t *pos)
2231{
2232 struct tracer *t = m->private;
2233
2234 (*pos)++;
2235
2236 if (t)
2237 t = t->next;
2238
2239 m->private = t;
2240
2241 return t;
2242}
2243
2244static void *t_start(struct seq_file *m, loff_t *pos)
2245{
2246 struct tracer *t = m->private;
2247 loff_t l = 0;
2248
2249 mutex_lock(&trace_types_lock);
2250 for (; t && l < *pos; t = t_next(m, t, &l))
2251 ;
2252
2253 return t;
2254}
2255
2256static void t_stop(struct seq_file *m, void *p)
2257{
2258 mutex_unlock(&trace_types_lock);
2259}
2260
2261static int t_show(struct seq_file *m, void *v)
2262{
2263 struct tracer *t = v;
2264
2265 if (!t)
2266 return 0;
2267
2268 seq_printf(m, "%s", t->name);
2269 if (t->next)
2270 seq_putc(m, ' ');
2271 else
2272 seq_putc(m, '\n');
2273
2274 return 0;
2275}
2276
2277static struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
2278 .start = t_start,
2279 .next = t_next,
2280 .stop = t_stop,
2281 .show = t_show,
bc0c38d1
SR
2282};
2283
2284static int show_traces_open(struct inode *inode, struct file *file)
2285{
2286 int ret;
2287
60a11774
SR
2288 if (tracing_disabled)
2289 return -ENODEV;
2290
bc0c38d1
SR
2291 ret = seq_open(file, &show_traces_seq_ops);
2292 if (!ret) {
2293 struct seq_file *m = file->private_data;
2294 m->private = trace_types;
2295 }
2296
2297 return ret;
2298}
2299
2300static struct file_operations tracing_fops = {
4bf39a94
IM
2301 .open = tracing_open,
2302 .read = seq_read,
2303 .llseek = seq_lseek,
2304 .release = tracing_release,
bc0c38d1
SR
2305};
2306
2307static struct file_operations tracing_lt_fops = {
4bf39a94
IM
2308 .open = tracing_lt_open,
2309 .read = seq_read,
2310 .llseek = seq_lseek,
2311 .release = tracing_release,
bc0c38d1
SR
2312};
2313
2314static struct file_operations show_traces_fops = {
c7078de1
IM
2315 .open = show_traces_open,
2316 .read = seq_read,
2317 .release = seq_release,
2318};
2319
36dfe925
IM
2320/*
2321 * Only trace on a CPU if the bitmask is set:
2322 */
2323static cpumask_t tracing_cpumask = CPU_MASK_ALL;
2324
2325/*
2326 * When tracing/tracing_cpumask is modified, this holds
2327 * the new bitmask we are about to install:
2328 */
2329static cpumask_t tracing_cpumask_new;
2330
2331/*
2332 * The tracer itself will not take this lock, but we still want
2333 * to provide a consistent cpumask to user-space:
2334 */
2335static DEFINE_MUTEX(tracing_cpumask_update_lock);
2336
2337/*
2338 * Temporary storage for the character representation of the
2339 * CPU bitmask (and one more byte for the newline):
2340 */
2341static char mask_str[NR_CPUS + 1];
2342
c7078de1
IM
2343static ssize_t
2344tracing_cpumask_read(struct file *filp, char __user *ubuf,
2345 size_t count, loff_t *ppos)
2346{
36dfe925 2347 int len;
c7078de1
IM
2348
2349 mutex_lock(&tracing_cpumask_update_lock);
36dfe925
IM
2350
2351 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2352 if (count - len < 2) {
2353 count = -EINVAL;
2354 goto out_err;
2355 }
2356 len += sprintf(mask_str + len, "\n");
2357 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2358
2359out_err:
c7078de1
IM
2360 mutex_unlock(&tracing_cpumask_update_lock);
2361
2362 return count;
2363}
2364
2365static ssize_t
2366tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2367 size_t count, loff_t *ppos)
2368{
36dfe925 2369 int err, cpu;
c7078de1
IM
2370
2371 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 2372 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 2373 if (err)
36dfe925
IM
2374 goto err_unlock;
2375
92205c23
SR
2376 raw_local_irq_disable();
2377 __raw_spin_lock(&ftrace_max_lock);
ab46428c 2378 for_each_tracing_cpu(cpu) {
36dfe925
IM
2379 /*
2380 * Increase/decrease the disabled counter if we are
2381 * about to flip a bit in the cpumask:
2382 */
2383 if (cpu_isset(cpu, tracing_cpumask) &&
2384 !cpu_isset(cpu, tracing_cpumask_new)) {
2385 atomic_inc(&global_trace.data[cpu]->disabled);
2386 }
2387 if (!cpu_isset(cpu, tracing_cpumask) &&
2388 cpu_isset(cpu, tracing_cpumask_new)) {
2389 atomic_dec(&global_trace.data[cpu]->disabled);
2390 }
2391 }
92205c23
SR
2392 __raw_spin_unlock(&ftrace_max_lock);
2393 raw_local_irq_enable();
36dfe925
IM
2394
2395 tracing_cpumask = tracing_cpumask_new;
2396
2397 mutex_unlock(&tracing_cpumask_update_lock);
c7078de1
IM
2398
2399 return count;
36dfe925
IM
2400
2401err_unlock:
2402 mutex_unlock(&tracing_cpumask_update_lock);
2403
2404 return err;
c7078de1
IM
2405}
2406
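/*
 * A usage sketch for the write path above: the mask is parsed by
 * cpumask_parse_user(), so it is given in hex. For example, to limit
 * tracing to CPUs 0 and 1 (assuming debugfs is mounted on /debug, as
 * in the mini-HOWTO below):
 *
 *   # echo 3 > /debug/tracing/tracing_cpumask
 *
 * CPUs leaving the mask get their disabled counter bumped; CPUs
 * entering it get it dropped again.
 */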
2407static struct file_operations tracing_cpumask_fops = {
2408 .open = tracing_open_generic,
2409 .read = tracing_cpumask_read,
2410 .write = tracing_cpumask_write,
bc0c38d1
SR
2411};
2412
2413static ssize_t
ee6bce52 2414tracing_trace_options_read(struct file *filp, char __user *ubuf,
bc0c38d1
SR
2415 size_t cnt, loff_t *ppos)
2416{
2417 char *buf;
2418 int r = 0;
2419 int len = 0;
2420 int i;
2421
2422	/* calculate max size */
2423 for (i = 0; trace_options[i]; i++) {
2424 len += strlen(trace_options[i]);
2425 len += 3; /* "no" and space */
2426 }
2427
2428 /* +2 for \n and \0 */
2429 buf = kmalloc(len + 2, GFP_KERNEL);
2430 if (!buf)
2431 return -ENOMEM;
2432
2433 for (i = 0; trace_options[i]; i++) {
2434 if (trace_flags & (1 << i))
2435 r += sprintf(buf + r, "%s ", trace_options[i]);
2436 else
2437 r += sprintf(buf + r, "no%s ", trace_options[i]);
2438 }
2439
2440 r += sprintf(buf + r, "\n");
2441 WARN_ON(r >= len + 2);
2442
36dfe925 2443 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2444
2445 kfree(buf);
2446
2447 return r;
2448}
2449
2450static ssize_t
ee6bce52 2451tracing_trace_options_write(struct file *filp, const char __user *ubuf,
bc0c38d1
SR
2452 size_t cnt, loff_t *ppos)
2453{
2454 char buf[64];
2455 char *cmp = buf;
2456 int neg = 0;
2457 int i;
2458
cffae437
SR
2459 if (cnt >= sizeof(buf))
2460 return -EINVAL;
bc0c38d1
SR
2461
2462 if (copy_from_user(&buf, ubuf, cnt))
2463 return -EFAULT;
2464
2465 buf[cnt] = 0;
2466
2467 if (strncmp(buf, "no", 2) == 0) {
2468 neg = 1;
2469 cmp += 2;
2470 }
2471
2472 for (i = 0; trace_options[i]; i++) {
2473 int len = strlen(trace_options[i]);
2474
2475 if (strncmp(cmp, trace_options[i], len) == 0) {
2476 if (neg)
2477 trace_flags &= ~(1 << i);
2478 else
2479 trace_flags |= (1 << i);
2480 break;
2481 }
2482 }
442e544c
IM
2483 /*
2484 * If no option could be set, return an error:
2485 */
2486 if (!trace_options[i])
2487 return -EINVAL;
bc0c38d1
SR
2488
2489 filp->f_pos += cnt;
2490
2491 return cnt;
2492}
2493
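/*
 * A usage sketch: options are toggled by name, with a "no" prefix to
 * clear a flag (see the strncmp on "no" above); unknown names yield
 * -EINVAL:
 *
 *   # echo print-parent > /debug/tracing/trace_options
 *   # echo nosym-addr > /debug/tracing/trace_options
 */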
2494static struct file_operations tracing_iter_fops = {
c7078de1 2495 .open = tracing_open_generic,
ee6bce52
SR
2496 .read = tracing_trace_options_read,
2497 .write = tracing_trace_options_write,
bc0c38d1
SR
2498};
2499
7bd2f24c
IM
2500static const char readme_msg[] =
2501 "tracing mini-HOWTO:\n\n"
2502 "# mkdir /debug\n"
2503 "# mount -t debugfs nodev /debug\n\n"
2504 "# cat /debug/tracing/available_tracers\n"
2505 "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2506 "# cat /debug/tracing/current_tracer\n"
2507 "none\n"
2508 "# echo sched_switch > /debug/tracing/current_tracer\n"
2509 "# cat /debug/tracing/current_tracer\n"
2510 "sched_switch\n"
ee6bce52 2511 "# cat /debug/tracing/trace_options\n"
7bd2f24c 2512 "noprint-parent nosym-offset nosym-addr noverbose\n"
ee6bce52 2513 "# echo print-parent > /debug/tracing/trace_options\n"
7bd2f24c
IM
2514 "# echo 1 > /debug/tracing/tracing_enabled\n"
2515 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2516 "echo 0 > /debug/tracing/tracing_enabled\n"
2517;
2518
2519static ssize_t
2520tracing_readme_read(struct file *filp, char __user *ubuf,
2521 size_t cnt, loff_t *ppos)
2522{
2523 return simple_read_from_buffer(ubuf, cnt, ppos,
2524 readme_msg, strlen(readme_msg));
2525}
2526
2527static struct file_operations tracing_readme_fops = {
c7078de1
IM
2528 .open = tracing_open_generic,
2529 .read = tracing_readme_read,
7bd2f24c
IM
2530};
2531
bc0c38d1
SR
2532static ssize_t
2533tracing_ctrl_read(struct file *filp, char __user *ubuf,
2534 size_t cnt, loff_t *ppos)
2535{
bc0c38d1
SR
2536 char buf[64];
2537 int r;
2538
9036990d 2539 r = sprintf(buf, "%u\n", tracer_enabled);
4e3c3333 2540 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2541}
2542
2543static ssize_t
2544tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2545 size_t cnt, loff_t *ppos)
2546{
2547 struct trace_array *tr = filp->private_data;
bc0c38d1 2548 char buf[64];
c6caeeb1
SR
2549 long val;
2550 int ret;
bc0c38d1 2551
cffae437
SR
2552 if (cnt >= sizeof(buf))
2553 return -EINVAL;
bc0c38d1
SR
2554
2555 if (copy_from_user(&buf, ubuf, cnt))
2556 return -EFAULT;
2557
2558 buf[cnt] = 0;
2559
c6caeeb1
SR
2560 ret = strict_strtoul(buf, 10, &val);
2561 if (ret < 0)
2562 return ret;
bc0c38d1
SR
2563
2564 val = !!val;
2565
2566 mutex_lock(&trace_types_lock);
9036990d
SR
2567 if (tracer_enabled ^ val) {
2568 if (val) {
bc0c38d1 2569 tracer_enabled = 1;
9036990d
SR
2570 if (current_trace->start)
2571 current_trace->start(tr);
2572 tracing_start();
2573 } else {
bc0c38d1 2574 tracer_enabled = 0;
9036990d
SR
2575 tracing_stop();
2576 if (current_trace->stop)
2577 current_trace->stop(tr);
2578 }
bc0c38d1
SR
2579 }
2580 mutex_unlock(&trace_types_lock);
2581
2582 filp->f_pos += cnt;
2583
2584 return cnt;
2585}
2586
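/*
 * A usage sketch: tracing_enabled takes a boolean; flipping it calls
 * the current tracer's start/stop hooks around tracing_start() and
 * tracing_stop():
 *
 *   # echo 1 > /debug/tracing/tracing_enabled
 *   # echo 0 > /debug/tracing/tracing_enabled
 */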
2587static ssize_t
2588tracing_set_trace_read(struct file *filp, char __user *ubuf,
2589 size_t cnt, loff_t *ppos)
2590{
2591 char buf[max_tracer_type_len+2];
2592 int r;
2593
2594 mutex_lock(&trace_types_lock);
2595 if (current_trace)
2596 r = sprintf(buf, "%s\n", current_trace->name);
2597 else
2598 r = sprintf(buf, "\n");
2599 mutex_unlock(&trace_types_lock);
2600
4bf39a94 2601 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2602}
2603
d9e54076 2604static int tracing_set_tracer(char *buf)
bc0c38d1
SR
2605{
2606 struct trace_array *tr = &global_trace;
2607 struct tracer *t;
d9e54076 2608 int ret = 0;
bc0c38d1
SR
2609
2610 mutex_lock(&trace_types_lock);
2611 for (t = trace_types; t; t = t->next) {
2612 if (strcmp(t->name, buf) == 0)
2613 break;
2614 }
c2931e05
FW
2615 if (!t) {
2616 ret = -EINVAL;
2617 goto out;
2618 }
2619 if (t == current_trace)
bc0c38d1
SR
2620 goto out;
2621
9f029e83 2622 trace_branch_disable();
bc0c38d1
SR
2623 if (current_trace && current_trace->reset)
2624 current_trace->reset(tr);
2625
2626 current_trace = t;
2627 if (t->init)
2628 t->init(tr);
2629
9f029e83 2630 trace_branch_enable(tr);
bc0c38d1
SR
2631 out:
2632 mutex_unlock(&trace_types_lock);
2633
d9e54076
PZ
2634 return ret;
2635}
2636
2637static ssize_t
2638tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2639 size_t cnt, loff_t *ppos)
2640{
2641 char buf[max_tracer_type_len+1];
2642 int i;
2643 size_t ret;
2644
2645 if (cnt > max_tracer_type_len)
2646 cnt = max_tracer_type_len;
2647
2648 if (copy_from_user(&buf, ubuf, cnt))
2649 return -EFAULT;
2650
2651 buf[cnt] = 0;
2652
2653 /* strip ending whitespace. */
2654 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2655 buf[i] = 0;
2656
2657 ret = tracing_set_tracer(buf);
2658 if (!ret)
2659 ret = cnt;
2660
60063a66
SR
2661 if (ret > 0)
2662 filp->f_pos += ret;
bc0c38d1 2663
c2931e05 2664 return ret;
bc0c38d1
SR
2665}
2666
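/*
 * A usage sketch (the same flow the mini-HOWTO above demonstrates):
 * writing a name listed in available_tracers resets the old tracer
 * and installs the new one:
 *
 *   # echo sched_switch > /debug/tracing/current_tracer
 */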
2667static ssize_t
2668tracing_max_lat_read(struct file *filp, char __user *ubuf,
2669 size_t cnt, loff_t *ppos)
2670{
2671 unsigned long *ptr = filp->private_data;
2672 char buf[64];
2673 int r;
2674
cffae437 2675 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 2676 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
2677 if (r > sizeof(buf))
2678 r = sizeof(buf);
4bf39a94 2679 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2680}
2681
2682static ssize_t
2683tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2684 size_t cnt, loff_t *ppos)
2685{
2686 long *ptr = filp->private_data;
bc0c38d1 2687 char buf[64];
c6caeeb1
SR
2688 long val;
2689 int ret;
bc0c38d1 2690
cffae437
SR
2691 if (cnt >= sizeof(buf))
2692 return -EINVAL;
bc0c38d1
SR
2693
2694 if (copy_from_user(&buf, ubuf, cnt))
2695 return -EFAULT;
2696
2697 buf[cnt] = 0;
2698
c6caeeb1
SR
2699 ret = strict_strtoul(buf, 10, &val);
2700 if (ret < 0)
2701 return ret;
bc0c38d1
SR
2702
2703 *ptr = val * 1000;
2704
2705 return cnt;
2706}
2707
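/*
 * A usage sketch: tracing_max_latency and tracing_thresh are exposed
 * in microseconds but stored in nanoseconds (note the "* 1000" in the
 * write path above). To clear the recorded maximum:
 *
 *   # echo 0 > /debug/tracing/tracing_max_latency
 */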
b3806b43
SR
2708static atomic_t tracing_reader;
2709
2710static int tracing_open_pipe(struct inode *inode, struct file *filp)
2711{
2712 struct trace_iterator *iter;
2713
2714 if (tracing_disabled)
2715 return -ENODEV;
2716
2717	/* We only allow one reader of the pipe */
2718 if (atomic_inc_return(&tracing_reader) != 1) {
2719 atomic_dec(&tracing_reader);
2720 return -EBUSY;
2721 }
2722
2723 /* create a buffer to store the information to pass to userspace */
2724 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2725 if (!iter)
2726 return -ENOMEM;
2727
107bad8b 2728 mutex_lock(&trace_types_lock);
a309720c
SR
2729
2730 /* trace pipe does not show start of buffer */
2731 cpus_setall(iter->started);
2732
b3806b43 2733 iter->tr = &global_trace;
72829bc3 2734 iter->trace = current_trace;
b3806b43
SR
2735 filp->private_data = iter;
2736
107bad8b
SR
2737 if (iter->trace->pipe_open)
2738 iter->trace->pipe_open(iter);
2739 mutex_unlock(&trace_types_lock);
2740
b3806b43
SR
2741 return 0;
2742}
2743
2744static int tracing_release_pipe(struct inode *inode, struct file *file)
2745{
2746 struct trace_iterator *iter = file->private_data;
2747
2748 kfree(iter);
2749 atomic_dec(&tracing_reader);
2750
2751 return 0;
2752}
2753
2a2cc8f7
SSP
2754static unsigned int
2755tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2756{
2757 struct trace_iterator *iter = filp->private_data;
2758
2759 if (trace_flags & TRACE_ITER_BLOCK) {
2760 /*
2761 * Always select as readable when in blocking mode
2762 */
2763 return POLLIN | POLLRDNORM;
afc2abc0 2764 } else {
2a2cc8f7
SSP
2765 if (!trace_empty(iter))
2766 return POLLIN | POLLRDNORM;
2767 poll_wait(filp, &trace_wait, poll_table);
2768 if (!trace_empty(iter))
2769 return POLLIN | POLLRDNORM;
2770
2771 return 0;
2772 }
2773}
2774
b3806b43
SR
2775/*
2776 * Consumer reader.
2777 */
2778static ssize_t
2779tracing_read_pipe(struct file *filp, char __user *ubuf,
2780 size_t cnt, loff_t *ppos)
2781{
2782 struct trace_iterator *iter = filp->private_data;
6c6c2796 2783 ssize_t sret;
b3806b43
SR
2784
2785 /* return any leftover data */
6c6c2796
PP
2786 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2787 if (sret != -EBUSY)
2788 return sret;
b3806b43 2789
6c6c2796 2790 trace_seq_reset(&iter->seq);
b3806b43 2791
107bad8b
SR
2792 mutex_lock(&trace_types_lock);
2793 if (iter->trace->read) {
6c6c2796
PP
2794 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2795 if (sret)
107bad8b 2796 goto out;
107bad8b
SR
2797 }
2798
9ff4b974
PP
2799waitagain:
2800 sret = 0;
b3806b43 2801 while (trace_empty(iter)) {
2dc8f095 2802
107bad8b 2803 if ((filp->f_flags & O_NONBLOCK)) {
6c6c2796 2804 sret = -EAGAIN;
107bad8b
SR
2805 goto out;
2806 }
2dc8f095 2807
b3806b43
SR
2808 /*
2809	 * This is a makeshift waitqueue; we don't use an actual
2810	 * wait queue because:
2811	 * 1) we only ever have one waiter
2812	 * 2) tracing hooks every function, and we don't want
2813	 *    the overhead of calling wake_up and friends
2814	 *    (and tracing them too)
2815	 * Anyway, this is a very primitive wakeup.
2816 */
2817 set_current_state(TASK_INTERRUPTIBLE);
2818 iter->tr->waiter = current;
2819
107bad8b
SR
2820 mutex_unlock(&trace_types_lock);
2821
9fe068e9
IM
2822 /* sleep for 100 msecs, and try again. */
2823 schedule_timeout(HZ/10);
b3806b43 2824
107bad8b
SR
2825 mutex_lock(&trace_types_lock);
2826
b3806b43
SR
2827 iter->tr->waiter = NULL;
2828
107bad8b 2829 if (signal_pending(current)) {
6c6c2796 2830 sret = -EINTR;
107bad8b
SR
2831 goto out;
2832 }
b3806b43 2833
84527997 2834 if (iter->trace != current_trace)
107bad8b 2835 goto out;
84527997 2836
b3806b43
SR
2837 /*
2838 * We block until we read something and tracing is disabled.
2839	 * We still block if tracing is disabled but we have not yet
2840	 * read anything. This allows a user to cat this file and
2841	 * then enable tracing. But after we have read something,
2842	 * we give an EOF when tracing is disabled again.
2843 *
2844 * iter->pos will be 0 if we haven't read anything.
2845 */
2846 if (!tracer_enabled && iter->pos)
2847 break;
2848
2849 continue;
2850 }
2851
2852 /* stop when tracing is finished */
2853 if (trace_empty(iter))
107bad8b 2854 goto out;
b3806b43
SR
2855
2856 if (cnt >= PAGE_SIZE)
2857 cnt = PAGE_SIZE - 1;
2858
53d0aa77 2859 /* reset all but tr, trace, and overruns */
53d0aa77
SR
2860 memset(&iter->seq, 0,
2861 sizeof(struct trace_iterator) -
2862 offsetof(struct trace_iterator, seq));
4823ed7e 2863 iter->pos = -1;
b3806b43 2864
088b1e42 2865 while (find_next_entry_inc(iter) != NULL) {
2c4f035f 2866 enum print_line_t ret;
088b1e42
SR
2867 int len = iter->seq.len;
2868
f9896bf3 2869 ret = print_trace_line(iter);
2c4f035f 2870 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42
SR
2871 /* don't print partial lines */
2872 iter->seq.len = len;
b3806b43 2873 break;
088b1e42 2874 }
b3806b43
SR
2875
2876 trace_consume(iter);
2877
2878 if (iter->seq.len >= cnt)
2879 break;
b3806b43
SR
2880 }
2881
b3806b43 2882 /* Now copy what we have to the user */
6c6c2796
PP
2883 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2884 if (iter->seq.readpos >= iter->seq.len)
b3806b43 2885 trace_seq_reset(&iter->seq);
9ff4b974
PP
2886
2887 /*
2888	 * If there was nothing to send to the user, in spite of consuming
2889	 * trace entries, go back and wait for more entries.
2890 */
6c6c2796 2891 if (sret == -EBUSY)
9ff4b974 2892 goto waitagain;
b3806b43 2893
107bad8b
SR
2894out:
2895 mutex_unlock(&trace_types_lock);
2896
6c6c2796 2897 return sret;
b3806b43
SR
2898}
2899
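/*
 * A usage sketch: unlike the "trace" file, trace_pipe is a consuming,
 * blocking reader, and only a single opener is allowed at a time (see
 * tracing_open_pipe() above):
 *
 *   # cat /debug/tracing/trace_pipe
 */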
a98a3c3f
SR
2900static ssize_t
2901tracing_entries_read(struct file *filp, char __user *ubuf,
2902 size_t cnt, loff_t *ppos)
2903{
2904 struct trace_array *tr = filp->private_data;
2905 char buf[64];
2906 int r;
2907
1696b2b0 2908 r = sprintf(buf, "%lu\n", tr->entries >> 10);
a98a3c3f
SR
2909 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2910}
2911
2912static ssize_t
2913tracing_entries_write(struct file *filp, const char __user *ubuf,
2914 size_t cnt, loff_t *ppos)
2915{
2916 unsigned long val;
2917 char buf[64];
bf5e6519 2918 int ret, cpu;
a98a3c3f 2919
cffae437
SR
2920 if (cnt >= sizeof(buf))
2921 return -EINVAL;
a98a3c3f
SR
2922
2923 if (copy_from_user(&buf, ubuf, cnt))
2924 return -EFAULT;
2925
2926 buf[cnt] = 0;
2927
c6caeeb1
SR
2928 ret = strict_strtoul(buf, 10, &val);
2929 if (ret < 0)
2930 return ret;
a98a3c3f
SR
2931
2932 /* must have at least 1 entry */
2933 if (!val)
2934 return -EINVAL;
2935
2936 mutex_lock(&trace_types_lock);
2937
c76f0694 2938 tracing_stop();
a98a3c3f 2939
bf5e6519
SR
2940 /* disable all cpu buffers */
2941 for_each_tracing_cpu(cpu) {
2942 if (global_trace.data[cpu])
2943 atomic_inc(&global_trace.data[cpu]->disabled);
2944 if (max_tr.data[cpu])
2945 atomic_inc(&max_tr.data[cpu]->disabled);
2946 }
2947
1696b2b0
SR
2948 /* value is in KB */
2949 val <<= 10;
2950
3928a8a2
SR
2951 if (val != global_trace.entries) {
2952 ret = ring_buffer_resize(global_trace.buffer, val);
2953 if (ret < 0) {
2954 cnt = ret;
3eefae99
SR
2955 goto out;
2956 }
2957
3928a8a2
SR
2958 ret = ring_buffer_resize(max_tr.buffer, val);
2959 if (ret < 0) {
2960 int r;
2961 cnt = ret;
2962 r = ring_buffer_resize(global_trace.buffer,
2963 global_trace.entries);
2964 if (r < 0) {
2965				/* AARGH! We are left with a
2966				 * different-sized max buffer! */
2967 WARN_ON(1);
2968 tracing_disabled = 1;
a98a3c3f 2969 }
3928a8a2 2970 goto out;
a98a3c3f 2971 }
3eefae99 2972
3928a8a2 2973 global_trace.entries = val;
a98a3c3f
SR
2974 }
2975
2976 filp->f_pos += cnt;
2977
19384c03
SR
2978	/* If the resize failure disabled tracing, return ENOMEM */
2979 if (tracing_disabled)
2980 cnt = -ENOMEM;
a98a3c3f 2981 out:
bf5e6519
SR
2982 for_each_tracing_cpu(cpu) {
2983 if (global_trace.data[cpu])
2984 atomic_dec(&global_trace.data[cpu]->disabled);
2985 if (max_tr.data[cpu])
2986 atomic_dec(&max_tr.data[cpu]->disabled);
2987 }
2988
c76f0694 2989 tracing_start();
a98a3c3f
SR
2990 max_tr.entries = global_trace.entries;
2991 mutex_unlock(&trace_types_lock);
2992
2993 return cnt;
2994}
2995
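/*
 * A usage sketch: buffer_size_kb is read and written in kilobytes
 * (note the ">> 10" and "<<= 10" above). Tracing is stopped and all
 * per-cpu buffers are disabled for the duration of the resize:
 *
 *   # echo 2048 > /debug/tracing/buffer_size_kb
 */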
5bf9a1ee
PP
2996static int mark_printk(const char *fmt, ...)
2997{
2998 int ret;
2999 va_list args;
3000 va_start(args, fmt);
3001 ret = trace_vprintk(0, fmt, args);
3002 va_end(args);
3003 return ret;
3004}
3005
3006static ssize_t
3007tracing_mark_write(struct file *filp, const char __user *ubuf,
3008 size_t cnt, loff_t *fpos)
3009{
3010 char *buf;
3011 char *end;
5bf9a1ee 3012
c76f0694 3013 if (tracing_disabled)
5bf9a1ee
PP
3014 return -EINVAL;
3015
3016 if (cnt > TRACE_BUF_SIZE)
3017 cnt = TRACE_BUF_SIZE;
3018
3019 buf = kmalloc(cnt + 1, GFP_KERNEL);
3020 if (buf == NULL)
3021 return -ENOMEM;
3022
3023 if (copy_from_user(buf, ubuf, cnt)) {
3024 kfree(buf);
3025 return -EFAULT;
3026 }
3027
3028	/* Terminate at the first NUL or newline. */
3029 buf[cnt] = '\0';
3030 end = strchr(buf, '\n');
3031 if (end)
3032 *end = '\0';
3033
3034 cnt = mark_printk("%s\n", buf);
3035 kfree(buf);
3036 *fpos += cnt;
3037
3038 return cnt;
3039}
3040
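/*
 * A usage sketch: anything written to trace_marker is injected into
 * the trace as a TRACE_PRINT entry via mark_printk(), truncated at
 * the first NUL or newline:
 *
 *   # echo "hello from user space" > /debug/tracing/trace_marker
 */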
bc0c38d1 3041static struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
3042 .open = tracing_open_generic,
3043 .read = tracing_max_lat_read,
3044 .write = tracing_max_lat_write,
bc0c38d1
SR
3045};
3046
3047static struct file_operations tracing_ctrl_fops = {
4bf39a94
IM
3048 .open = tracing_open_generic,
3049 .read = tracing_ctrl_read,
3050 .write = tracing_ctrl_write,
bc0c38d1
SR
3051};
3052
3053static struct file_operations set_tracer_fops = {
4bf39a94
IM
3054 .open = tracing_open_generic,
3055 .read = tracing_set_trace_read,
3056 .write = tracing_set_trace_write,
bc0c38d1
SR
3057};
3058
b3806b43 3059static struct file_operations tracing_pipe_fops = {
4bf39a94 3060 .open = tracing_open_pipe,
2a2cc8f7 3061 .poll = tracing_poll_pipe,
4bf39a94
IM
3062 .read = tracing_read_pipe,
3063 .release = tracing_release_pipe,
b3806b43
SR
3064};
3065
a98a3c3f
SR
3066static struct file_operations tracing_entries_fops = {
3067 .open = tracing_open_generic,
3068 .read = tracing_entries_read,
3069 .write = tracing_entries_write,
3070};
3071
5bf9a1ee 3072static struct file_operations tracing_mark_fops = {
43a15386 3073 .open = tracing_open_generic,
5bf9a1ee
PP
3074 .write = tracing_mark_write,
3075};
3076
bc0c38d1
SR
3077#ifdef CONFIG_DYNAMIC_FTRACE
3078
b807c3d0
SR
3079int __weak ftrace_arch_read_dyn_info(char *buf, int size)
3080{
3081 return 0;
3082}
3083
bc0c38d1 3084static ssize_t
b807c3d0 3085tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
3086 size_t cnt, loff_t *ppos)
3087{
a26a2a27
SR
3088 static char ftrace_dyn_info_buffer[1024];
3089 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 3090 unsigned long *p = filp->private_data;
b807c3d0 3091 char *buf = ftrace_dyn_info_buffer;
a26a2a27 3092 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
3093 int r;
3094
b807c3d0
SR
3095 mutex_lock(&dyn_info_mutex);
3096 r = sprintf(buf, "%ld ", *p);
4bf39a94 3097
a26a2a27 3098 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
3099 buf[r++] = '\n';
3100
3101 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3102
3103 mutex_unlock(&dyn_info_mutex);
3104
3105 return r;
bc0c38d1
SR
3106}
3107
b807c3d0 3108static struct file_operations tracing_dyn_info_fops = {
4bf39a94 3109 .open = tracing_open_generic,
b807c3d0 3110 .read = tracing_read_dyn_info,
bc0c38d1
SR
3111};
3112#endif
3113
3114static struct dentry *d_tracer;
3115
3116struct dentry *tracing_init_dentry(void)
3117{
3118 static int once;
3119
3120 if (d_tracer)
3121 return d_tracer;
3122
3123 d_tracer = debugfs_create_dir("tracing", NULL);
3124
3125 if (!d_tracer && !once) {
3126 once = 1;
3127 pr_warning("Could not create debugfs directory 'tracing'\n");
3128 return NULL;
3129 }
3130
3131 return d_tracer;
3132}
3133
60a11774
SR
3134#ifdef CONFIG_FTRACE_SELFTEST
3135/* Let selftest have access to static functions in this file */
3136#include "trace_selftest.c"
3137#endif
3138
b5ad384e 3139static __init int tracer_init_debugfs(void)
bc0c38d1
SR
3140{
3141 struct dentry *d_tracer;
3142 struct dentry *entry;
3143
3144 d_tracer = tracing_init_dentry();
3145
3146 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
3147 &global_trace, &tracing_ctrl_fops);
3148 if (!entry)
3149 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
3150
ee6bce52 3151 entry = debugfs_create_file("trace_options", 0644, d_tracer,
bc0c38d1
SR
3152 NULL, &tracing_iter_fops);
3153 if (!entry)
ee6bce52 3154 pr_warning("Could not create debugfs 'trace_options' entry\n");
bc0c38d1 3155
c7078de1
IM
3156 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
3157 NULL, &tracing_cpumask_fops);
3158 if (!entry)
3159 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
3160
bc0c38d1
SR
3161 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
3162 &global_trace, &tracing_lt_fops);
3163 if (!entry)
3164 pr_warning("Could not create debugfs 'latency_trace' entry\n");
3165
3166 entry = debugfs_create_file("trace", 0444, d_tracer,
3167 &global_trace, &tracing_fops);
3168 if (!entry)
3169 pr_warning("Could not create debugfs 'trace' entry\n");
3170
3171 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
3172 &global_trace, &show_traces_fops);
3173 if (!entry)
98a983aa 3174 pr_warning("Could not create debugfs 'available_tracers' entry\n");
bc0c38d1
SR
3175
3176 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
3177 &global_trace, &set_tracer_fops);
3178 if (!entry)
98a983aa 3179 pr_warning("Could not create debugfs 'current_tracer' entry\n");
bc0c38d1
SR
3180
3181 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
3182 &tracing_max_latency,
3183 &tracing_max_lat_fops);
3184 if (!entry)
3185 pr_warning("Could not create debugfs "
3186 "'tracing_max_latency' entry\n");
3187
3188 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
3189 &tracing_thresh, &tracing_max_lat_fops);
3190 if (!entry)
3191 pr_warning("Could not create debugfs "
98a983aa 3192 "'tracing_thresh' entry\n");
7bd2f24c
IM
3193 entry = debugfs_create_file("README", 0644, d_tracer,
3194 NULL, &tracing_readme_fops);
3195 if (!entry)
3196 pr_warning("Could not create debugfs 'README' entry\n");
3197
b3806b43
SR
3198 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
3199 NULL, &tracing_pipe_fops);
3200 if (!entry)
3201 pr_warning("Could not create debugfs "
98a983aa 3202 "'trace_pipe' entry\n");
bc0c38d1 3203
a94c80e7 3204 entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
a98a3c3f
SR
3205 &global_trace, &tracing_entries_fops);
3206 if (!entry)
3207 pr_warning("Could not create debugfs "
a94c80e7 3208 "'buffer_size_kb' entry\n");
a98a3c3f 3209
5bf9a1ee
PP
3210 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
3211 NULL, &tracing_mark_fops);
3212 if (!entry)
3213 pr_warning("Could not create debugfs "
3214 "'trace_marker' entry\n");
3215
bc0c38d1
SR
3216#ifdef CONFIG_DYNAMIC_FTRACE
3217 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
3218 &ftrace_update_tot_cnt,
b807c3d0 3219 &tracing_dyn_info_fops);
bc0c38d1
SR
3220 if (!entry)
3221 pr_warning("Could not create debugfs "
3222 "'dyn_ftrace_total_info' entry\n");
3223#endif
d618b3e6
IM
3224#ifdef CONFIG_SYSPROF_TRACER
3225 init_tracer_sysprof_debugfs(d_tracer);
3226#endif
b5ad384e 3227 return 0;
bc0c38d1
SR
3228}
3229
801fe400 3230int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
dd0e545f 3231{
dd0e545f
SR
3232 static DEFINE_SPINLOCK(trace_buf_lock);
3233 static char trace_buf[TRACE_BUF_SIZE];
f09ce573 3234
3928a8a2 3235 struct ring_buffer_event *event;
f09ce573 3236 struct trace_array *tr = &global_trace;
dd0e545f 3237 struct trace_array_cpu *data;
777e208d 3238 struct print_entry *entry;
3928a8a2 3239 unsigned long flags, irq_flags;
38697053 3240 int cpu, len = 0, size, pc;
dd0e545f 3241
c76f0694 3242 if (tracing_disabled)
dd0e545f
SR
3243 return 0;
3244
38697053
SR
3245 pc = preempt_count();
3246 preempt_disable_notrace();
dd0e545f
SR
3247 cpu = raw_smp_processor_id();
3248 data = tr->data[cpu];
dd0e545f 3249
3ea2e6d7 3250 if (unlikely(atomic_read(&data->disabled)))
dd0e545f
SR
3251 goto out;
3252
38697053 3253 spin_lock_irqsave(&trace_buf_lock, flags);
801fe400 3254 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
dd0e545f
SR
3255
3256 len = min(len, TRACE_BUF_SIZE-1);
3257 trace_buf[len] = 0;
3258
777e208d
SR
3259 size = sizeof(*entry) + len + 1;
3260 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
3928a8a2
SR
3261 if (!event)
3262 goto out_unlock;
777e208d 3263 entry = ring_buffer_event_data(event);
38697053 3264 tracing_generic_entry_update(&entry->ent, flags, pc);
777e208d
SR
3265 entry->ent.type = TRACE_PRINT;
3266 entry->ip = ip;
dd0e545f 3267
777e208d
SR
3268 memcpy(&entry->buf, trace_buf, len);
3269 entry->buf[len] = 0;
3928a8a2 3270 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
dd0e545f 3271
3928a8a2 3272 out_unlock:
38697053 3273 spin_unlock_irqrestore(&trace_buf_lock, flags);
dd0e545f
SR
3274
3275 out:
38697053 3276 preempt_enable_notrace();
dd0e545f
SR
3277
3278 return len;
3279}
801fe400
PP
3280EXPORT_SYMBOL_GPL(trace_vprintk);
3281
3282int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3283{
3284 int ret;
3285 va_list ap;
3286
3287 if (!(trace_flags & TRACE_ITER_PRINTK))
3288 return 0;
3289
3290 va_start(ap, fmt);
3291 ret = trace_vprintk(ip, fmt, ap);
3292 va_end(ap);
3293 return ret;
3294}
dd0e545f
SR
3295EXPORT_SYMBOL_GPL(__ftrace_printk);
3296
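/*
 * A minimal usage sketch, not part of this file: kernel code can feed
 * formatted text into the ring buffer through __ftrace_printk(). The
 * function below is hypothetical; _THIS_IP_ (from linux/kernel.h) is
 * assumed to be available to supply the caller's address.
 */
static inline void example_trace_value(int value)
{
	/* honors the TRACE_ITER_PRINTK flag checked above */
	__ftrace_printk(_THIS_IP_, "example: value is now %d\n", value);
}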
3f5a54e3
SR
3297static int trace_panic_handler(struct notifier_block *this,
3298 unsigned long event, void *unused)
3299{
944ac425
SR
3300 if (ftrace_dump_on_oops)
3301 ftrace_dump();
3f5a54e3
SR
3302 return NOTIFY_OK;
3303}
3304
3305static struct notifier_block trace_panic_notifier = {
3306 .notifier_call = trace_panic_handler,
3307 .next = NULL,
3308 .priority = 150 /* priority: INT_MAX >= x >= 0 */
3309};
3310
3311static int trace_die_handler(struct notifier_block *self,
3312 unsigned long val,
3313 void *data)
3314{
3315 switch (val) {
3316 case DIE_OOPS:
944ac425
SR
3317 if (ftrace_dump_on_oops)
3318 ftrace_dump();
3f5a54e3
SR
3319 break;
3320 default:
3321 break;
3322 }
3323 return NOTIFY_OK;
3324}
3325
3326static struct notifier_block trace_die_notifier = {
3327 .notifier_call = trace_die_handler,
3328 .priority = 200
3329};
3330
3331/*
3332 * printk allows a maximum of 1024 characters; we really don't need
3333 * it that big. Nothing should be printing 1000 characters anyway.
3334 */
3335#define TRACE_MAX_PRINT 1000
3336
3337/*
3338 * Define KERN_TRACE here so that we have one place to modify
3339 * it if we decide to change what log level the ftrace dump
3340 * should be at.
3341 */
3342#define KERN_TRACE KERN_INFO
3343
3344static void
3345trace_printk_seq(struct trace_seq *s)
3346{
3347 /* Probably should print a warning here. */
3348	if (s->len >= TRACE_MAX_PRINT)
3349		s->len = TRACE_MAX_PRINT;
3350
3351	/* should be NUL-terminated, but we are paranoid. */
3352 s->buffer[s->len] = 0;
3353
3354 printk(KERN_TRACE "%s", s->buffer);
3355
3356 trace_seq_reset(s);
3357}
3358
3f5a54e3
SR
3359void ftrace_dump(void)
3360{
3361 static DEFINE_SPINLOCK(ftrace_dump_lock);
3362 /* use static because iter can be a bit big for the stack */
3363 static struct trace_iterator iter;
3f5a54e3
SR
3364 static cpumask_t mask;
3365 static int dump_ran;
d769041f
SR
3366 unsigned long flags;
3367 int cnt = 0, cpu;
3f5a54e3
SR
3368
3369 /* only one dump */
3370 spin_lock_irqsave(&ftrace_dump_lock, flags);
3371 if (dump_ran)
3372 goto out;
3373
3374 dump_ran = 1;
3375
3376 /* No turning back! */
81adbdc0 3377 ftrace_kill();
3f5a54e3 3378
d769041f
SR
3379 for_each_tracing_cpu(cpu) {
3380 atomic_inc(&global_trace.data[cpu]->disabled);
3381 }
3382
3f5a54e3
SR
3383 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3384
3385 iter.tr = &global_trace;
3386 iter.trace = current_trace;
3387
3388 /*
3389	 * We need to stop all tracing on all CPUs to read
3390	 * the next buffer. This is a bit expensive, but is
3391	 * not done often. We read everything we can,
3392 * and then release the locks again.
3393 */
3394
3395 cpus_clear(mask);
3396
3f5a54e3
SR
3397 while (!trace_empty(&iter)) {
3398
3399 if (!cnt)
3400 printk(KERN_TRACE "---------------------------------\n");
3401
3402 cnt++;
3403
3404 /* reset all but tr, trace, and overruns */
3405 memset(&iter.seq, 0,
3406 sizeof(struct trace_iterator) -
3407 offsetof(struct trace_iterator, seq));
3408 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3409 iter.pos = -1;
3410
3411 if (find_next_entry_inc(&iter) != NULL) {
3412 print_trace_line(&iter);
3413 trace_consume(&iter);
3414 }
3415
3416 trace_printk_seq(&iter.seq);
3417 }
3418
3419 if (!cnt)
3420 printk(KERN_TRACE " (ftrace buffer empty)\n");
3421 else
3422 printk(KERN_TRACE "---------------------------------\n");
3423
3f5a54e3
SR
3424 out:
3425 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3426}
3427
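/*
 * A usage sketch: ftrace_dump() is normally reached from the panic and
 * die notifiers above when ftrace_dump_on_oops is set, e.g.:
 *
 *   # echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * It kills ftrace, disables every per-cpu buffer, and prints whatever
 * the iterator can still consume, one line at a time.
 */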
3928a8a2 3428__init static int tracer_alloc_buffers(void)
bc0c38d1 3429{
4c11d7ae 3430 struct trace_array_cpu *data;
4c11d7ae
SR
3431 int i;
3432
3928a8a2
SR
3433	/* TODO: make the number of buffers hot pluggable with CPUs */
3434 tracing_buffer_mask = cpu_possible_map;
4c11d7ae 3435
3928a8a2
SR
3436 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3437 TRACE_BUFFER_FLAGS);
3438 if (!global_trace.buffer) {
3439 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3440 WARN_ON(1);
3441 return 0;
4c11d7ae 3442 }
3928a8a2 3443 global_trace.entries = ring_buffer_size(global_trace.buffer);
4c11d7ae
SR
3444
3445#ifdef CONFIG_TRACER_MAX_TRACE
3928a8a2
SR
3446 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3447 TRACE_BUFFER_FLAGS);
3448 if (!max_tr.buffer) {
3449 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3450 WARN_ON(1);
3451 ring_buffer_free(global_trace.buffer);
3452 return 0;
4c11d7ae 3453 }
3928a8a2
SR
3454 max_tr.entries = ring_buffer_size(max_tr.buffer);
3455 WARN_ON(max_tr.entries != global_trace.entries);
a98a3c3f 3456#endif
ab46428c 3457
4c11d7ae 3458 /* Allocate the first page for all buffers */
ab46428c 3459 for_each_tracing_cpu(i) {
4c11d7ae 3460 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
bc0c38d1 3461 max_tr.data[i] = &per_cpu(max_data, i);
4c11d7ae 3462 }
bc0c38d1 3463
bc0c38d1
SR
3464 trace_init_cmdlines();
3465
43a15386 3466 register_tracer(&nop_trace);
b5ad384e
FW
3467#ifdef CONFIG_BOOT_TRACER
3468 register_tracer(&boot_tracer);
3469 current_trace = &boot_tracer;
3470 current_trace->init(&global_trace);
3471#else
43a15386 3472 current_trace = &nop_trace;
b5ad384e 3473#endif
bc0c38d1 3474
60a11774
SR
3475 /* All seems OK, enable tracing */
3476 tracing_disabled = 0;
3928a8a2 3477
3f5a54e3
SR
3478 atomic_notifier_chain_register(&panic_notifier_list,
3479 &trace_panic_notifier);
3480
3481 register_die_notifier(&trace_die_notifier);
3482
bc0c38d1 3483 return 0;
bc0c38d1 3484}
b5ad384e
FW
3485early_initcall(tracer_alloc_buffers);
3486fs_initcall(tracer_init_debugfs);