]> bbs.cooldavid.org Git - net-next-2.6.git/blame - kernel/trace/trace_functions_graph.c
Merge branches 'msm-fixes' and 'msm-video' of git://codeaurora.org/quic/kernel/dwalke...
[net-next-2.6.git] / kernel / trace / trace_functions_graph.c
CommitLineData
fb52607a
FW
1/*
2 *
3 * Function graph tracer.
9005f3eb 4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
fb52607a
FW
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
5a0e3ad6 12#include <linux/slab.h>
fb52607a
FW
13#include <linux/fs.h>
14
15#include "trace.h"
f0868d1e 16#include "trace_output.h"
fb52607a 17
b304d044
SR
18/* When set, irq functions will be ignored */
19static int ftrace_graph_skip_irqs;
20
be1eca39 21struct fgraph_cpu_data {
2fbcdb35
SR
22 pid_t last_pid;
23 int depth;
2bd16212 24 int depth_irq;
be1eca39 25 int ignore;
f1c7f517 26 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
be1eca39
JO
27};
28
29struct fgraph_data {
6016ee13 30 struct fgraph_cpu_data __percpu *cpu_data;
be1eca39
JO
31
32 /* Place to preserve last processed entry. */
33 struct ftrace_graph_ent_entry ent;
34 struct ftrace_graph_ret_entry ret;
35 int failed;
36 int cpu;
2fbcdb35
SR
37};
38
287b6e68 39#define TRACE_GRAPH_INDENT 2
fb52607a 40
1a056155 41/* Flag options */
fb52607a 42#define TRACE_GRAPH_PRINT_OVERRUN 0x1
1a056155
FW
43#define TRACE_GRAPH_PRINT_CPU 0x2
44#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
11e84acc 45#define TRACE_GRAPH_PRINT_PROC 0x8
9005f3eb 46#define TRACE_GRAPH_PRINT_DURATION 0x10
9106b693 47#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
2bd16212 48#define TRACE_GRAPH_PRINT_IRQS 0x40
1a056155 49
fb52607a 50static struct tracer_opt trace_opts[] = {
9005f3eb 51 /* Display overruns? (for self-debug purpose) */
1a056155
FW
52 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
53 /* Display CPU ? */
54 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
55 /* Display Overhead ? */
56 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
11e84acc
FW
57 /* Display proc name/pid */
58 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
9005f3eb
FW
59 /* Display duration of execution */
60 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
61 /* Display absolute time of an entry */
62 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
2bd16212
JO
63 /* Display interrupts */
64 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
fb52607a
FW
65 { } /* Empty entry */
66};
67
68static struct tracer_flags tracer_flags = {
11e84acc 69 /* Don't display overruns and proc by default */
9005f3eb 70 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
2bd16212 71 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
fb52607a
FW
72 .opts = trace_opts
73};
74
1a0799a8 75static struct trace_array *graph_array;
9005f3eb 76
fb52607a 77
712406a6
SR
78/* Add a function return address to the trace stack on thread info.*/
79int
71e308a2
SR
80ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
81 unsigned long frame_pointer)
712406a6 82{
5d1a03dc 83 unsigned long long calltime;
712406a6
SR
84 int index;
85
86 if (!current->ret_stack)
87 return -EBUSY;
88
82310a32
SR
89 /*
90 * We must make sure the ret_stack is tested before we read
91 * anything else.
92 */
93 smp_rmb();
94
712406a6
SR
95 /* The return trace stack is full */
96 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
97 atomic_inc(&current->trace_overrun);
98 return -EBUSY;
99 }
100
5d1a03dc
SR
101 calltime = trace_clock_local();
102
712406a6
SR
103 index = ++current->curr_ret_stack;
104 barrier();
105 current->ret_stack[index].ret = ret;
106 current->ret_stack[index].func = func;
5d1a03dc 107 current->ret_stack[index].calltime = calltime;
a2a16d6a 108 current->ret_stack[index].subtime = 0;
71e308a2 109 current->ret_stack[index].fp = frame_pointer;
712406a6
SR
110 *depth = index;
111
112 return 0;
113}
114
115/* Retrieve a function return address to the trace stack on thread info.*/
a2a16d6a 116static void
71e308a2
SR
117ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
118 unsigned long frame_pointer)
712406a6
SR
119{
120 int index;
121
122 index = current->curr_ret_stack;
123
124 if (unlikely(index < 0)) {
125 ftrace_graph_stop();
126 WARN_ON(1);
127 /* Might as well panic, otherwise we have no where to go */
128 *ret = (unsigned long)panic;
129 return;
130 }
131
71e308a2
SR
132#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
133 /*
134 * The arch may choose to record the frame pointer used
135 * and check it here to make sure that it is what we expect it
136 * to be. If gcc does not set the place holder of the return
137 * address in the frame pointer, and does a copy instead, then
138 * the function graph trace will fail. This test detects this
139 * case.
140 *
141 * Currently, x86_32 with optimize for size (-Os) makes the latest
142 * gcc do the above.
143 */
144 if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
145 ftrace_graph_stop();
146 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
b375a11a 147 " from func %ps return to %lx\n",
71e308a2
SR
148 current->ret_stack[index].fp,
149 frame_pointer,
150 (void *)current->ret_stack[index].func,
151 current->ret_stack[index].ret);
152 *ret = (unsigned long)panic;
153 return;
154 }
155#endif
156
712406a6
SR
157 *ret = current->ret_stack[index].ret;
158 trace->func = current->ret_stack[index].func;
159 trace->calltime = current->ret_stack[index].calltime;
160 trace->overrun = atomic_read(&current->trace_overrun);
161 trace->depth = index;
712406a6
SR
162}
163
164/*
165 * Send the trace to the ring-buffer.
166 * @return the original return address.
167 */
71e308a2 168unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
712406a6
SR
169{
170 struct ftrace_graph_ret trace;
171 unsigned long ret;
172
71e308a2 173 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
0012693a 174 trace.rettime = trace_clock_local();
712406a6 175 ftrace_graph_return(&trace);
a2a16d6a
SR
176 barrier();
177 current->curr_ret_stack--;
712406a6
SR
178
179 if (unlikely(!ret)) {
180 ftrace_graph_stop();
181 WARN_ON(1);
182 /* Might as well panic. What else to do? */
183 ret = (unsigned long)panic;
184 }
185
186 return ret;
187}
188
62b915f1 189int __trace_graph_entry(struct trace_array *tr,
1a0799a8
FW
190 struct ftrace_graph_ent *trace,
191 unsigned long flags,
192 int pc)
193{
194 struct ftrace_event_call *call = &event_funcgraph_entry;
195 struct ring_buffer_event *event;
e77405ad 196 struct ring_buffer *buffer = tr->buffer;
1a0799a8
FW
197 struct ftrace_graph_ent_entry *entry;
198
dd17c8f7 199 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1a0799a8
FW
200 return 0;
201
e77405ad 202 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
1a0799a8
FW
203 sizeof(*entry), flags, pc);
204 if (!event)
205 return 0;
206 entry = ring_buffer_event_data(event);
207 entry->graph_ent = *trace;
e77405ad
SR
208 if (!filter_current_check_discard(buffer, call, entry, event))
209 ring_buffer_unlock_commit(buffer, event);
1a0799a8
FW
210
211 return 1;
212}
213
b304d044
SR
214static inline int ftrace_graph_ignore_irqs(void)
215{
216 if (!ftrace_graph_skip_irqs)
217 return 0;
218
219 return in_irq();
220}
221
1a0799a8
FW
222int trace_graph_entry(struct ftrace_graph_ent *trace)
223{
224 struct trace_array *tr = graph_array;
225 struct trace_array_cpu *data;
226 unsigned long flags;
227 long disabled;
228 int ret;
229 int cpu;
230 int pc;
231
1a0799a8
FW
232 if (!ftrace_trace_task(current))
233 return 0;
234
ea2c68a0 235 /* trace it when it is-nested-in or is a function enabled. */
b304d044
SR
236 if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
237 ftrace_graph_ignore_irqs())
1a0799a8
FW
238 return 0;
239
240 local_irq_save(flags);
241 cpu = raw_smp_processor_id();
242 data = tr->data[cpu];
243 disabled = atomic_inc_return(&data->disabled);
244 if (likely(disabled == 1)) {
245 pc = preempt_count();
246 ret = __trace_graph_entry(tr, trace, flags, pc);
247 } else {
248 ret = 0;
249 }
1a0799a8
FW
250
251 atomic_dec(&data->disabled);
252 local_irq_restore(flags);
253
254 return ret;
255}
256
0e950173
TB
257int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
258{
259 if (tracing_thresh)
260 return 1;
261 else
262 return trace_graph_entry(trace);
263}
264
0a772620
JO
265static void
266__trace_graph_function(struct trace_array *tr,
267 unsigned long ip, unsigned long flags, int pc)
268{
269 u64 time = trace_clock_local();
270 struct ftrace_graph_ent ent = {
271 .func = ip,
272 .depth = 0,
273 };
274 struct ftrace_graph_ret ret = {
275 .func = ip,
276 .depth = 0,
277 .calltime = time,
278 .rettime = time,
279 };
280
281 __trace_graph_entry(tr, &ent, flags, pc);
282 __trace_graph_return(tr, &ret, flags, pc);
283}
284
/* Public wrapper; parent_ip is accepted for API symmetry but unused. */
void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
292
62b915f1 293void __trace_graph_return(struct trace_array *tr,
1a0799a8
FW
294 struct ftrace_graph_ret *trace,
295 unsigned long flags,
296 int pc)
297{
298 struct ftrace_event_call *call = &event_funcgraph_exit;
299 struct ring_buffer_event *event;
e77405ad 300 struct ring_buffer *buffer = tr->buffer;
1a0799a8
FW
301 struct ftrace_graph_ret_entry *entry;
302
dd17c8f7 303 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1a0799a8
FW
304 return;
305
e77405ad 306 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
1a0799a8
FW
307 sizeof(*entry), flags, pc);
308 if (!event)
309 return;
310 entry = ring_buffer_event_data(event);
311 entry->ret = *trace;
e77405ad
SR
312 if (!filter_current_check_discard(buffer, call, entry, event))
313 ring_buffer_unlock_commit(buffer, event);
1a0799a8
FW
314}
315
316void trace_graph_return(struct ftrace_graph_ret *trace)
317{
318 struct trace_array *tr = graph_array;
319 struct trace_array_cpu *data;
320 unsigned long flags;
321 long disabled;
322 int cpu;
323 int pc;
324
325 local_irq_save(flags);
326 cpu = raw_smp_processor_id();
327 data = tr->data[cpu];
328 disabled = atomic_inc_return(&data->disabled);
329 if (likely(disabled == 1)) {
330 pc = preempt_count();
331 __trace_graph_return(tr, trace, flags, pc);
332 }
1a0799a8
FW
333 atomic_dec(&data->disabled);
334 local_irq_restore(flags);
335}
336
24a53652
FW
337void set_graph_array(struct trace_array *tr)
338{
339 graph_array = tr;
340
341 /* Make graph_array visible before we start tracing */
342
343 smp_mb();
344}
345
0e950173
TB
346void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
347{
348 if (tracing_thresh &&
349 (trace->rettime - trace->calltime < tracing_thresh))
350 return;
351 else
352 trace_graph_return(trace);
353}
354
fb52607a
FW
355static int graph_trace_init(struct trace_array *tr)
356{
1a0799a8
FW
357 int ret;
358
24a53652 359 set_graph_array(tr);
0e950173
TB
360 if (tracing_thresh)
361 ret = register_ftrace_graph(&trace_graph_thresh_return,
362 &trace_graph_thresh_entry);
363 else
364 ret = register_ftrace_graph(&trace_graph_return,
365 &trace_graph_entry);
660c7f9b
SR
366 if (ret)
367 return ret;
368 tracing_start_cmdline_record();
369
370 return 0;
fb52607a
FW
371}
372
/* Tracer teardown: undo everything graph_trace_init() set up. */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
378
0c9e6f63 379static int max_bytes_for_cpu;
1a056155
FW
380
381static enum print_line_t
382print_graph_cpu(struct trace_seq *s, int cpu)
383{
1a056155 384 int ret;
1a056155 385
d51090b3
IM
386 /*
387 * Start with a space character - to make it stand out
388 * to the right a bit when trace output is pasted into
389 * email:
390 */
0c9e6f63 391 ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
1a056155 392 if (!ret)
d51090b3
IM
393 return TRACE_TYPE_PARTIAL_LINE;
394
1a056155
FW
395 return TRACE_TYPE_HANDLED;
396}
397
11e84acc
FW
398#define TRACE_GRAPH_PROCINFO_LENGTH 14
399
400static enum print_line_t
401print_graph_proc(struct trace_seq *s, pid_t pid)
402{
4ca53085 403 char comm[TASK_COMM_LEN];
11e84acc
FW
404 /* sign + log10(MAX_INT) + '\0' */
405 char pid_str[11];
4ca53085
SR
406 int spaces = 0;
407 int ret;
408 int len;
409 int i;
11e84acc 410
4ca53085 411 trace_find_cmdline(pid, comm);
11e84acc
FW
412 comm[7] = '\0';
413 sprintf(pid_str, "%d", pid);
414
415 /* 1 stands for the "-" character */
416 len = strlen(comm) + strlen(pid_str) + 1;
417
418 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
419 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
420
421 /* First spaces to align center */
422 for (i = 0; i < spaces / 2; i++) {
423 ret = trace_seq_printf(s, " ");
424 if (!ret)
425 return TRACE_TYPE_PARTIAL_LINE;
426 }
427
428 ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
429 if (!ret)
430 return TRACE_TYPE_PARTIAL_LINE;
431
432 /* Last spaces to align center */
433 for (i = 0; i < spaces - (spaces / 2); i++) {
434 ret = trace_seq_printf(s, " ");
435 if (!ret)
436 return TRACE_TYPE_PARTIAL_LINE;
437 }
438 return TRACE_TYPE_HANDLED;
439}
440
1a056155 441
49ff5903
SR
442static enum print_line_t
443print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
444{
f81c972d 445 if (!trace_seq_putc(s, ' '))
637e7e86
SR
446 return 0;
447
f81c972d 448 return trace_print_lat_fmt(s, entry);
49ff5903
SR
449}
450
287b6e68 451/* If the pid changed since the last trace, output this event */
11e84acc 452static enum print_line_t
2fbcdb35 453verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
287b6e68 454{
d51090b3 455 pid_t prev_pid;
9005f3eb 456 pid_t *last_pid;
d51090b3 457 int ret;
660c7f9b 458
2fbcdb35 459 if (!data)
9005f3eb
FW
460 return TRACE_TYPE_HANDLED;
461
be1eca39 462 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
9005f3eb
FW
463
464 if (*last_pid == pid)
11e84acc 465 return TRACE_TYPE_HANDLED;
fb52607a 466
9005f3eb
FW
467 prev_pid = *last_pid;
468 *last_pid = pid;
d51090b3 469
9005f3eb
FW
470 if (prev_pid == -1)
471 return TRACE_TYPE_HANDLED;
d51090b3
IM
472/*
473 * Context-switch trace line:
474
475 ------------------------------------------
476 | 1) migration/0--1 => sshd-1755
477 ------------------------------------------
478
479 */
480 ret = trace_seq_printf(s,
1fd8f2a3 481 " ------------------------------------------\n");
11e84acc 482 if (!ret)
810dc732 483 return TRACE_TYPE_PARTIAL_LINE;
11e84acc
FW
484
485 ret = print_graph_cpu(s, cpu);
486 if (ret == TRACE_TYPE_PARTIAL_LINE)
810dc732 487 return TRACE_TYPE_PARTIAL_LINE;
11e84acc
FW
488
489 ret = print_graph_proc(s, prev_pid);
490 if (ret == TRACE_TYPE_PARTIAL_LINE)
810dc732 491 return TRACE_TYPE_PARTIAL_LINE;
11e84acc
FW
492
493 ret = trace_seq_printf(s, " => ");
494 if (!ret)
810dc732 495 return TRACE_TYPE_PARTIAL_LINE;
11e84acc
FW
496
497 ret = print_graph_proc(s, pid);
498 if (ret == TRACE_TYPE_PARTIAL_LINE)
810dc732 499 return TRACE_TYPE_PARTIAL_LINE;
11e84acc
FW
500
501 ret = trace_seq_printf(s,
502 "\n ------------------------------------------\n\n");
503 if (!ret)
810dc732 504 return TRACE_TYPE_PARTIAL_LINE;
11e84acc 505
810dc732 506 return TRACE_TYPE_HANDLED;
287b6e68
FW
507}
508
b91facc3
FW
509static struct ftrace_graph_ret_entry *
510get_return_for_leaf(struct trace_iterator *iter,
83a8df61
FW
511 struct ftrace_graph_ent_entry *curr)
512{
be1eca39
JO
513 struct fgraph_data *data = iter->private;
514 struct ring_buffer_iter *ring_iter = NULL;
83a8df61
FW
515 struct ring_buffer_event *event;
516 struct ftrace_graph_ret_entry *next;
517
be1eca39
JO
518 /*
519 * If the previous output failed to write to the seq buffer,
520 * then we just reuse the data from before.
521 */
522 if (data && data->failed) {
523 curr = &data->ent;
524 next = &data->ret;
525 } else {
83a8df61 526
be1eca39
JO
527 ring_iter = iter->buffer_iter[iter->cpu];
528
529 /* First peek to compare current entry and the next one */
530 if (ring_iter)
531 event = ring_buffer_iter_peek(ring_iter, NULL);
532 else {
533 /*
534 * We need to consume the current entry to see
535 * the next one.
536 */
66a8cb95
SR
537 ring_buffer_consume(iter->tr->buffer, iter->cpu,
538 NULL, NULL);
be1eca39 539 event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
66a8cb95 540 NULL, NULL);
be1eca39 541 }
83a8df61 542
be1eca39
JO
543 if (!event)
544 return NULL;
545
546 next = ring_buffer_event_data(event);
83a8df61 547
be1eca39
JO
548 if (data) {
549 /*
550 * Save current and next entries for later reference
551 * if the output fails.
552 */
553 data->ent = *curr;
575570f0
SL
554 /*
555 * If the next event is not a return type, then
556 * we only care about what type it is. Otherwise we can
557 * safely copy the entire event.
558 */
559 if (next->ent.type == TRACE_GRAPH_RET)
560 data->ret = *next;
561 else
562 data->ret.ent.type = next->ent.type;
be1eca39
JO
563 }
564 }
83a8df61
FW
565
566 if (next->ent.type != TRACE_GRAPH_RET)
b91facc3 567 return NULL;
83a8df61
FW
568
569 if (curr->ent.pid != next->ent.pid ||
570 curr->graph_ent.func != next->ret.func)
b91facc3 571 return NULL;
83a8df61 572
b91facc3
FW
573 /* this is a leaf, now advance the iterator */
574 if (ring_iter)
575 ring_buffer_read(ring_iter, NULL);
576
577 return next;
83a8df61
FW
578}
579
9005f3eb
FW
580/* Signal a overhead of time execution to the output */
581static int
d7a8d9e9
JO
582print_graph_overhead(unsigned long long duration, struct trace_seq *s,
583 u32 flags)
9005f3eb
FW
584{
585 /* If duration disappear, we don't need anything */
d7a8d9e9 586 if (!(flags & TRACE_GRAPH_PRINT_DURATION))
9005f3eb
FW
587 return 1;
588
589 /* Non nested entry or return */
590 if (duration == -1)
591 return trace_seq_printf(s, " ");
592
d7a8d9e9 593 if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
9005f3eb
FW
594 /* Duration exceeded 100 msecs */
595 if (duration > 100000ULL)
596 return trace_seq_printf(s, "! ");
597
598 /* Duration exceeded 10 msecs */
599 if (duration > 10000ULL)
600 return trace_seq_printf(s, "+ ");
601 }
602
603 return trace_seq_printf(s, " ");
604}
605
d1f9cbd7
FW
606static int print_graph_abs_time(u64 t, struct trace_seq *s)
607{
608 unsigned long usecs_rem;
609
610 usecs_rem = do_div(t, NSEC_PER_SEC);
611 usecs_rem /= 1000;
612
613 return trace_seq_printf(s, "%5lu.%06lu | ",
614 (unsigned long)t, usecs_rem);
615}
616
f8b755ac 617static enum print_line_t
d1f9cbd7 618print_graph_irq(struct trace_iterator *iter, unsigned long addr,
d7a8d9e9 619 enum trace_type type, int cpu, pid_t pid, u32 flags)
f8b755ac
FW
620{
621 int ret;
d1f9cbd7 622 struct trace_seq *s = &iter->seq;
f8b755ac
FW
623
624 if (addr < (unsigned long)__irqentry_text_start ||
625 addr >= (unsigned long)__irqentry_text_end)
626 return TRACE_TYPE_UNHANDLED;
627
d1f9cbd7 628 /* Absolute time */
d7a8d9e9 629 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
d1f9cbd7
FW
630 ret = print_graph_abs_time(iter->ts, s);
631 if (!ret)
632 return TRACE_TYPE_PARTIAL_LINE;
633 }
634
9005f3eb 635 /* Cpu */
d7a8d9e9 636 if (flags & TRACE_GRAPH_PRINT_CPU) {
9005f3eb
FW
637 ret = print_graph_cpu(s, cpu);
638 if (ret == TRACE_TYPE_PARTIAL_LINE)
639 return TRACE_TYPE_PARTIAL_LINE;
640 }
49ff5903 641
9005f3eb 642 /* Proc */
d7a8d9e9 643 if (flags & TRACE_GRAPH_PRINT_PROC) {
9005f3eb
FW
644 ret = print_graph_proc(s, pid);
645 if (ret == TRACE_TYPE_PARTIAL_LINE)
646 return TRACE_TYPE_PARTIAL_LINE;
647 ret = trace_seq_printf(s, " | ");
648 if (!ret)
649 return TRACE_TYPE_PARTIAL_LINE;
650 }
f8b755ac 651
9005f3eb 652 /* No overhead */
d7a8d9e9 653 ret = print_graph_overhead(-1, s, flags);
9005f3eb
FW
654 if (!ret)
655 return TRACE_TYPE_PARTIAL_LINE;
f8b755ac 656
9005f3eb
FW
657 if (type == TRACE_GRAPH_ENT)
658 ret = trace_seq_printf(s, "==========>");
659 else
660 ret = trace_seq_printf(s, "<==========");
661
662 if (!ret)
663 return TRACE_TYPE_PARTIAL_LINE;
664
665 /* Don't close the duration column if haven't one */
d7a8d9e9 666 if (flags & TRACE_GRAPH_PRINT_DURATION)
9005f3eb
FW
667 trace_seq_printf(s, " |");
668 ret = trace_seq_printf(s, "\n");
f8b755ac 669
f8b755ac
FW
670 if (!ret)
671 return TRACE_TYPE_PARTIAL_LINE;
672 return TRACE_TYPE_HANDLED;
673}
83a8df61 674
0706f1c4
SR
675enum print_line_t
676trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
83a8df61
FW
677{
678 unsigned long nsecs_rem = do_div(duration, 1000);
166d3c79
FW
679 /* log10(ULONG_MAX) + '\0' */
680 char msecs_str[21];
681 char nsecs_str[5];
682 int ret, len;
683 int i;
684
685 sprintf(msecs_str, "%lu", (unsigned long) duration);
686
687 /* Print msecs */
9005f3eb 688 ret = trace_seq_printf(s, "%s", msecs_str);
166d3c79
FW
689 if (!ret)
690 return TRACE_TYPE_PARTIAL_LINE;
691
692 len = strlen(msecs_str);
693
694 /* Print nsecs (we don't want to exceed 7 numbers) */
695 if (len < 7) {
14cae9bd
BP
696 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
697
698 snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
166d3c79
FW
699 ret = trace_seq_printf(s, ".%s", nsecs_str);
700 if (!ret)
701 return TRACE_TYPE_PARTIAL_LINE;
702 len += strlen(nsecs_str);
703 }
704
705 ret = trace_seq_printf(s, " us ");
706 if (!ret)
707 return TRACE_TYPE_PARTIAL_LINE;
708
709 /* Print remaining spaces to fit the row's width */
710 for (i = len; i < 7; i++) {
711 ret = trace_seq_printf(s, " ");
712 if (!ret)
713 return TRACE_TYPE_PARTIAL_LINE;
714 }
0706f1c4
SR
715 return TRACE_TYPE_HANDLED;
716}
717
718static enum print_line_t
719print_graph_duration(unsigned long long duration, struct trace_seq *s)
720{
721 int ret;
722
723 ret = trace_print_graph_duration(duration, s);
724 if (ret != TRACE_TYPE_HANDLED)
725 return ret;
166d3c79
FW
726
727 ret = trace_seq_printf(s, "| ");
728 if (!ret)
729 return TRACE_TYPE_PARTIAL_LINE;
166d3c79 730
0706f1c4 731 return TRACE_TYPE_HANDLED;
83a8df61
FW
732}
733
83a8df61 734/* Case of a leaf function on its call entry */
287b6e68 735static enum print_line_t
83a8df61 736print_graph_entry_leaf(struct trace_iterator *iter,
b91facc3 737 struct ftrace_graph_ent_entry *entry,
d7a8d9e9
JO
738 struct ftrace_graph_ret_entry *ret_entry,
739 struct trace_seq *s, u32 flags)
fb52607a 740{
2fbcdb35 741 struct fgraph_data *data = iter->private;
83a8df61 742 struct ftrace_graph_ret *graph_ret;
83a8df61
FW
743 struct ftrace_graph_ent *call;
744 unsigned long long duration;
fb52607a 745 int ret;
1a056155 746 int i;
fb52607a 747
83a8df61
FW
748 graph_ret = &ret_entry->ret;
749 call = &entry->graph_ent;
750 duration = graph_ret->rettime - graph_ret->calltime;
751
2fbcdb35 752 if (data) {
f1c7f517 753 struct fgraph_cpu_data *cpu_data;
2fbcdb35 754 int cpu = iter->cpu;
f1c7f517
SR
755
756 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
2fbcdb35
SR
757
758 /*
759 * Comments display at + 1 to depth. Since
760 * this is a leaf function, keep the comments
761 * equal to this depth.
762 */
f1c7f517
SR
763 cpu_data->depth = call->depth - 1;
764
765 /* No need to keep this function around for this depth */
766 if (call->depth < FTRACE_RETFUNC_DEPTH)
767 cpu_data->enter_funcs[call->depth] = 0;
2fbcdb35
SR
768 }
769
83a8df61 770 /* Overhead */
d7a8d9e9 771 ret = print_graph_overhead(duration, s, flags);
9005f3eb
FW
772 if (!ret)
773 return TRACE_TYPE_PARTIAL_LINE;
1a056155
FW
774
775 /* Duration */
d7a8d9e9 776 if (flags & TRACE_GRAPH_PRINT_DURATION) {
9005f3eb
FW
777 ret = print_graph_duration(duration, s);
778 if (ret == TRACE_TYPE_PARTIAL_LINE)
779 return TRACE_TYPE_PARTIAL_LINE;
780 }
437f24fb 781
83a8df61
FW
782 /* Function */
783 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
784 ret = trace_seq_printf(s, " ");
785 if (!ret)
786 return TRACE_TYPE_PARTIAL_LINE;
787 }
788
b375a11a 789 ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
83a8df61
FW
790 if (!ret)
791 return TRACE_TYPE_PARTIAL_LINE;
792
793 return TRACE_TYPE_HANDLED;
794}
795
796static enum print_line_t
2fbcdb35
SR
797print_graph_entry_nested(struct trace_iterator *iter,
798 struct ftrace_graph_ent_entry *entry,
d7a8d9e9 799 struct trace_seq *s, int cpu, u32 flags)
83a8df61 800{
83a8df61 801 struct ftrace_graph_ent *call = &entry->graph_ent;
2fbcdb35
SR
802 struct fgraph_data *data = iter->private;
803 int ret;
804 int i;
805
806 if (data) {
f1c7f517 807 struct fgraph_cpu_data *cpu_data;
2fbcdb35 808 int cpu = iter->cpu;
2fbcdb35 809
f1c7f517
SR
810 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
811 cpu_data->depth = call->depth;
812
813 /* Save this function pointer to see if the exit matches */
814 if (call->depth < FTRACE_RETFUNC_DEPTH)
815 cpu_data->enter_funcs[call->depth] = call->func;
2fbcdb35 816 }
83a8df61
FW
817
818 /* No overhead */
d7a8d9e9 819 ret = print_graph_overhead(-1, s, flags);
9005f3eb
FW
820 if (!ret)
821 return TRACE_TYPE_PARTIAL_LINE;
1a056155 822
9005f3eb 823 /* No time */
d7a8d9e9 824 if (flags & TRACE_GRAPH_PRINT_DURATION) {
f8b755ac
FW
825 ret = trace_seq_printf(s, " | ");
826 if (!ret)
827 return TRACE_TYPE_PARTIAL_LINE;
f8b755ac
FW
828 }
829
83a8df61 830 /* Function */
287b6e68
FW
831 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
832 ret = trace_seq_printf(s, " ");
fb52607a
FW
833 if (!ret)
834 return TRACE_TYPE_PARTIAL_LINE;
287b6e68
FW
835 }
836
b375a11a 837 ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
83a8df61
FW
838 if (!ret)
839 return TRACE_TYPE_PARTIAL_LINE;
840
b91facc3
FW
841 /*
842 * we already consumed the current entry to check the next one
843 * and see if this is a leaf.
844 */
845 return TRACE_TYPE_NO_CONSUME;
287b6e68
FW
846}
847
83a8df61 848static enum print_line_t
ac5f6c96 849print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
d7a8d9e9 850 int type, unsigned long addr, u32 flags)
83a8df61 851{
2fbcdb35 852 struct fgraph_data *data = iter->private;
83a8df61 853 struct trace_entry *ent = iter->ent;
ac5f6c96
SR
854 int cpu = iter->cpu;
855 int ret;
83a8df61 856
1a056155 857 /* Pid */
2fbcdb35 858 if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
9005f3eb
FW
859 return TRACE_TYPE_PARTIAL_LINE;
860
ac5f6c96
SR
861 if (type) {
862 /* Interrupt */
d7a8d9e9 863 ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
ac5f6c96
SR
864 if (ret == TRACE_TYPE_PARTIAL_LINE)
865 return TRACE_TYPE_PARTIAL_LINE;
866 }
83a8df61 867
9005f3eb 868 /* Absolute time */
d7a8d9e9 869 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
9005f3eb
FW
870 ret = print_graph_abs_time(iter->ts, s);
871 if (!ret)
872 return TRACE_TYPE_PARTIAL_LINE;
873 }
874
1a056155 875 /* Cpu */
d7a8d9e9 876 if (flags & TRACE_GRAPH_PRINT_CPU) {
1a056155 877 ret = print_graph_cpu(s, cpu);
11e84acc
FW
878 if (ret == TRACE_TYPE_PARTIAL_LINE)
879 return TRACE_TYPE_PARTIAL_LINE;
880 }
881
882 /* Proc */
d7a8d9e9 883 if (flags & TRACE_GRAPH_PRINT_PROC) {
00a8bf85 884 ret = print_graph_proc(s, ent->pid);
11e84acc
FW
885 if (ret == TRACE_TYPE_PARTIAL_LINE)
886 return TRACE_TYPE_PARTIAL_LINE;
887
888 ret = trace_seq_printf(s, " | ");
1a056155
FW
889 if (!ret)
890 return TRACE_TYPE_PARTIAL_LINE;
891 }
83a8df61 892
49ff5903
SR
893 /* Latency format */
894 if (trace_flags & TRACE_ITER_LATENCY_FMT) {
895 ret = print_graph_lat_fmt(s, ent);
896 if (ret == TRACE_TYPE_PARTIAL_LINE)
897 return TRACE_TYPE_PARTIAL_LINE;
898 }
899
ac5f6c96
SR
900 return 0;
901}
902
2bd16212
JO
903/*
904 * Entry check for irq code
905 *
906 * returns 1 if
907 * - we are inside irq code
908 * - we just extered irq code
909 *
910 * retunns 0 if
911 * - funcgraph-interrupts option is set
912 * - we are not inside irq code
913 */
914static int
915check_irq_entry(struct trace_iterator *iter, u32 flags,
916 unsigned long addr, int depth)
917{
918 int cpu = iter->cpu;
a9d61173 919 int *depth_irq;
2bd16212 920 struct fgraph_data *data = iter->private;
2bd16212 921
a9d61173
JO
922 /*
923 * If we are either displaying irqs, or we got called as
924 * a graph event and private data does not exist,
925 * then we bypass the irq check.
926 */
927 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
928 (!data))
2bd16212
JO
929 return 0;
930
a9d61173
JO
931 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
932
2bd16212
JO
933 /*
934 * We are inside the irq code
935 */
936 if (*depth_irq >= 0)
937 return 1;
938
939 if ((addr < (unsigned long)__irqentry_text_start) ||
940 (addr >= (unsigned long)__irqentry_text_end))
941 return 0;
942
943 /*
944 * We are entering irq code.
945 */
946 *depth_irq = depth;
947 return 1;
948}
949
950/*
951 * Return check for irq code
952 *
953 * returns 1 if
954 * - we are inside irq code
955 * - we just left irq code
956 *
957 * returns 0 if
958 * - funcgraph-interrupts option is set
959 * - we are not inside irq code
960 */
961static int
962check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
963{
964 int cpu = iter->cpu;
a9d61173 965 int *depth_irq;
2bd16212 966 struct fgraph_data *data = iter->private;
2bd16212 967
a9d61173
JO
968 /*
969 * If we are either displaying irqs, or we got called as
970 * a graph event and private data does not exist,
971 * then we bypass the irq check.
972 */
973 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
974 (!data))
2bd16212
JO
975 return 0;
976
a9d61173
JO
977 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
978
2bd16212
JO
979 /*
980 * We are not inside the irq code.
981 */
982 if (*depth_irq == -1)
983 return 0;
984
985 /*
986 * We are inside the irq code, and this is returning entry.
987 * Let's not trace it and clear the entry depth, since
988 * we are out of irq code.
989 *
990 * This condition ensures that we 'leave the irq code' once
991 * we are out of the entry depth. Thus protecting us from
992 * the RETURN entry loss.
993 */
994 if (*depth_irq >= depth) {
995 *depth_irq = -1;
996 return 1;
997 }
998
999 /*
1000 * We are inside the irq code, and this is not the entry.
1001 */
1002 return 1;
1003}
1004
ac5f6c96
SR
1005static enum print_line_t
1006print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
d7a8d9e9 1007 struct trace_iterator *iter, u32 flags)
ac5f6c96 1008{
be1eca39 1009 struct fgraph_data *data = iter->private;
ac5f6c96
SR
1010 struct ftrace_graph_ent *call = &field->graph_ent;
1011 struct ftrace_graph_ret_entry *leaf_ret;
be1eca39
JO
1012 static enum print_line_t ret;
1013 int cpu = iter->cpu;
ac5f6c96 1014
2bd16212
JO
1015 if (check_irq_entry(iter, flags, call->func, call->depth))
1016 return TRACE_TYPE_HANDLED;
1017
d7a8d9e9 1018 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
ac5f6c96
SR
1019 return TRACE_TYPE_PARTIAL_LINE;
1020
b91facc3
FW
1021 leaf_ret = get_return_for_leaf(iter, field);
1022 if (leaf_ret)
d7a8d9e9 1023 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
83a8df61 1024 else
d7a8d9e9 1025 ret = print_graph_entry_nested(iter, field, s, cpu, flags);
83a8df61 1026
be1eca39
JO
1027 if (data) {
1028 /*
1029 * If we failed to write our output, then we need to make
1030 * note of it. Because we already consumed our entry.
1031 */
1032 if (s->full) {
1033 data->failed = 1;
1034 data->cpu = cpu;
1035 } else
1036 data->failed = 0;
1037 }
1038
1039 return ret;
83a8df61
FW
1040}
1041
287b6e68
FW
1042static enum print_line_t
1043print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
d7a8d9e9
JO
1044 struct trace_entry *ent, struct trace_iterator *iter,
1045 u32 flags)
287b6e68 1046{
83a8df61 1047 unsigned long long duration = trace->rettime - trace->calltime;
2fbcdb35
SR
1048 struct fgraph_data *data = iter->private;
1049 pid_t pid = ent->pid;
1050 int cpu = iter->cpu;
f1c7f517 1051 int func_match = 1;
2fbcdb35
SR
1052 int ret;
1053 int i;
1054
2bd16212
JO
1055 if (check_irq_return(iter, flags, trace->depth))
1056 return TRACE_TYPE_HANDLED;
1057
2fbcdb35 1058 if (data) {
f1c7f517
SR
1059 struct fgraph_cpu_data *cpu_data;
1060 int cpu = iter->cpu;
1061
1062 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
2fbcdb35
SR
1063
1064 /*
1065 * Comments display at + 1 to depth. This is the
1066 * return from a function, we now want the comments
1067 * to display at the same level of the bracket.
1068 */
f1c7f517
SR
1069 cpu_data->depth = trace->depth - 1;
1070
1071 if (trace->depth < FTRACE_RETFUNC_DEPTH) {
1072 if (cpu_data->enter_funcs[trace->depth] != trace->func)
1073 func_match = 0;
1074 cpu_data->enter_funcs[trace->depth] = 0;
1075 }
2fbcdb35 1076 }
287b6e68 1077
d7a8d9e9 1078 if (print_graph_prologue(iter, s, 0, 0, flags))
437f24fb
SR
1079 return TRACE_TYPE_PARTIAL_LINE;
1080
83a8df61 1081 /* Overhead */
d7a8d9e9 1082 ret = print_graph_overhead(duration, s, flags);
9005f3eb
FW
1083 if (!ret)
1084 return TRACE_TYPE_PARTIAL_LINE;
1a056155
FW
1085
1086 /* Duration */
d7a8d9e9 1087 if (flags & TRACE_GRAPH_PRINT_DURATION) {
9005f3eb
FW
1088 ret = print_graph_duration(duration, s);
1089 if (ret == TRACE_TYPE_PARTIAL_LINE)
1090 return TRACE_TYPE_PARTIAL_LINE;
1091 }
83a8df61
FW
1092
1093 /* Closing brace */
287b6e68
FW
1094 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
1095 ret = trace_seq_printf(s, " ");
fb52607a
FW
1096 if (!ret)
1097 return TRACE_TYPE_PARTIAL_LINE;
287b6e68
FW
1098 }
1099
f1c7f517
SR
1100 /*
1101 * If the return function does not have a matching entry,
1102 * then the entry was lost. Instead of just printing
1103 * the '}' and letting the user guess what function this
1104 * belongs to, write out the function name.
1105 */
1106 if (func_match) {
1107 ret = trace_seq_printf(s, "}\n");
1108 if (!ret)
1109 return TRACE_TYPE_PARTIAL_LINE;
1110 } else {
a094fe04 1111 ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
f1c7f517
SR
1112 if (!ret)
1113 return TRACE_TYPE_PARTIAL_LINE;
1114 }
fb52607a 1115
83a8df61 1116 /* Overrun */
d7a8d9e9 1117 if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
287b6e68
FW
1118 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
1119 trace->overrun);
fb52607a
FW
1120 if (!ret)
1121 return TRACE_TYPE_PARTIAL_LINE;
287b6e68 1122 }
f8b755ac 1123
d7a8d9e9
JO
1124 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1125 cpu, pid, flags);
f8b755ac
FW
1126 if (ret == TRACE_TYPE_PARTIAL_LINE)
1127 return TRACE_TYPE_PARTIAL_LINE;
1128
287b6e68
FW
1129 return TRACE_TYPE_HANDLED;
1130}
1131
1fd8f2a3 1132static enum print_line_t
d7a8d9e9
JO
1133print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1134 struct trace_iterator *iter, u32 flags)
1fd8f2a3 1135{
5087f8d2 1136 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2fbcdb35 1137 struct fgraph_data *data = iter->private;
5087f8d2 1138 struct trace_event *event;
2fbcdb35 1139 int depth = 0;
1fd8f2a3 1140 int ret;
2fbcdb35
SR
1141 int i;
1142
1143 if (data)
be1eca39 1144 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
9005f3eb 1145
d7a8d9e9 1146 if (print_graph_prologue(iter, s, 0, 0, flags))
d1f9cbd7
FW
1147 return TRACE_TYPE_PARTIAL_LINE;
1148
1fd8f2a3 1149 /* No overhead */
d7a8d9e9 1150 ret = print_graph_overhead(-1, s, flags);
9005f3eb
FW
1151 if (!ret)
1152 return TRACE_TYPE_PARTIAL_LINE;
1153
1154 /* No time */
d7a8d9e9 1155 if (flags & TRACE_GRAPH_PRINT_DURATION) {
9005f3eb 1156 ret = trace_seq_printf(s, " | ");
1fd8f2a3
FW
1157 if (!ret)
1158 return TRACE_TYPE_PARTIAL_LINE;
1159 }
1160
1fd8f2a3 1161 /* Indentation */
2fbcdb35
SR
1162 if (depth > 0)
1163 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1fd8f2a3
FW
1164 ret = trace_seq_printf(s, " ");
1165 if (!ret)
1166 return TRACE_TYPE_PARTIAL_LINE;
1167 }
1168
1169 /* The comment */
769b0441
FW
1170 ret = trace_seq_printf(s, "/* ");
1171 if (!ret)
1172 return TRACE_TYPE_PARTIAL_LINE;
1173
5087f8d2
SR
1174 switch (iter->ent->type) {
1175 case TRACE_BPRINT:
1176 ret = trace_print_bprintk_msg_only(iter);
1177 if (ret != TRACE_TYPE_HANDLED)
1178 return ret;
1179 break;
1180 case TRACE_PRINT:
1181 ret = trace_print_printk_msg_only(iter);
1182 if (ret != TRACE_TYPE_HANDLED)
1183 return ret;
1184 break;
1185 default:
1186 event = ftrace_find_event(ent->type);
1187 if (!event)
1188 return TRACE_TYPE_UNHANDLED;
1189
a9a57763 1190 ret = event->funcs->trace(iter, sym_flags, event);
5087f8d2
SR
1191 if (ret != TRACE_TYPE_HANDLED)
1192 return ret;
1193 }
1fd8f2a3 1194
412d0bb5
FW
1195 /* Strip ending newline */
1196 if (s->buffer[s->len - 1] == '\n') {
1197 s->buffer[s->len - 1] = '\0';
1198 s->len--;
1199 }
1200
1fd8f2a3
FW
1201 ret = trace_seq_printf(s, " */\n");
1202 if (!ret)
1203 return TRACE_TYPE_PARTIAL_LINE;
1204
1205 return TRACE_TYPE_HANDLED;
1206}
1207
1208
287b6e68 1209enum print_line_t
0a772620 1210__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
287b6e68 1211{
be1eca39
JO
1212 struct ftrace_graph_ent_entry *field;
1213 struct fgraph_data *data = iter->private;
287b6e68 1214 struct trace_entry *entry = iter->ent;
5087f8d2 1215 struct trace_seq *s = &iter->seq;
be1eca39
JO
1216 int cpu = iter->cpu;
1217 int ret;
1218
1219 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1220 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1221 return TRACE_TYPE_HANDLED;
1222 }
1223
1224 /*
1225 * If the last output failed, there's a possibility we need
1226 * to print out the missing entry which would never go out.
1227 */
1228 if (data && data->failed) {
1229 field = &data->ent;
1230 iter->cpu = data->cpu;
d7a8d9e9 1231 ret = print_graph_entry(field, s, iter, flags);
be1eca39
JO
1232 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1233 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1234 ret = TRACE_TYPE_NO_CONSUME;
1235 }
1236 iter->cpu = cpu;
1237 return ret;
1238 }
fb52607a 1239
287b6e68
FW
1240 switch (entry->type) {
1241 case TRACE_GRAPH_ENT: {
38ceb592
LJ
1242 /*
1243 * print_graph_entry() may consume the current event,
1244 * thus @field may become invalid, so we need to save it.
1245 * sizeof(struct ftrace_graph_ent_entry) is very small,
1246 * it can be safely saved at the stack.
1247 */
be1eca39 1248 struct ftrace_graph_ent_entry saved;
287b6e68 1249 trace_assign_type(field, entry);
38ceb592 1250 saved = *field;
d7a8d9e9 1251 return print_graph_entry(&saved, s, iter, flags);
287b6e68
FW
1252 }
1253 case TRACE_GRAPH_RET: {
1254 struct ftrace_graph_ret_entry *field;
1255 trace_assign_type(field, entry);
d7a8d9e9 1256 return print_graph_return(&field->ret, s, entry, iter, flags);
287b6e68 1257 }
62b915f1
JO
1258 case TRACE_STACK:
1259 case TRACE_FN:
1260 /* dont trace stack and functions as comments */
1261 return TRACE_TYPE_UNHANDLED;
1262
287b6e68 1263 default:
d7a8d9e9 1264 return print_graph_comment(s, entry, iter, flags);
fb52607a 1265 }
5087f8d2
SR
1266
1267 return TRACE_TYPE_HANDLED;
fb52607a
FW
1268}
1269
d7a8d9e9
JO
1270static enum print_line_t
1271print_graph_function(struct trace_iterator *iter)
1272{
0a772620
JO
1273 return __print_graph_function_flags(iter, tracer_flags.val);
1274}
1275
1276enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
1277 u32 flags)
1278{
1279 if (trace_flags & TRACE_ITER_LATENCY_FMT)
1280 flags |= TRACE_GRAPH_PRINT_DURATION;
1281 else
1282 flags |= TRACE_GRAPH_PRINT_ABS_TIME;
1283
1284 return __print_graph_function_flags(iter, flags);
d7a8d9e9
JO
1285}
1286
9106b693 1287static enum print_line_t
a9a57763
SR
1288print_graph_function_event(struct trace_iterator *iter, int flags,
1289 struct trace_event *event)
9106b693
JO
1290{
1291 return print_graph_function(iter);
1292}
1293
d7a8d9e9 1294static void print_lat_header(struct seq_file *s, u32 flags)
49ff5903
SR
1295{
1296 static const char spaces[] = " " /* 16 spaces */
1297 " " /* 4 spaces */
1298 " "; /* 17 spaces */
1299 int size = 0;
1300
d7a8d9e9 1301 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
49ff5903 1302 size += 16;
d7a8d9e9 1303 if (flags & TRACE_GRAPH_PRINT_CPU)
49ff5903 1304 size += 4;
d7a8d9e9 1305 if (flags & TRACE_GRAPH_PRINT_PROC)
49ff5903
SR
1306 size += 17;
1307
1308 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
1309 seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
1310 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1311 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
637e7e86
SR
1312 seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces);
1313 seq_printf(s, "#%.*s|||| / \n", size, spaces);
49ff5903
SR
1314}
1315
0a772620 1316static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
decbec38 1317{
49ff5903
SR
1318 int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1319
1320 if (lat)
d7a8d9e9 1321 print_lat_header(s, flags);
49ff5903 1322
decbec38 1323 /* 1st line */
49ff5903 1324 seq_printf(s, "#");
d7a8d9e9 1325 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
9005f3eb 1326 seq_printf(s, " TIME ");
d7a8d9e9 1327 if (flags & TRACE_GRAPH_PRINT_CPU)
49ff5903 1328 seq_printf(s, " CPU");
d7a8d9e9 1329 if (flags & TRACE_GRAPH_PRINT_PROC)
49ff5903
SR
1330 seq_printf(s, " TASK/PID ");
1331 if (lat)
637e7e86 1332 seq_printf(s, "|||||");
d7a8d9e9 1333 if (flags & TRACE_GRAPH_PRINT_DURATION)
9005f3eb
FW
1334 seq_printf(s, " DURATION ");
1335 seq_printf(s, " FUNCTION CALLS\n");
decbec38
FW
1336
1337 /* 2nd line */
49ff5903 1338 seq_printf(s, "#");
d7a8d9e9 1339 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
9005f3eb 1340 seq_printf(s, " | ");
d7a8d9e9 1341 if (flags & TRACE_GRAPH_PRINT_CPU)
49ff5903 1342 seq_printf(s, " | ");
d7a8d9e9 1343 if (flags & TRACE_GRAPH_PRINT_PROC)
49ff5903
SR
1344 seq_printf(s, " | | ");
1345 if (lat)
637e7e86 1346 seq_printf(s, "|||||");
d7a8d9e9 1347 if (flags & TRACE_GRAPH_PRINT_DURATION)
9005f3eb
FW
1348 seq_printf(s, " | | ");
1349 seq_printf(s, " | | | |\n");
decbec38 1350}
9005f3eb 1351
62b915f1 1352void print_graph_headers(struct seq_file *s)
d7a8d9e9
JO
1353{
1354 print_graph_headers_flags(s, tracer_flags.val);
1355}
1356
0a772620
JO
1357void print_graph_headers_flags(struct seq_file *s, u32 flags)
1358{
1359 struct trace_iterator *iter = s->private;
1360
1361 if (trace_flags & TRACE_ITER_LATENCY_FMT) {
1362 /* print nothing if the buffers are empty */
1363 if (trace_empty(iter))
1364 return;
1365
1366 print_trace_header(s, iter);
1367 flags |= TRACE_GRAPH_PRINT_DURATION;
1368 } else
1369 flags |= TRACE_GRAPH_PRINT_ABS_TIME;
1370
1371 __print_graph_headers_flags(s, flags);
1372}
1373
62b915f1 1374void graph_trace_open(struct trace_iterator *iter)
9005f3eb 1375{
2fbcdb35 1376 /* pid and depth on the last trace processed */
be1eca39 1377 struct fgraph_data *data;
9005f3eb
FW
1378 int cpu;
1379
be1eca39
JO
1380 iter->private = NULL;
1381
1382 data = kzalloc(sizeof(*data), GFP_KERNEL);
2fbcdb35 1383 if (!data)
be1eca39
JO
1384 goto out_err;
1385
1386 data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
1387 if (!data->cpu_data)
1388 goto out_err_free;
1389
1390 for_each_possible_cpu(cpu) {
1391 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1392 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1393 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
2bd16212
JO
1394 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1395
be1eca39
JO
1396 *pid = -1;
1397 *depth = 0;
1398 *ignore = 0;
2bd16212 1399 *depth_irq = -1;
be1eca39 1400 }
9005f3eb 1401
2fbcdb35 1402 iter->private = data;
be1eca39
JO
1403
1404 return;
1405
1406 out_err_free:
1407 kfree(data);
1408 out_err:
1409 pr_warning("function graph tracer: not enough memory\n");
9005f3eb
FW
1410}
1411
62b915f1 1412void graph_trace_close(struct trace_iterator *iter)
9005f3eb 1413{
be1eca39
JO
1414 struct fgraph_data *data = iter->private;
1415
1416 if (data) {
1417 free_percpu(data->cpu_data);
1418 kfree(data);
1419 }
9005f3eb
FW
1420}
1421
b304d044
SR
1422static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
1423{
1424 if (bit == TRACE_GRAPH_PRINT_IRQS)
1425 ftrace_graph_skip_irqs = !set;
1426
1427 return 0;
1428}
1429
a9a57763
SR
1430static struct trace_event_functions graph_functions = {
1431 .trace = print_graph_function_event,
1432};
1433
9106b693
JO
1434static struct trace_event graph_trace_entry_event = {
1435 .type = TRACE_GRAPH_ENT,
a9a57763 1436 .funcs = &graph_functions,
9106b693
JO
1437};
1438
1439static struct trace_event graph_trace_ret_event = {
1440 .type = TRACE_GRAPH_RET,
a9a57763 1441 .funcs = &graph_functions
9106b693
JO
1442};
1443
fb52607a 1444static struct tracer graph_trace __read_mostly = {
ef18012b 1445 .name = "function_graph",
9005f3eb 1446 .open = graph_trace_open,
be1eca39 1447 .pipe_open = graph_trace_open,
9005f3eb 1448 .close = graph_trace_close,
be1eca39 1449 .pipe_close = graph_trace_close,
6eaaa5d5 1450 .wait_pipe = poll_wait_pipe,
ef18012b
SR
1451 .init = graph_trace_init,
1452 .reset = graph_trace_reset,
decbec38
FW
1453 .print_line = print_graph_function,
1454 .print_header = print_graph_headers,
fb52607a 1455 .flags = &tracer_flags,
b304d044 1456 .set_flag = func_graph_set_flag,
7447dce9
FW
1457#ifdef CONFIG_FTRACE_SELFTEST
1458 .selftest = trace_selftest_startup_function_graph,
1459#endif
fb52607a
FW
1460};
1461
1462static __init int init_graph_trace(void)
1463{
0c9e6f63
LJ
1464 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1465
9106b693
JO
1466 if (!register_ftrace_event(&graph_trace_entry_event)) {
1467 pr_warning("Warning: could not register graph trace events\n");
1468 return 1;
1469 }
1470
1471 if (!register_ftrace_event(&graph_trace_ret_event)) {
1472 pr_warning("Warning: could not register graph trace events\n");
1473 return 1;
1474 }
1475
fb52607a
FW
1476 return register_tracer(&graph_trace);
1477}
1478
1479device_initcall(init_graph_trace);