kernel/trace/trace.c (git blame, net-next-2.6.git, commit "ftrace: trace scheduler rbtree")
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 William Lee Irwin III
13 */
14#include <linux/utsrelease.h>
15#include <linux/kallsyms.h>
16#include <linux/seq_file.h>
17#include <linux/debugfs.h>
4c11d7ae 18#include <linux/pagemap.h>
bc0c38d1
SR
19#include <linux/hardirq.h>
20#include <linux/linkage.h>
21#include <linux/uaccess.h>
22#include <linux/ftrace.h>
23#include <linux/module.h>
24#include <linux/percpu.h>
25#include <linux/ctype.h>
26#include <linux/init.h>
2a2cc8f7 27#include <linux/poll.h>
bc0c38d1
SR
28#include <linux/gfp.h>
29#include <linux/fs.h>
30
86387f7e
IM
31#include <linux/stacktrace.h>
32
bc0c38d1
SR
33#include "trace.h"
34
35unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
36unsigned long __read_mostly tracing_thresh;
37
60a11774
SR
38static int tracing_disabled = 1;
39
e309b41d 40static long
bc0c38d1
SR
41ns2usecs(cycle_t nsec)
42{
43 nsec += 500;
44 do_div(nsec, 1000);
45 return nsec;
46}
47
e309b41d 48cycle_t ftrace_now(int cpu)
750ed1a4 49{
0fd9e0da 50 return cpu_clock(cpu);
750ed1a4
IM
51}
52
bc0c38d1
SR
53static struct trace_array global_trace;
54
55static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
56
57static struct trace_array max_tr;
58
59static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
60
26994ead 61static int tracer_enabled = 1;
57422797 62static unsigned long trace_nr_entries = 65536UL;
bc0c38d1
SR
63
64static struct tracer *trace_types __read_mostly;
65static struct tracer *current_trace __read_mostly;
66static int max_tracer_type_len;
67
68static DEFINE_MUTEX(trace_types_lock);
4e655519
IM
69static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
70
71unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
72
4e655519
IM
73void trace_wake_up(void)
74{
017730c1
IM
75 /*
76 * The runqueue_is_locked() can fail, but this is the best we
77 * have for now:
78 */
79 if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
4e655519
IM
80 wake_up(&trace_wait);
81}
bc0c38d1 82
4c11d7ae
SR
83#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
84
bc0c38d1
SR
85static int __init set_nr_entries(char *str)
86{
87 if (!str)
88 return 0;
89 trace_nr_entries = simple_strtoul(str, &str, 0);
90 return 1;
91}
92__setup("trace_entries=", set_nr_entries);
93
57f50be1
SR
94unsigned long nsecs_to_usecs(unsigned long nsecs)
95{
96 return nsecs / 1000;
97}
98
bc0c38d1
SR
99enum trace_type {
100 __TRACE_FIRST_TYPE = 0,
101
102 TRACE_FN,
103 TRACE_CTX,
57422797 104 TRACE_WAKE,
86387f7e 105 TRACE_STACK,
f0a920d5 106 TRACE_SPECIAL,
bc0c38d1
SR
107
108 __TRACE_LAST_TYPE
109};
110
111enum trace_flag_type {
112 TRACE_FLAG_IRQS_OFF = 0x01,
113 TRACE_FLAG_NEED_RESCHED = 0x02,
114 TRACE_FLAG_HARDIRQ = 0x04,
115 TRACE_FLAG_SOFTIRQ = 0x08,
116};
117
bc0c38d1
SR
118#define TRACE_ITER_SYM_MASK \
119 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
120
 121/* These must match the bit positions above */
122static const char *trace_options[] = {
123 "print-parent",
124 "sym-offset",
125 "sym-addr",
126 "verbose",
f9896bf3 127 "raw",
5e3ca0ec 128 "hex",
cb0f12aa 129 "bin",
2a2cc8f7 130 "block",
86387f7e 131 "stacktrace",
4ac3ba41 132 "sched-tree",
bc0c38d1
SR
133 NULL
134};
135
4c11d7ae 136static DEFINE_SPINLOCK(ftrace_max_lock);
bc0c38d1
SR
137
138/*
139 * Copy the new maximum trace into the separate maximum-trace
140 * structure. (this way the maximum trace is permanently saved,
141 * for later retrieval via /debugfs/tracing/latency_trace)
142 */
e309b41d 143static void
bc0c38d1
SR
144__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
145{
146 struct trace_array_cpu *data = tr->data[cpu];
147
148 max_tr.cpu = cpu;
149 max_tr.time_start = data->preempt_timestamp;
150
151 data = max_tr.data[cpu];
152 data->saved_latency = tracing_max_latency;
153
154 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
155 data->pid = tsk->pid;
156 data->uid = tsk->uid;
157 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
158 data->policy = tsk->policy;
159 data->rt_priority = tsk->rt_priority;
160
 161	/* record this task's comm */
162 tracing_record_cmdline(current);
163}
164
c7aafc54
IM
165void check_pages(struct trace_array_cpu *data)
166{
167 struct page *page, *tmp;
168
169 BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
170 BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
171
172 list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
173 BUG_ON(page->lru.next->prev != &page->lru);
174 BUG_ON(page->lru.prev->next != &page->lru);
175 }
176}
177
178void *head_page(struct trace_array_cpu *data)
179{
180 struct page *page;
181
182 check_pages(data);
183 if (list_empty(&data->trace_pages))
184 return NULL;
185
186 page = list_entry(data->trace_pages.next, struct page, lru);
187 BUG_ON(&page->lru == &data->trace_pages);
188
189 return page_address(page);
190}
191
e309b41d 192static int
214023c3
SR
193trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
194{
195 int len = (PAGE_SIZE - 1) - s->len;
196 va_list ap;
b3806b43 197 int ret;
214023c3
SR
198
199 if (!len)
200 return 0;
201
202 va_start(ap, fmt);
b3806b43 203 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
214023c3
SR
204 va_end(ap);
205
b3806b43
SR
206 /* If we can't write it all, don't bother writing anything */
207 if (ret > len)
208 return 0;
209
210 s->len += ret;
214023c3
SR
211
212 return len;
213}
214
e309b41d 215static int
214023c3
SR
216trace_seq_puts(struct trace_seq *s, const char *str)
217{
218 int len = strlen(str);
219
220 if (len > ((PAGE_SIZE - 1) - s->len))
b3806b43 221 return 0;
214023c3
SR
222
223 memcpy(s->buffer + s->len, str, len);
224 s->len += len;
225
226 return len;
227}
228
e309b41d 229static int
214023c3
SR
230trace_seq_putc(struct trace_seq *s, unsigned char c)
231{
232 if (s->len >= (PAGE_SIZE - 1))
233 return 0;
234
235 s->buffer[s->len++] = c;
236
237 return 1;
238}
239
e309b41d 240static int
cb0f12aa
IM
241trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
242{
243 if (len > ((PAGE_SIZE - 1) - s->len))
244 return 0;
245
246 memcpy(s->buffer + s->len, mem, len);
247 s->len += len;
248
249 return len;
250}
251
5e3ca0ec
IM
252#define HEX_CHARS 17
253
e309b41d 254static int
5e3ca0ec
IM
255trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
256{
257 unsigned char hex[HEX_CHARS];
258 unsigned char *data;
259 unsigned char byte;
260 int i, j;
261
262 BUG_ON(len >= HEX_CHARS);
263
264 data = mem;
265
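	/*
	 * For the integer fields passed in here, walk the bytes
	 * most-significant first (the loop direction depends on
	 * endianness); within each byte the low nibble is emitted
	 * before the high nibble.
	 */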
266#ifdef __BIG_ENDIAN
267 for (i = 0, j = 0; i < len; i++) {
268#else
269 for (i = len-1, j = 0; i >= 0; i--) {
270#endif
271 byte = data[i];
272
273 hex[j] = byte & 0x0f;
274 if (hex[j] >= 10)
275 hex[j] += 'a' - 10;
276 else
277 hex[j] += '0';
278 j++;
279
280 hex[j] = byte >> 4;
281 if (hex[j] >= 10)
282 hex[j] += 'a' - 10;
283 else
284 hex[j] += '0';
285 j++;
286 }
287 hex[j] = ' ';
288 j++;
289
290 return trace_seq_putmem(s, hex, j);
291}
292
e309b41d 293static void
214023c3
SR
294trace_seq_reset(struct trace_seq *s)
295{
296 s->len = 0;
297}
298
e309b41d 299static void
214023c3
SR
300trace_print_seq(struct seq_file *m, struct trace_seq *s)
301{
302 int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
303
304 s->buffer[len] = 0;
305 seq_puts(m, s->buffer);
306
307 trace_seq_reset(s);
308}
309
e309b41d 310static void
c7aafc54
IM
311flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
312{
313 struct list_head flip_pages;
314
315 INIT_LIST_HEAD(&flip_pages);
316
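	/*
	 * tr1 takes over tr2's ring-buffer bookkeeping (everything from
	 * trace_head_idx onward), then the two buffers exchange their
	 * page lists; the caller is expected to reset tr2 afterwards.
	 */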
93a588f4 317 memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
c7aafc54 318 sizeof(struct trace_array_cpu) -
93a588f4 319 offsetof(struct trace_array_cpu, trace_head_idx));
c7aafc54
IM
320
321 check_pages(tr1);
322 check_pages(tr2);
323 list_splice_init(&tr1->trace_pages, &flip_pages);
324 list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
325 list_splice_init(&flip_pages, &tr2->trace_pages);
326 BUG_ON(!list_empty(&flip_pages));
327 check_pages(tr1);
328 check_pages(tr2);
329}
330
e309b41d 331void
bc0c38d1
SR
332update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
333{
334 struct trace_array_cpu *data;
bc0c38d1
SR
335 int i;
336
4c11d7ae
SR
337 WARN_ON_ONCE(!irqs_disabled());
338 spin_lock(&ftrace_max_lock);
bc0c38d1
SR
339 /* clear out all the previous traces */
340 for_each_possible_cpu(i) {
341 data = tr->data[i];
c7aafc54 342 flip_trace(max_tr.data[i], data);
89b2f978 343 tracing_reset(data);
bc0c38d1
SR
344 }
345
346 __update_max_tr(tr, tsk, cpu);
4c11d7ae 347 spin_unlock(&ftrace_max_lock);
bc0c38d1
SR
348}
349
350/**
351 * update_max_tr_single - only copy one trace over, and reset the rest
 352 * @tr: tracer
 353 * @tsk: task with the latency
 354 * @cpu: the CPU of the buffer to copy.
355 */
e309b41d 356void
bc0c38d1
SR
357update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
358{
359 struct trace_array_cpu *data = tr->data[cpu];
bc0c38d1
SR
360 int i;
361
4c11d7ae
SR
362 WARN_ON_ONCE(!irqs_disabled());
363 spin_lock(&ftrace_max_lock);
bc0c38d1
SR
364 for_each_possible_cpu(i)
365 tracing_reset(max_tr.data[i]);
366
c7aafc54 367 flip_trace(max_tr.data[cpu], data);
89b2f978 368 tracing_reset(data);
bc0c38d1
SR
369
370 __update_max_tr(tr, tsk, cpu);
4c11d7ae 371 spin_unlock(&ftrace_max_lock);
bc0c38d1
SR
372}
373
374int register_tracer(struct tracer *type)
375{
376 struct tracer *t;
377 int len;
378 int ret = 0;
379
380 if (!type->name) {
381 pr_info("Tracer must have a name\n");
382 return -1;
383 }
384
385 mutex_lock(&trace_types_lock);
386 for (t = trace_types; t; t = t->next) {
387 if (strcmp(type->name, t->name) == 0) {
388 /* already found */
389 pr_info("Trace %s already registered\n",
390 type->name);
391 ret = -1;
392 goto out;
393 }
394 }
395
60a11774
SR
396#ifdef CONFIG_FTRACE_STARTUP_TEST
397 if (type->selftest) {
398 struct tracer *saved_tracer = current_trace;
399 struct trace_array_cpu *data;
400 struct trace_array *tr = &global_trace;
401 int saved_ctrl = tr->ctrl;
402 int i;
403 /*
404 * Run a selftest on this tracer.
405 * Here we reset the trace buffer, and set the current
406 * tracer to be this tracer. The tracer can then run some
407 * internal tracing to verify that everything is in order.
408 * If we fail, we do not register this tracer.
409 */
410 for_each_possible_cpu(i) {
60a11774 411 data = tr->data[i];
c7aafc54
IM
412 if (!head_page(data))
413 continue;
60a11774
SR
414 tracing_reset(data);
415 }
416 current_trace = type;
417 tr->ctrl = 0;
418 /* the test is responsible for initializing and enabling */
419 pr_info("Testing tracer %s: ", type->name);
420 ret = type->selftest(type, tr);
421 /* the test is responsible for resetting too */
422 current_trace = saved_tracer;
423 tr->ctrl = saved_ctrl;
424 if (ret) {
425 printk(KERN_CONT "FAILED!\n");
426 goto out;
427 }
1d4db00a
SR
428 /* Only reset on passing, to avoid touching corrupted buffers */
429 for_each_possible_cpu(i) {
430 data = tr->data[i];
431 if (!head_page(data))
432 continue;
433 tracing_reset(data);
434 }
60a11774
SR
435 printk(KERN_CONT "PASSED\n");
436 }
437#endif
438
bc0c38d1
SR
439 type->next = trace_types;
440 trace_types = type;
441 len = strlen(type->name);
442 if (len > max_tracer_type_len)
443 max_tracer_type_len = len;
60a11774 444
bc0c38d1
SR
445 out:
446 mutex_unlock(&trace_types_lock);
447
448 return ret;
449}
450
451void unregister_tracer(struct tracer *type)
452{
453 struct tracer **t;
454 int len;
455
456 mutex_lock(&trace_types_lock);
457 for (t = &trace_types; *t; t = &(*t)->next) {
458 if (*t == type)
459 goto found;
460 }
461 pr_info("Trace %s not registered\n", type->name);
462 goto out;
463
464 found:
465 *t = (*t)->next;
466 if (strlen(type->name) != max_tracer_type_len)
467 goto out;
468
469 max_tracer_type_len = 0;
470 for (t = &trace_types; *t; t = &(*t)->next) {
471 len = strlen((*t)->name);
472 if (len > max_tracer_type_len)
473 max_tracer_type_len = len;
474 }
475 out:
476 mutex_unlock(&trace_types_lock);
477}
478
e309b41d 479void tracing_reset(struct trace_array_cpu *data)
bc0c38d1
SR
480{
481 data->trace_idx = 0;
93a588f4
SR
482 data->trace_head = data->trace_tail = head_page(data);
483 data->trace_head_idx = 0;
484 data->trace_tail_idx = 0;
bc0c38d1
SR
485}
486
bc0c38d1
SR
487#define SAVED_CMDLINES 128
488static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
489static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
490static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
491static int cmdline_idx;
492static DEFINE_SPINLOCK(trace_cmdline_lock);
493atomic_t trace_record_cmdline_disabled;
494
495static void trace_init_cmdlines(void)
496{
497 memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
498 memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
499 cmdline_idx = 0;
500}
501
e309b41d 502void trace_stop_cmdline_recording(void);
bc0c38d1 503
e309b41d 504static void trace_save_cmdline(struct task_struct *tsk)
bc0c38d1
SR
505{
506 unsigned map;
507 unsigned idx;
508
509 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
510 return;
511
512 /*
513 * It's not the end of the world if we don't get
514 * the lock, but we also don't want to spin
515 * nor do we want to disable interrupts,
516 * so if we miss here, then better luck next time.
517 */
518 if (!spin_trylock(&trace_cmdline_lock))
519 return;
520
521 idx = map_pid_to_cmdline[tsk->pid];
522 if (idx >= SAVED_CMDLINES) {
523 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
524
525 map = map_cmdline_to_pid[idx];
526 if (map <= PID_MAX_DEFAULT)
527 map_pid_to_cmdline[map] = (unsigned)-1;
528
529 map_pid_to_cmdline[tsk->pid] = idx;
530
531 cmdline_idx = idx;
532 }
533
534 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
535
536 spin_unlock(&trace_cmdline_lock);
537}
538
e309b41d 539static char *trace_find_cmdline(int pid)
bc0c38d1
SR
540{
541 char *cmdline = "<...>";
542 unsigned map;
543
544 if (!pid)
545 return "<idle>";
546
547 if (pid > PID_MAX_DEFAULT)
548 goto out;
549
550 map = map_pid_to_cmdline[pid];
551 if (map >= SAVED_CMDLINES)
552 goto out;
553
554 cmdline = saved_cmdlines[map];
555
556 out:
557 return cmdline;
558}
559
e309b41d 560void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1
SR
561{
562 if (atomic_read(&trace_record_cmdline_disabled))
563 return;
564
565 trace_save_cmdline(tsk);
566}
567
e309b41d 568static inline struct list_head *
93a588f4
SR
569trace_next_list(struct trace_array_cpu *data, struct list_head *next)
570{
571 /*
 572	 * Round-robin - but skip the head (which is not a real page):
573 */
574 next = next->next;
575 if (unlikely(next == &data->trace_pages))
576 next = next->next;
577 BUG_ON(next == &data->trace_pages);
578
579 return next;
580}
581
e309b41d 582static inline void *
93a588f4
SR
583trace_next_page(struct trace_array_cpu *data, void *addr)
584{
585 struct list_head *next;
586 struct page *page;
587
588 page = virt_to_page(addr);
589
590 next = trace_next_list(data, &page->lru);
591 page = list_entry(next, struct page, lru);
592
593 return page_address(page);
594}
595
e309b41d 596static inline struct trace_entry *
c7aafc54 597tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
bc0c38d1
SR
598{
599 unsigned long idx, idx_next;
600 struct trace_entry *entry;
601
4c11d7ae 602 data->trace_idx++;
93a588f4 603 idx = data->trace_head_idx;
bc0c38d1
SR
604 idx_next = idx + 1;
605
c7aafc54
IM
606 BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
607
93a588f4 608 entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
4c11d7ae
SR
609
610 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
93a588f4 611 data->trace_head = trace_next_page(data, data->trace_head);
bc0c38d1
SR
612 idx_next = 0;
613 }
614
93a588f4
SR
615 if (data->trace_head == data->trace_tail &&
616 idx_next == data->trace_tail_idx) {
617 /* overrun */
618 data->trace_tail_idx++;
619 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
620 data->trace_tail =
621 trace_next_page(data, data->trace_tail);
622 data->trace_tail_idx = 0;
623 }
624 }
625
626 data->trace_head_idx = idx_next;
bc0c38d1
SR
627
628 return entry;
629}
630
e309b41d 631static inline void
c7aafc54 632tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
bc0c38d1
SR
633{
634 struct task_struct *tsk = current;
635 unsigned long pc;
636
637 pc = preempt_count();
638
c7aafc54
IM
639 entry->preempt_count = pc & 0xff;
640 entry->pid = tsk->pid;
750ed1a4 641 entry->t = ftrace_now(raw_smp_processor_id());
bc0c38d1
SR
642 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
643 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
644 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
645 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
646}
647
e309b41d 648void
6fb44b71
SR
649trace_function(struct trace_array *tr, struct trace_array_cpu *data,
650 unsigned long ip, unsigned long parent_ip, unsigned long flags)
bc0c38d1
SR
651{
652 struct trace_entry *entry;
dcb6308f 653 unsigned long irq_flags;
bc0c38d1 654
dcb6308f 655 spin_lock_irqsave(&data->lock, irq_flags);
c7aafc54 656 entry = tracing_get_trace_entry(tr, data);
bc0c38d1 657 tracing_generic_entry_update(entry, flags);
c7aafc54
IM
658 entry->type = TRACE_FN;
659 entry->fn.ip = ip;
660 entry->fn.parent_ip = parent_ip;
dcb6308f 661 spin_unlock_irqrestore(&data->lock, irq_flags);
017730c1
IM
662
663 trace_wake_up();
bc0c38d1
SR
664}
665
e309b41d 666void
2e0f5761
IM
667ftrace(struct trace_array *tr, struct trace_array_cpu *data,
668 unsigned long ip, unsigned long parent_ip, unsigned long flags)
669{
670 if (likely(!atomic_read(&data->disabled)))
6fb44b71 671 trace_function(tr, data, ip, parent_ip, flags);
2e0f5761
IM
672}
673
e309b41d 674void
4e655519
IM
675__trace_special(void *__tr, void *__data,
676 unsigned long arg1, unsigned long arg2, unsigned long arg3)
f0a920d5 677{
4e655519
IM
678 struct trace_array_cpu *data = __data;
679 struct trace_array *tr = __tr;
f0a920d5 680 struct trace_entry *entry;
dcb6308f 681 unsigned long irq_flags;
f0a920d5 682
dcb6308f 683 spin_lock_irqsave(&data->lock, irq_flags);
f0a920d5
IM
684 entry = tracing_get_trace_entry(tr, data);
685 tracing_generic_entry_update(entry, 0);
686 entry->type = TRACE_SPECIAL;
687 entry->special.arg1 = arg1;
688 entry->special.arg2 = arg2;
689 entry->special.arg3 = arg3;
dcb6308f 690 spin_unlock_irqrestore(&data->lock, irq_flags);
017730c1
IM
691
692 trace_wake_up();
86387f7e
IM
693}
694
695void __trace_stack(struct trace_array *tr,
696 struct trace_array_cpu *data,
697 unsigned long flags,
698 int skip)
699{
700 struct trace_entry *entry;
701 struct stack_trace trace;
702
703 if (!(trace_flags & TRACE_ITER_STACKTRACE))
704 return;
705
706 entry = tracing_get_trace_entry(tr, data);
707 tracing_generic_entry_update(entry, flags);
708 entry->type = TRACE_STACK;
709
710 memset(&entry->stack, 0, sizeof(entry->stack));
711
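	/*
	 * Have save_stack_trace() fill entry->stack.caller with up to
	 * FTRACE_STACK_ENTRIES return addresses, skipping the innermost
	 * 'skip' frames of the tracing code itself.
	 */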
712 trace.nr_entries = 0;
713 trace.max_entries = FTRACE_STACK_ENTRIES;
714 trace.skip = skip;
715 trace.entries = entry->stack.caller;
716
717 save_stack_trace(&trace);
f0a920d5
IM
718}
719
e309b41d 720void
bc0c38d1
SR
721tracing_sched_switch_trace(struct trace_array *tr,
722 struct trace_array_cpu *data,
86387f7e
IM
723 struct task_struct *prev,
724 struct task_struct *next,
bc0c38d1
SR
725 unsigned long flags)
726{
727 struct trace_entry *entry;
dcb6308f 728 unsigned long irq_flags;
bc0c38d1 729
dcb6308f 730 spin_lock_irqsave(&data->lock, irq_flags);
c7aafc54 731 entry = tracing_get_trace_entry(tr, data);
bc0c38d1
SR
732 tracing_generic_entry_update(entry, flags);
733 entry->type = TRACE_CTX;
734 entry->ctx.prev_pid = prev->pid;
735 entry->ctx.prev_prio = prev->prio;
736 entry->ctx.prev_state = prev->state;
737 entry->ctx.next_pid = next->pid;
738 entry->ctx.next_prio = next->prio;
86387f7e 739 __trace_stack(tr, data, flags, 4);
dcb6308f 740 spin_unlock_irqrestore(&data->lock, irq_flags);
bc0c38d1
SR
741}
742
57422797
IM
743void
744tracing_sched_wakeup_trace(struct trace_array *tr,
745 struct trace_array_cpu *data,
86387f7e
IM
746 struct task_struct *wakee,
747 struct task_struct *curr,
57422797
IM
748 unsigned long flags)
749{
750 struct trace_entry *entry;
751 unsigned long irq_flags;
752
753 spin_lock_irqsave(&data->lock, irq_flags);
754 entry = tracing_get_trace_entry(tr, data);
755 tracing_generic_entry_update(entry, flags);
756 entry->type = TRACE_WAKE;
757 entry->ctx.prev_pid = curr->pid;
758 entry->ctx.prev_prio = curr->prio;
759 entry->ctx.prev_state = curr->state;
760 entry->ctx.next_pid = wakee->pid;
761 entry->ctx.next_prio = wakee->prio;
86387f7e 762 __trace_stack(tr, data, flags, 5);
57422797 763 spin_unlock_irqrestore(&data->lock, irq_flags);
017730c1
IM
764
765 trace_wake_up();
57422797
IM
766}
767
2e0f5761 768#ifdef CONFIG_FTRACE
e309b41d 769static void
2e0f5761
IM
770function_trace_call(unsigned long ip, unsigned long parent_ip)
771{
772 struct trace_array *tr = &global_trace;
773 struct trace_array_cpu *data;
774 unsigned long flags;
775 long disabled;
776 int cpu;
777
778 if (unlikely(!tracer_enabled))
779 return;
780
781 local_irq_save(flags);
782 cpu = raw_smp_processor_id();
783 data = tr->data[cpu];
784 disabled = atomic_inc_return(&data->disabled);
785
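	/*
	 * Only record the call if we are the first to raise the per-CPU
	 * "disabled" count; nested or recursive calls on this CPU are
	 * dropped.
	 */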
786 if (likely(disabled == 1))
6fb44b71 787 trace_function(tr, data, ip, parent_ip, flags);
2e0f5761
IM
788
789 atomic_dec(&data->disabled);
790 local_irq_restore(flags);
791}
792
793static struct ftrace_ops trace_ops __read_mostly =
794{
795 .func = function_trace_call,
796};
797
e309b41d 798void tracing_start_function_trace(void)
2e0f5761
IM
799{
800 register_ftrace_function(&trace_ops);
801}
802
e309b41d 803void tracing_stop_function_trace(void)
2e0f5761
IM
804{
805 unregister_ftrace_function(&trace_ops);
806}
807#endif
808
bc0c38d1
SR
809enum trace_file_type {
810 TRACE_FILE_LAT_FMT = 1,
811};
812
813static struct trace_entry *
4c11d7ae
SR
814trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
815 struct trace_iterator *iter, int cpu)
bc0c38d1 816{
4c11d7ae
SR
817 struct page *page;
818 struct trace_entry *array;
bc0c38d1 819
4c11d7ae 820 if (iter->next_idx[cpu] >= tr->entries ||
b3806b43
SR
821 iter->next_idx[cpu] >= data->trace_idx ||
822 (data->trace_head == data->trace_tail &&
823 data->trace_head_idx == data->trace_tail_idx))
bc0c38d1
SR
824 return NULL;
825
4c11d7ae 826 if (!iter->next_page[cpu]) {
93a588f4
SR
827 /* Initialize the iterator for this cpu trace buffer */
828 WARN_ON(!data->trace_tail);
829 page = virt_to_page(data->trace_tail);
830 iter->next_page[cpu] = &page->lru;
831 iter->next_page_idx[cpu] = data->trace_tail_idx;
4c11d7ae 832 }
bc0c38d1 833
4c11d7ae 834 page = list_entry(iter->next_page[cpu], struct page, lru);
c7aafc54
IM
835 BUG_ON(&data->trace_pages == &page->lru);
836
4c11d7ae
SR
837 array = page_address(page);
838
93a588f4 839 WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
4c11d7ae 840 return &array[iter->next_page_idx[cpu]];
bc0c38d1
SR
841}
842
e309b41d 843static struct trace_entry *
bc0c38d1
SR
844find_next_entry(struct trace_iterator *iter, int *ent_cpu)
845{
846 struct trace_array *tr = iter->tr;
847 struct trace_entry *ent, *next = NULL;
848 int next_cpu = -1;
849 int cpu;
850
851 for_each_possible_cpu(cpu) {
c7aafc54 852 if (!head_page(tr->data[cpu]))
bc0c38d1 853 continue;
4c11d7ae 854 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
cdd31cd2
IM
855 /*
856 * Pick the entry with the smallest timestamp:
857 */
858 if (ent && (!next || ent->t < next->t)) {
bc0c38d1
SR
859 next = ent;
860 next_cpu = cpu;
861 }
862 }
863
864 if (ent_cpu)
865 *ent_cpu = next_cpu;
866
867 return next;
868}
869
e309b41d 870static void trace_iterator_increment(struct trace_iterator *iter)
bc0c38d1 871{
b3806b43
SR
872 iter->idx++;
873 iter->next_idx[iter->cpu]++;
874 iter->next_page_idx[iter->cpu]++;
8c523a9d 875
b3806b43
SR
876 if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
877 struct trace_array_cpu *data = iter->tr->data[iter->cpu];
bc0c38d1 878
b3806b43
SR
879 iter->next_page_idx[iter->cpu] = 0;
880 iter->next_page[iter->cpu] =
881 trace_next_list(data, iter->next_page[iter->cpu]);
882 }
883}
bc0c38d1 884
e309b41d 885static void trace_consume(struct trace_iterator *iter)
b3806b43
SR
886{
887 struct trace_array_cpu *data = iter->tr->data[iter->cpu];
888
889 data->trace_tail_idx++;
890 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
891 data->trace_tail = trace_next_page(data, data->trace_tail);
892 data->trace_tail_idx = 0;
893 }
4e3c3333 894
b3806b43
SR
 895	/* Check if we emptied it; if so, reset the index */
896 if (data->trace_head == data->trace_tail &&
897 data->trace_head_idx == data->trace_tail_idx)
898 data->trace_idx = 0;
b3806b43
SR
899}
900
e309b41d 901static void *find_next_entry_inc(struct trace_iterator *iter)
b3806b43
SR
902{
903 struct trace_entry *next;
904 int next_cpu = -1;
905
906 next = find_next_entry(iter, &next_cpu);
93a588f4 907
4e3c3333
IM
908 iter->prev_ent = iter->ent;
909 iter->prev_cpu = iter->cpu;
910
bc0c38d1
SR
911 iter->ent = next;
912 iter->cpu = next_cpu;
913
b3806b43
SR
914 if (next)
915 trace_iterator_increment(iter);
916
bc0c38d1
SR
917 return next ? iter : NULL;
918}
919
e309b41d 920static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
921{
922 struct trace_iterator *iter = m->private;
bc0c38d1
SR
923 void *last_ent = iter->ent;
924 int i = (int)*pos;
4e3c3333 925 void *ent;
bc0c38d1
SR
926
927 (*pos)++;
928
929 /* can't go backwards */
930 if (iter->idx > i)
931 return NULL;
932
933 if (iter->idx < 0)
934 ent = find_next_entry_inc(iter);
935 else
936 ent = iter;
937
938 while (ent && iter->idx < i)
939 ent = find_next_entry_inc(iter);
940
941 iter->pos = *pos;
942
943 if (last_ent && !ent)
944 seq_puts(m, "\n\nvim:ft=help\n");
945
946 return ent;
947}
948
949static void *s_start(struct seq_file *m, loff_t *pos)
950{
951 struct trace_iterator *iter = m->private;
952 void *p = NULL;
953 loff_t l = 0;
954 int i;
955
956 mutex_lock(&trace_types_lock);
957
958 if (!current_trace || current_trace != iter->trace)
959 return NULL;
960
961 atomic_inc(&trace_record_cmdline_disabled);
962
963 /* let the tracer grab locks here if needed */
964 if (current_trace->start)
965 current_trace->start(iter);
966
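	/*
	 * If the seq_file is not continuing from our cached position,
	 * rewind the iterator state and replay entries via s_next()
	 * until *pos is reached; otherwise pick up where we left off.
	 */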
967 if (*pos != iter->pos) {
968 iter->ent = NULL;
969 iter->cpu = 0;
970 iter->idx = -1;
4e3c3333
IM
971 iter->prev_ent = NULL;
972 iter->prev_cpu = -1;
bc0c38d1 973
4c11d7ae 974 for_each_possible_cpu(i) {
bc0c38d1 975 iter->next_idx[i] = 0;
4c11d7ae
SR
976 iter->next_page[i] = NULL;
977 }
bc0c38d1
SR
978
979 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
980 ;
981
982 } else {
4c11d7ae 983 l = *pos - 1;
bc0c38d1
SR
984 p = s_next(m, p, &l);
985 }
986
987 return p;
988}
989
990static void s_stop(struct seq_file *m, void *p)
991{
992 struct trace_iterator *iter = m->private;
993
994 atomic_dec(&trace_record_cmdline_disabled);
995
996 /* let the tracer release locks here if needed */
997 if (current_trace && current_trace == iter->trace && iter->trace->stop)
998 iter->trace->stop(iter);
999
1000 mutex_unlock(&trace_types_lock);
1001}
1002
b3806b43 1003static int
214023c3 1004seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
bc0c38d1
SR
1005{
1006#ifdef CONFIG_KALLSYMS
1007 char str[KSYM_SYMBOL_LEN];
1008
1009 kallsyms_lookup(address, NULL, NULL, NULL, str);
1010
b3806b43 1011 return trace_seq_printf(s, fmt, str);
bc0c38d1 1012#endif
b3806b43 1013 return 1;
bc0c38d1
SR
1014}
1015
b3806b43 1016static int
214023c3
SR
1017seq_print_sym_offset(struct trace_seq *s, const char *fmt,
1018 unsigned long address)
bc0c38d1
SR
1019{
1020#ifdef CONFIG_KALLSYMS
1021 char str[KSYM_SYMBOL_LEN];
1022
1023 sprint_symbol(str, address);
b3806b43 1024 return trace_seq_printf(s, fmt, str);
bc0c38d1 1025#endif
b3806b43 1026 return 1;
bc0c38d1
SR
1027}
1028
1029#ifndef CONFIG_64BIT
1030# define IP_FMT "%08lx"
1031#else
1032# define IP_FMT "%016lx"
1033#endif
1034
e309b41d 1035static int
214023c3 1036seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
bc0c38d1 1037{
b3806b43
SR
1038 int ret;
1039
1040 if (!ip)
1041 return trace_seq_printf(s, "0");
bc0c38d1
SR
1042
1043 if (sym_flags & TRACE_ITER_SYM_OFFSET)
b3806b43 1044 ret = seq_print_sym_offset(s, "%s", ip);
bc0c38d1 1045 else
b3806b43
SR
1046 ret = seq_print_sym_short(s, "%s", ip);
1047
1048 if (!ret)
1049 return 0;
bc0c38d1
SR
1050
1051 if (sym_flags & TRACE_ITER_SYM_ADDR)
b3806b43
SR
1052 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
1053 return ret;
bc0c38d1
SR
1054}
1055
e309b41d 1056static void print_lat_help_header(struct seq_file *m)
bc0c38d1
SR
1057{
1058 seq_puts(m, "# _------=> CPU# \n");
1059 seq_puts(m, "# / _-----=> irqs-off \n");
1060 seq_puts(m, "# | / _----=> need-resched \n");
1061 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1062 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1063 seq_puts(m, "# |||| / \n");
1064 seq_puts(m, "# ||||| delay \n");
1065 seq_puts(m, "# cmd pid ||||| time | caller \n");
1066 seq_puts(m, "# \\ / ||||| \\ | / \n");
1067}
1068
e309b41d 1069static void print_func_help_header(struct seq_file *m)
bc0c38d1
SR
1070{
1071 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1072 seq_puts(m, "# | | | | |\n");
1073}
1074
1075
e309b41d 1076static void
bc0c38d1
SR
1077print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1078{
1079 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1080 struct trace_array *tr = iter->tr;
1081 struct trace_array_cpu *data = tr->data[tr->cpu];
1082 struct tracer *type = current_trace;
4c11d7ae
SR
1083 unsigned long total = 0;
1084 unsigned long entries = 0;
bc0c38d1
SR
1085 int cpu;
1086 const char *name = "preemption";
1087
1088 if (type)
1089 name = type->name;
1090
1091 for_each_possible_cpu(cpu) {
c7aafc54 1092 if (head_page(tr->data[cpu])) {
4c11d7ae
SR
1093 total += tr->data[cpu]->trace_idx;
1094 if (tr->data[cpu]->trace_idx > tr->entries)
bc0c38d1 1095 entries += tr->entries;
4c11d7ae 1096 else
bc0c38d1
SR
1097 entries += tr->data[cpu]->trace_idx;
1098 }
1099 }
1100
1101 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
1102 name, UTS_RELEASE);
1103 seq_puts(m, "-----------------------------------"
1104 "---------------------------------\n");
1105 seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
1106 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 1107 nsecs_to_usecs(data->saved_latency),
bc0c38d1 1108 entries,
4c11d7ae 1109 total,
bc0c38d1
SR
1110 tr->cpu,
1111#if defined(CONFIG_PREEMPT_NONE)
1112 "server",
1113#elif defined(CONFIG_PREEMPT_VOLUNTARY)
1114 "desktop",
1115#elif defined(CONFIG_PREEMPT_DESKTOP)
1116 "preempt",
1117#else
1118 "unknown",
1119#endif
1120 /* These are reserved for later use */
1121 0, 0, 0, 0);
1122#ifdef CONFIG_SMP
1123 seq_printf(m, " #P:%d)\n", num_online_cpus());
1124#else
1125 seq_puts(m, ")\n");
1126#endif
1127 seq_puts(m, " -----------------\n");
1128 seq_printf(m, " | task: %.16s-%d "
1129 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1130 data->comm, data->pid, data->uid, data->nice,
1131 data->policy, data->rt_priority);
1132 seq_puts(m, " -----------------\n");
1133
1134 if (data->critical_start) {
1135 seq_puts(m, " => started at: ");
214023c3
SR
1136 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1137 trace_print_seq(m, &iter->seq);
bc0c38d1 1138 seq_puts(m, "\n => ended at: ");
214023c3
SR
1139 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1140 trace_print_seq(m, &iter->seq);
bc0c38d1
SR
1141 seq_puts(m, "\n");
1142 }
1143
1144 seq_puts(m, "\n");
1145}
1146
e309b41d 1147static void
214023c3 1148lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
bc0c38d1
SR
1149{
1150 int hardirq, softirq;
1151 char *comm;
1152
1153 comm = trace_find_cmdline(entry->pid);
1154
214023c3
SR
1155 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1156 trace_seq_printf(s, "%d", cpu);
1157 trace_seq_printf(s, "%c%c",
1158 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
1159 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
bc0c38d1
SR
1160
1161 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
1162 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
1163 if (hardirq && softirq)
214023c3 1164 trace_seq_putc(s, 'H');
bc0c38d1
SR
1165 else {
1166 if (hardirq)
214023c3 1167 trace_seq_putc(s, 'h');
bc0c38d1
SR
1168 else {
1169 if (softirq)
214023c3 1170 trace_seq_putc(s, 's');
bc0c38d1 1171 else
214023c3 1172 trace_seq_putc(s, '.');
bc0c38d1
SR
1173 }
1174 }
1175
1176 if (entry->preempt_count)
214023c3 1177 trace_seq_printf(s, "%x", entry->preempt_count);
bc0c38d1 1178 else
214023c3 1179 trace_seq_puts(s, ".");
bc0c38d1
SR
1180}
1181
1182unsigned long preempt_mark_thresh = 100;
1183
e309b41d 1184static void
214023c3 1185lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
bc0c38d1
SR
1186 unsigned long rel_usecs)
1187{
214023c3 1188 trace_seq_printf(s, " %4lldus", abs_usecs);
bc0c38d1 1189 if (rel_usecs > preempt_mark_thresh)
214023c3 1190 trace_seq_puts(s, "!: ");
bc0c38d1 1191 else if (rel_usecs > 1)
214023c3 1192 trace_seq_puts(s, "+: ");
bc0c38d1 1193 else
214023c3 1194 trace_seq_puts(s, " : ");
bc0c38d1
SR
1195}
1196
1197static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1198
e309b41d 1199static int
214023c3 1200print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
bc0c38d1 1201{
214023c3 1202 struct trace_seq *s = &iter->seq;
bc0c38d1
SR
1203 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1204 struct trace_entry *next_entry = find_next_entry(iter, NULL);
1205 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1206 struct trace_entry *entry = iter->ent;
1207 unsigned long abs_usecs;
1208 unsigned long rel_usecs;
1209 char *comm;
1210 int S;
86387f7e 1211 int i;
bc0c38d1
SR
1212
1213 if (!next_entry)
1214 next_entry = entry;
1215 rel_usecs = ns2usecs(next_entry->t - entry->t);
1216 abs_usecs = ns2usecs(entry->t - iter->tr->time_start);
1217
1218 if (verbose) {
1219 comm = trace_find_cmdline(entry->pid);
214023c3
SR
1220 trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
1221 " %ld.%03ldms (+%ld.%03ldms): ",
1222 comm,
1223 entry->pid, cpu, entry->flags,
1224 entry->preempt_count, trace_idx,
1225 ns2usecs(entry->t),
1226 abs_usecs/1000,
1227 abs_usecs % 1000, rel_usecs/1000,
1228 rel_usecs % 1000);
bc0c38d1 1229 } else {
86387f7e
IM
1230 if (entry->type != TRACE_STACK) {
1231 lat_print_generic(s, entry, cpu);
1232 lat_print_timestamp(s, abs_usecs, rel_usecs);
1233 }
bc0c38d1
SR
1234 }
1235 switch (entry->type) {
1236 case TRACE_FN:
214023c3
SR
1237 seq_print_ip_sym(s, entry->fn.ip, sym_flags);
1238 trace_seq_puts(s, " (");
1239 seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
1240 trace_seq_puts(s, ")\n");
bc0c38d1
SR
1241 break;
1242 case TRACE_CTX:
57422797 1243 case TRACE_WAKE:
bc0c38d1
SR
1244 S = entry->ctx.prev_state < sizeof(state_to_char) ?
1245 state_to_char[entry->ctx.prev_state] : 'X';
1246 comm = trace_find_cmdline(entry->ctx.next_pid);
57422797 1247 trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d %s\n",
214023c3
SR
1248 entry->ctx.prev_pid,
1249 entry->ctx.prev_prio,
57422797 1250 S, entry->type == TRACE_CTX ? "==>" : " +",
214023c3
SR
1251 entry->ctx.next_pid,
1252 entry->ctx.next_prio,
1253 comm);
bc0c38d1 1254 break;
f0a920d5 1255 case TRACE_SPECIAL:
4e655519 1256 trace_seq_printf(s, " %ld %ld %ld\n",
f0a920d5
IM
1257 entry->special.arg1,
1258 entry->special.arg2,
1259 entry->special.arg3);
1260 break;
86387f7e
IM
1261 case TRACE_STACK:
1262 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1263 if (i)
1264 trace_seq_puts(s, " <= ");
1265 seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
1266 }
1267 trace_seq_puts(s, "\n");
1268 break;
89b2f978 1269 default:
214023c3 1270 trace_seq_printf(s, "Unknown type %d\n", entry->type);
bc0c38d1 1271 }
f9896bf3 1272 return 1;
bc0c38d1
SR
1273}
1274
e309b41d 1275static int print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 1276{
214023c3 1277 struct trace_seq *s = &iter->seq;
bc0c38d1 1278 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 1279 struct trace_entry *entry;
bc0c38d1
SR
1280 unsigned long usec_rem;
1281 unsigned long long t;
1282 unsigned long secs;
1283 char *comm;
b3806b43 1284 int ret;
86387f7e
IM
1285 int S;
1286 int i;
bc0c38d1 1287
4e3c3333
IM
1288 entry = iter->ent;
1289
bc0c38d1
SR
1290 comm = trace_find_cmdline(iter->ent->pid);
1291
cdd31cd2 1292 t = ns2usecs(entry->t);
bc0c38d1
SR
1293 usec_rem = do_div(t, 1000000ULL);
1294 secs = (unsigned long)t;
1295
86387f7e
IM
1296 if (entry->type != TRACE_STACK) {
1297 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1298 if (!ret)
1299 return 0;
1300 ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
1301 if (!ret)
1302 return 0;
1303 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1304 if (!ret)
1305 return 0;
1306 }
bc0c38d1
SR
1307
1308 switch (entry->type) {
1309 case TRACE_FN:
b3806b43
SR
1310 ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
1311 if (!ret)
1312 return 0;
bc0c38d1
SR
1313 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
1314 entry->fn.parent_ip) {
b3806b43
SR
1315 ret = trace_seq_printf(s, " <-");
1316 if (!ret)
1317 return 0;
1318 ret = seq_print_ip_sym(s, entry->fn.parent_ip,
1319 sym_flags);
1320 if (!ret)
1321 return 0;
bc0c38d1 1322 }
b3806b43
SR
1323 ret = trace_seq_printf(s, "\n");
1324 if (!ret)
1325 return 0;
bc0c38d1
SR
1326 break;
1327 case TRACE_CTX:
57422797 1328 case TRACE_WAKE:
bc0c38d1
SR
1329 S = entry->ctx.prev_state < sizeof(state_to_char) ?
1330 state_to_char[entry->ctx.prev_state] : 'X';
57422797 1331 ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d\n",
b3806b43
SR
1332 entry->ctx.prev_pid,
1333 entry->ctx.prev_prio,
1334 S,
57422797 1335 entry->type == TRACE_CTX ? "==>" : " +",
b3806b43
SR
1336 entry->ctx.next_pid,
1337 entry->ctx.next_prio);
1338 if (!ret)
1339 return 0;
bc0c38d1 1340 break;
f0a920d5 1341 case TRACE_SPECIAL:
4e655519 1342 ret = trace_seq_printf(s, " %ld %ld %ld\n",
f0a920d5
IM
1343 entry->special.arg1,
1344 entry->special.arg2,
1345 entry->special.arg3);
1346 if (!ret)
1347 return 0;
1348 break;
86387f7e
IM
1349 case TRACE_STACK:
1350 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1351 if (i) {
1352 ret = trace_seq_puts(s, " <= ");
1353 if (!ret)
1354 return 0;
1355 }
1356 ret = seq_print_ip_sym(s, entry->stack.caller[i],
1357 sym_flags);
1358 if (!ret)
1359 return 0;
1360 }
1361 ret = trace_seq_puts(s, "\n");
1362 if (!ret)
1363 return 0;
1364 break;
bc0c38d1 1365 }
b3806b43 1366 return 1;
bc0c38d1
SR
1367}
1368
e309b41d 1369static int print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
1370{
1371 struct trace_seq *s = &iter->seq;
1372 struct trace_entry *entry;
1373 int ret;
1374 int S;
1375
1376 entry = iter->ent;
1377
1378 ret = trace_seq_printf(s, "%d %d %llu ",
1379 entry->pid, iter->cpu, entry->t);
1380 if (!ret)
1381 return 0;
1382
1383 switch (entry->type) {
1384 case TRACE_FN:
 1385		ret = trace_seq_printf(s, "%lx %lx\n",
1386 entry->fn.ip, entry->fn.parent_ip);
1387 if (!ret)
1388 return 0;
1389 break;
1390 case TRACE_CTX:
57422797 1391 case TRACE_WAKE:
f9896bf3
IM
1392 S = entry->ctx.prev_state < sizeof(state_to_char) ?
1393 state_to_char[entry->ctx.prev_state] : 'X';
57422797
IM
1394 if (entry->type == TRACE_WAKE)
1395 S = '+';
f9896bf3
IM
1396 ret = trace_seq_printf(s, "%d %d %c %d %d\n",
1397 entry->ctx.prev_pid,
1398 entry->ctx.prev_prio,
1399 S,
1400 entry->ctx.next_pid,
1401 entry->ctx.next_prio);
1402 if (!ret)
1403 return 0;
1404 break;
f0a920d5 1405 case TRACE_SPECIAL:
86387f7e 1406 case TRACE_STACK:
4e655519 1407 ret = trace_seq_printf(s, " %ld %ld %ld\n",
f0a920d5
IM
1408 entry->special.arg1,
1409 entry->special.arg2,
1410 entry->special.arg3);
1411 if (!ret)
1412 return 0;
1413 break;
f9896bf3
IM
1414 }
1415 return 1;
1416}
1417
cb0f12aa
IM
1418#define SEQ_PUT_FIELD_RET(s, x) \
1419do { \
1420 if (!trace_seq_putmem(s, &(x), sizeof(x))) \
1421 return 0; \
1422} while (0)
1423
5e3ca0ec
IM
1424#define SEQ_PUT_HEX_FIELD_RET(s, x) \
1425do { \
1426 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
1427 return 0; \
1428} while (0)
1429
e309b41d 1430static int print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
1431{
1432 struct trace_seq *s = &iter->seq;
1433 unsigned char newline = '\n';
1434 struct trace_entry *entry;
1435 int S;
1436
1437 entry = iter->ent;
1438
1439 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1440 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1441 SEQ_PUT_HEX_FIELD_RET(s, entry->t);
1442
1443 switch (entry->type) {
1444 case TRACE_FN:
1445 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
1446 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
1447 break;
1448 case TRACE_CTX:
57422797 1449 case TRACE_WAKE:
5e3ca0ec
IM
1450 S = entry->ctx.prev_state < sizeof(state_to_char) ?
1451 state_to_char[entry->ctx.prev_state] : 'X';
57422797
IM
1452 if (entry->type == TRACE_WAKE)
1453 S = '+';
5e3ca0ec
IM
1454 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
1455 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
1456 SEQ_PUT_HEX_FIELD_RET(s, S);
1457 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
1458 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
1459 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
1460 break;
1461 case TRACE_SPECIAL:
86387f7e 1462 case TRACE_STACK:
5e3ca0ec
IM
1463 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
1464 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
1465 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
1466 break;
1467 }
1468 SEQ_PUT_FIELD_RET(s, newline);
1469
1470 return 1;
1471}
1472
e309b41d 1473static int print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
1474{
1475 struct trace_seq *s = &iter->seq;
1476 struct trace_entry *entry;
1477
1478 entry = iter->ent;
1479
1480 SEQ_PUT_FIELD_RET(s, entry->pid);
1481 SEQ_PUT_FIELD_RET(s, entry->cpu);
1482 SEQ_PUT_FIELD_RET(s, entry->t);
1483
1484 switch (entry->type) {
1485 case TRACE_FN:
1486 SEQ_PUT_FIELD_RET(s, entry->fn.ip);
1487 SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);
1488 break;
1489 case TRACE_CTX:
1490 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
1491 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
1492 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
1493 SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
1494 SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
1495 break;
f0a920d5 1496 case TRACE_SPECIAL:
86387f7e 1497 case TRACE_STACK:
f0a920d5
IM
1498 SEQ_PUT_FIELD_RET(s, entry->special.arg1);
1499 SEQ_PUT_FIELD_RET(s, entry->special.arg2);
1500 SEQ_PUT_FIELD_RET(s, entry->special.arg3);
1501 break;
cb0f12aa
IM
1502 }
1503 return 1;
1504}
1505
bc0c38d1
SR
1506static int trace_empty(struct trace_iterator *iter)
1507{
1508 struct trace_array_cpu *data;
1509 int cpu;
1510
1511 for_each_possible_cpu(cpu) {
1512 data = iter->tr->data[cpu];
1513
b3806b43
SR
1514 if (head_page(data) && data->trace_idx &&
1515 (data->trace_tail != data->trace_head ||
1516 data->trace_tail_idx != data->trace_head_idx))
bc0c38d1
SR
1517 return 0;
1518 }
1519 return 1;
1520}
1521
f9896bf3
IM
1522static int print_trace_line(struct trace_iterator *iter)
1523{
cb0f12aa
IM
1524 if (trace_flags & TRACE_ITER_BIN)
1525 return print_bin_fmt(iter);
1526
5e3ca0ec
IM
1527 if (trace_flags & TRACE_ITER_HEX)
1528 return print_hex_fmt(iter);
1529
f9896bf3
IM
1530 if (trace_flags & TRACE_ITER_RAW)
1531 return print_raw_fmt(iter);
1532
1533 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
1534 return print_lat_fmt(iter, iter->idx, iter->cpu);
1535
1536 return print_trace_fmt(iter);
1537}
1538
bc0c38d1
SR
1539static int s_show(struct seq_file *m, void *v)
1540{
1541 struct trace_iterator *iter = v;
1542
1543 if (iter->ent == NULL) {
1544 if (iter->tr) {
1545 seq_printf(m, "# tracer: %s\n", iter->trace->name);
1546 seq_puts(m, "#\n");
1547 }
1548 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1549 /* print nothing if the buffers are empty */
1550 if (trace_empty(iter))
1551 return 0;
1552 print_trace_header(m, iter);
1553 if (!(trace_flags & TRACE_ITER_VERBOSE))
1554 print_lat_help_header(m);
1555 } else {
1556 if (!(trace_flags & TRACE_ITER_VERBOSE))
1557 print_func_help_header(m);
1558 }
1559 } else {
f9896bf3 1560 print_trace_line(iter);
214023c3 1561 trace_print_seq(m, &iter->seq);
bc0c38d1
SR
1562 }
1563
1564 return 0;
1565}
1566
1567static struct seq_operations tracer_seq_ops = {
4bf39a94
IM
1568 .start = s_start,
1569 .next = s_next,
1570 .stop = s_stop,
1571 .show = s_show,
bc0c38d1
SR
1572};
1573
e309b41d 1574static struct trace_iterator *
bc0c38d1
SR
1575__tracing_open(struct inode *inode, struct file *file, int *ret)
1576{
1577 struct trace_iterator *iter;
1578
60a11774
SR
1579 if (tracing_disabled) {
1580 *ret = -ENODEV;
1581 return NULL;
1582 }
1583
bc0c38d1
SR
1584 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1585 if (!iter) {
1586 *ret = -ENOMEM;
1587 goto out;
1588 }
1589
1590 mutex_lock(&trace_types_lock);
1591 if (current_trace && current_trace->print_max)
1592 iter->tr = &max_tr;
1593 else
1594 iter->tr = inode->i_private;
1595 iter->trace = current_trace;
1596 iter->pos = -1;
1597
1598 /* TODO stop tracer */
1599 *ret = seq_open(file, &tracer_seq_ops);
1600 if (!*ret) {
1601 struct seq_file *m = file->private_data;
1602 m->private = iter;
1603
1604 /* stop the trace while dumping */
1605 if (iter->tr->ctrl)
1606 tracer_enabled = 0;
1607
1608 if (iter->trace && iter->trace->open)
1609 iter->trace->open(iter);
1610 } else {
1611 kfree(iter);
1612 iter = NULL;
1613 }
1614 mutex_unlock(&trace_types_lock);
1615
1616 out:
1617 return iter;
1618}
1619
1620int tracing_open_generic(struct inode *inode, struct file *filp)
1621{
60a11774
SR
1622 if (tracing_disabled)
1623 return -ENODEV;
1624
bc0c38d1
SR
1625 filp->private_data = inode->i_private;
1626 return 0;
1627}
1628
1629int tracing_release(struct inode *inode, struct file *file)
1630{
1631 struct seq_file *m = (struct seq_file *)file->private_data;
1632 struct trace_iterator *iter = m->private;
1633
1634 mutex_lock(&trace_types_lock);
1635 if (iter->trace && iter->trace->close)
1636 iter->trace->close(iter);
1637
1638 /* reenable tracing if it was previously enabled */
1639 if (iter->tr->ctrl)
1640 tracer_enabled = 1;
1641 mutex_unlock(&trace_types_lock);
1642
1643 seq_release(inode, file);
1644 kfree(iter);
1645 return 0;
1646}
1647
1648static int tracing_open(struct inode *inode, struct file *file)
1649{
1650 int ret;
1651
1652 __tracing_open(inode, file, &ret);
1653
1654 return ret;
1655}
1656
1657static int tracing_lt_open(struct inode *inode, struct file *file)
1658{
1659 struct trace_iterator *iter;
1660 int ret;
1661
1662 iter = __tracing_open(inode, file, &ret);
1663
1664 if (!ret)
1665 iter->iter_flags |= TRACE_FILE_LAT_FMT;
1666
1667 return ret;
1668}
1669
1670
e309b41d 1671static void *
bc0c38d1
SR
1672t_next(struct seq_file *m, void *v, loff_t *pos)
1673{
1674 struct tracer *t = m->private;
1675
1676 (*pos)++;
1677
1678 if (t)
1679 t = t->next;
1680
1681 m->private = t;
1682
1683 return t;
1684}
1685
1686static void *t_start(struct seq_file *m, loff_t *pos)
1687{
1688 struct tracer *t = m->private;
1689 loff_t l = 0;
1690
1691 mutex_lock(&trace_types_lock);
1692 for (; t && l < *pos; t = t_next(m, t, &l))
1693 ;
1694
1695 return t;
1696}
1697
1698static void t_stop(struct seq_file *m, void *p)
1699{
1700 mutex_unlock(&trace_types_lock);
1701}
1702
1703static int t_show(struct seq_file *m, void *v)
1704{
1705 struct tracer *t = v;
1706
1707 if (!t)
1708 return 0;
1709
1710 seq_printf(m, "%s", t->name);
1711 if (t->next)
1712 seq_putc(m, ' ');
1713 else
1714 seq_putc(m, '\n');
1715
1716 return 0;
1717}
1718
1719static struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
1720 .start = t_start,
1721 .next = t_next,
1722 .stop = t_stop,
1723 .show = t_show,
bc0c38d1
SR
1724};
1725
1726static int show_traces_open(struct inode *inode, struct file *file)
1727{
1728 int ret;
1729
60a11774
SR
1730 if (tracing_disabled)
1731 return -ENODEV;
1732
bc0c38d1
SR
1733 ret = seq_open(file, &show_traces_seq_ops);
1734 if (!ret) {
1735 struct seq_file *m = file->private_data;
1736 m->private = trace_types;
1737 }
1738
1739 return ret;
1740}
1741
1742static struct file_operations tracing_fops = {
4bf39a94
IM
1743 .open = tracing_open,
1744 .read = seq_read,
1745 .llseek = seq_lseek,
1746 .release = tracing_release,
bc0c38d1
SR
1747};
1748
1749static struct file_operations tracing_lt_fops = {
4bf39a94
IM
1750 .open = tracing_lt_open,
1751 .read = seq_read,
1752 .llseek = seq_lseek,
1753 .release = tracing_release,
bc0c38d1
SR
1754};
1755
1756static struct file_operations show_traces_fops = {
1757 .open = show_traces_open,
1758 .read = seq_read,
1759 .release = seq_release,
1760};
1761
1762static ssize_t
1763tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
1764 size_t cnt, loff_t *ppos)
1765{
1766 char *buf;
1767 int r = 0;
1768 int len = 0;
1769 int i;
1770
 1771	/* calculate max size */
1772 for (i = 0; trace_options[i]; i++) {
1773 len += strlen(trace_options[i]);
1774 len += 3; /* "no" and space */
1775 }
1776
1777 /* +2 for \n and \0 */
1778 buf = kmalloc(len + 2, GFP_KERNEL);
1779 if (!buf)
1780 return -ENOMEM;
1781
1782 for (i = 0; trace_options[i]; i++) {
1783 if (trace_flags & (1 << i))
1784 r += sprintf(buf + r, "%s ", trace_options[i]);
1785 else
1786 r += sprintf(buf + r, "no%s ", trace_options[i]);
1787 }
1788
1789 r += sprintf(buf + r, "\n");
1790 WARN_ON(r >= len + 2);
1791
1792 r = simple_read_from_buffer(ubuf, cnt, ppos,
1793 buf, r);
1794
1795 kfree(buf);
1796
1797 return r;
1798}
1799
1800static ssize_t
1801tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
1802 size_t cnt, loff_t *ppos)
1803{
1804 char buf[64];
1805 char *cmp = buf;
1806 int neg = 0;
1807 int i;
1808
1809 if (cnt > 63)
1810 cnt = 63;
1811
1812 if (copy_from_user(&buf, ubuf, cnt))
1813 return -EFAULT;
1814
1815 buf[cnt] = 0;
1816
1817 if (strncmp(buf, "no", 2) == 0) {
1818 neg = 1;
1819 cmp += 2;
1820 }
1821
1822 for (i = 0; trace_options[i]; i++) {
1823 int len = strlen(trace_options[i]);
1824
1825 if (strncmp(cmp, trace_options[i], len) == 0) {
1826 if (neg)
1827 trace_flags &= ~(1 << i);
1828 else
1829 trace_flags |= (1 << i);
1830 break;
1831 }
1832 }
1833
1834 filp->f_pos += cnt;
1835
1836 return cnt;
1837}
1838
1839static struct file_operations tracing_iter_fops = {
1840 .open = tracing_open_generic,
1841 .read = tracing_iter_ctrl_read,
1842 .write = tracing_iter_ctrl_write,
1843};
1844
7bd2f24c
IM
1845static const char readme_msg[] =
1846 "tracing mini-HOWTO:\n\n"
1847 "# mkdir /debug\n"
1848 "# mount -t debugfs nodev /debug\n\n"
1849 "# cat /debug/tracing/available_tracers\n"
1850 "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
1851 "# cat /debug/tracing/current_tracer\n"
1852 "none\n"
1853 "# echo sched_switch > /debug/tracing/current_tracer\n"
1854 "# cat /debug/tracing/current_tracer\n"
1855 "sched_switch\n"
1856 "# cat /debug/tracing/iter_ctrl\n"
1857 "noprint-parent nosym-offset nosym-addr noverbose\n"
1858 "# echo print-parent > /debug/tracing/iter_ctrl\n"
1859 "# echo 1 > /debug/tracing/tracing_enabled\n"
1860 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
1861 "echo 0 > /debug/tracing/tracing_enabled\n"
1862;
1863
1864static ssize_t
1865tracing_readme_read(struct file *filp, char __user *ubuf,
1866 size_t cnt, loff_t *ppos)
1867{
1868 return simple_read_from_buffer(ubuf, cnt, ppos,
1869 readme_msg, strlen(readme_msg));
1870}
1871
1872static struct file_operations tracing_readme_fops = {
1873 .open = tracing_open_generic,
1874 .read = tracing_readme_read,
1875};
1876
bc0c38d1
SR
1877static ssize_t
1878tracing_ctrl_read(struct file *filp, char __user *ubuf,
1879 size_t cnt, loff_t *ppos)
1880{
1881 struct trace_array *tr = filp->private_data;
1882 char buf[64];
1883 int r;
1884
1885 r = sprintf(buf, "%ld\n", tr->ctrl);
4e3c3333 1886 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
1887}
1888
1889static ssize_t
1890tracing_ctrl_write(struct file *filp, const char __user *ubuf,
1891 size_t cnt, loff_t *ppos)
1892{
1893 struct trace_array *tr = filp->private_data;
1894 long val;
1895 char buf[64];
1896
1897 if (cnt > 63)
1898 cnt = 63;
1899
1900 if (copy_from_user(&buf, ubuf, cnt))
1901 return -EFAULT;
1902
1903 buf[cnt] = 0;
1904
1905 val = simple_strtoul(buf, NULL, 10);
1906
1907 val = !!val;
1908
1909 mutex_lock(&trace_types_lock);
1910 if (tr->ctrl ^ val) {
1911 if (val)
1912 tracer_enabled = 1;
1913 else
1914 tracer_enabled = 0;
1915
1916 tr->ctrl = val;
1917
1918 if (current_trace && current_trace->ctrl_update)
1919 current_trace->ctrl_update(tr);
1920 }
1921 mutex_unlock(&trace_types_lock);
1922
1923 filp->f_pos += cnt;
1924
1925 return cnt;
1926}
1927
1928static ssize_t
1929tracing_set_trace_read(struct file *filp, char __user *ubuf,
1930 size_t cnt, loff_t *ppos)
1931{
1932 char buf[max_tracer_type_len+2];
1933 int r;
1934
1935 mutex_lock(&trace_types_lock);
1936 if (current_trace)
1937 r = sprintf(buf, "%s\n", current_trace->name);
1938 else
1939 r = sprintf(buf, "\n");
1940 mutex_unlock(&trace_types_lock);
1941
4bf39a94 1942 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
1943}
1944
1945static ssize_t
1946tracing_set_trace_write(struct file *filp, const char __user *ubuf,
1947 size_t cnt, loff_t *ppos)
1948{
1949 struct trace_array *tr = &global_trace;
1950 struct tracer *t;
1951 char buf[max_tracer_type_len+1];
1952 int i;
1953
1954 if (cnt > max_tracer_type_len)
1955 cnt = max_tracer_type_len;
1956
1957 if (copy_from_user(&buf, ubuf, cnt))
1958 return -EFAULT;
1959
1960 buf[cnt] = 0;
1961
 1962	/* strip trailing whitespace. */
1963 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
1964 buf[i] = 0;
1965
1966 mutex_lock(&trace_types_lock);
1967 for (t = trace_types; t; t = t->next) {
1968 if (strcmp(t->name, buf) == 0)
1969 break;
1970 }
1971 if (!t || t == current_trace)
1972 goto out;
1973
1974 if (current_trace && current_trace->reset)
1975 current_trace->reset(tr);
1976
1977 current_trace = t;
1978 if (t->init)
1979 t->init(tr);
1980
1981 out:
1982 mutex_unlock(&trace_types_lock);
1983
1984 filp->f_pos += cnt;
1985
1986 return cnt;
1987}
1988
1989static ssize_t
1990tracing_max_lat_read(struct file *filp, char __user *ubuf,
1991 size_t cnt, loff_t *ppos)
1992{
1993 unsigned long *ptr = filp->private_data;
1994 char buf[64];
1995 int r;
1996
1997 r = snprintf(buf, 64, "%ld\n",
1998 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
1999 if (r > 64)
2000 r = 64;
4bf39a94 2001 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2002}
2003
2004static ssize_t
2005tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2006 size_t cnt, loff_t *ppos)
2007{
2008 long *ptr = filp->private_data;
2009 long val;
2010 char buf[64];
2011
2012 if (cnt > 63)
2013 cnt = 63;
2014
2015 if (copy_from_user(&buf, ubuf, cnt))
2016 return -EFAULT;
2017
2018 buf[cnt] = 0;
2019
2020 val = simple_strtoul(buf, NULL, 10);
2021
2022 *ptr = val * 1000;
2023
2024 return cnt;
2025}
2026
b3806b43
SR
2027static atomic_t tracing_reader;
2028
2029static int tracing_open_pipe(struct inode *inode, struct file *filp)
2030{
2031 struct trace_iterator *iter;
2032
2033 if (tracing_disabled)
2034 return -ENODEV;
2035
 2036	/* We only allow one reader of the pipe */
2037 if (atomic_inc_return(&tracing_reader) != 1) {
2038 atomic_dec(&tracing_reader);
2039 return -EBUSY;
2040 }
2041
2042 /* create a buffer to store the information to pass to userspace */
2043 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2044 if (!iter)
2045 return -ENOMEM;
2046
2047 iter->tr = &global_trace;
2048
2049 filp->private_data = iter;
2050
2051 return 0;
2052}
2053
2054static int tracing_release_pipe(struct inode *inode, struct file *file)
2055{
2056 struct trace_iterator *iter = file->private_data;
2057
2058 kfree(iter);
2059 atomic_dec(&tracing_reader);
2060
2061 return 0;
2062}
2063
2a2cc8f7
SSP
2064static unsigned int
2065tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2066{
2067 struct trace_iterator *iter = filp->private_data;
2068
2069 if (trace_flags & TRACE_ITER_BLOCK) {
2070 /*
2071 * Always select as readable when in blocking mode
2072 */
2073 return POLLIN | POLLRDNORM;
2074 }
2075 else {
2076 if (!trace_empty(iter))
2077 return POLLIN | POLLRDNORM;
2078 poll_wait(filp, &trace_wait, poll_table);
2079 if (!trace_empty(iter))
2080 return POLLIN | POLLRDNORM;
2081
2082 return 0;
2083 }
2084}
2085
b3806b43
SR
2086/*
2087 * Consumer reader.
2088 */
2089static ssize_t
2090tracing_read_pipe(struct file *filp, char __user *ubuf,
2091 size_t cnt, loff_t *ppos)
2092{
2093 struct trace_iterator *iter = filp->private_data;
2094 struct trace_array_cpu *data;
2095 static cpumask_t mask;
b3806b43
SR
2096 static int start;
2097 unsigned long flags;
25770467 2098#ifdef CONFIG_FTRACE
2e0f5761 2099 int ftrace_save;
25770467 2100#endif
b3806b43
SR
2101 int read = 0;
2102 int cpu;
2103 int len;
2104 int ret;
2105
2106 /* return any leftover data */
2107 if (iter->seq.len > start) {
2108 len = iter->seq.len - start;
2109 if (cnt > len)
2110 cnt = len;
2111 ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
2112 if (ret)
2113 cnt = -EFAULT;
2114
2115 start += len;
2116
2117 return cnt;
2118 }
2119
2120 trace_seq_reset(&iter->seq);
2121 start = 0;
2122
2123 while (trace_empty(iter)) {
2a2cc8f7
SSP
2124 if (!(trace_flags & TRACE_ITER_BLOCK))
2125 return -EWOULDBLOCK;
b3806b43
SR
 2126		/*
 2127		 * This is a makeshift waitqueue. The reasons we don't use
 2128		 * an actual wait queue are:
 2129		 * 1) we only ever have one waiter
 2130		 * 2) the tracer traces all functions, and we don't want
 2131		 *    the overhead of calling wake_up and friends
 2132		 *    (and tracing them too)
 2133		 * Anyway, this is a really primitive wakeup.
 2134		 */
2135 set_current_state(TASK_INTERRUPTIBLE);
2136 iter->tr->waiter = current;
2137
2138 /* sleep for one second, and try again. */
2139 schedule_timeout(HZ);
2140
2141 iter->tr->waiter = NULL;
2142
2143 if (signal_pending(current))
2144 return -EINTR;
2145
 2146		/*
 2147		 * We keep blocking until we have read something and tracing
 2148		 * has been disabled. We still block if tracing is disabled
 2149		 * but we have never read anything: this allows a user to cat
 2150		 * this file and then enable tracing. Once we have read
 2151		 * something, we return EOF when tracing is disabled again.
 2152		 *
 2153		 * iter->pos will be 0 if we haven't read anything.
 2154		 */
2155 if (!tracer_enabled && iter->pos)
2156 break;
2157
2158 continue;
2159 }
2160
2161 /* stop when tracing is finished */
2162 if (trace_empty(iter))
2163 return 0;
2164
2165 if (cnt >= PAGE_SIZE)
2166 cnt = PAGE_SIZE - 1;
2167
2168 memset(iter, 0, sizeof(*iter));
2169 iter->tr = &global_trace;
2170 iter->pos = -1;
2171
 2172	/*
 2173	 * We need to stop all tracing on all CPUs to read
 2174	 * the next buffer. This is a bit expensive, but it is
 2175	 * not done often. We read all that we can,
 2176	 * and then release the locks again.
 2177	 */
2178
2179 cpus_clear(mask);
2180 local_irq_save(flags);
25770467 2181#ifdef CONFIG_FTRACE
2e0f5761
IM
2182 ftrace_save = ftrace_enabled;
2183 ftrace_enabled = 0;
25770467 2184#endif
2e0f5761 2185 smp_wmb();
b3806b43
SR
2186 for_each_possible_cpu(cpu) {
2187 data = iter->tr->data[cpu];
2188
2189 if (!head_page(data) || !data->trace_idx)
2190 continue;
2191
2192 atomic_inc(&data->disabled);
b3806b43
SR
2193 cpu_set(cpu, mask);
2194 }
2195
2e0f5761
IM
2196 for_each_cpu_mask(cpu, mask) {
2197 data = iter->tr->data[cpu];
2198 spin_lock(&data->lock);
2199 }
2200
088b1e42
SR
2201 while (find_next_entry_inc(iter) != NULL) {
2202 int len = iter->seq.len;
2203
f9896bf3 2204 ret = print_trace_line(iter);
088b1e42
SR
2205 if (!ret) {
2206 /* don't print partial lines */
2207 iter->seq.len = len;
b3806b43 2208 break;
088b1e42 2209 }
b3806b43
SR
2210
2211 trace_consume(iter);
2212
2213 if (iter->seq.len >= cnt)
2214 break;
b3806b43
SR
2215 }
2216
d4c5a2f5 2217 for_each_cpu_mask(cpu, mask) {
b3806b43 2218 data = iter->tr->data[cpu];
b3806b43 2219 spin_unlock(&data->lock);
2e0f5761
IM
2220 }
2221
2222 for_each_cpu_mask(cpu, mask) {
2223 data = iter->tr->data[cpu];
b3806b43
SR
2224 atomic_dec(&data->disabled);
2225 }
25770467 2226#ifdef CONFIG_FTRACE
2e0f5761 2227 ftrace_enabled = ftrace_save;
25770467 2228#endif
b3806b43
SR
2229 local_irq_restore(flags);
2230
2231 /* Now copy what we have to the user */
2232 read = iter->seq.len;
2233 if (read > cnt)
2234 read = cnt;
2235
2236 ret = copy_to_user(ubuf, iter->seq.buffer, read);
2237
2238 if (read < iter->seq.len)
2239 start = read;
2240 else
2241 trace_seq_reset(&iter->seq);
2242
2243 if (ret)
2244 read = -EFAULT;
2245
2246 return read;
2247}
2248
bc0c38d1 2249static struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
2250 .open = tracing_open_generic,
2251 .read = tracing_max_lat_read,
2252 .write = tracing_max_lat_write,
bc0c38d1
SR
2253};
2254
2255static struct file_operations tracing_ctrl_fops = {
4bf39a94
IM
2256 .open = tracing_open_generic,
2257 .read = tracing_ctrl_read,
2258 .write = tracing_ctrl_write,
bc0c38d1
SR
2259};
2260
2261static struct file_operations set_tracer_fops = {
4bf39a94
IM
2262 .open = tracing_open_generic,
2263 .read = tracing_set_trace_read,
2264 .write = tracing_set_trace_write,
bc0c38d1
SR
2265};
2266
b3806b43 2267static struct file_operations tracing_pipe_fops = {
4bf39a94 2268 .open = tracing_open_pipe,
2a2cc8f7 2269 .poll = tracing_poll_pipe,
4bf39a94
IM
2270 .read = tracing_read_pipe,
2271 .release = tracing_release_pipe,
b3806b43
SR
2272};
2273
bc0c38d1
SR
2274#ifdef CONFIG_DYNAMIC_FTRACE
2275
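/*
 * tracing_read_long() formats a single unsigned long for userspace; it is
 * used read-only for the dyn_ftrace_total_info file, which exposes
 * ftrace_update_tot_cnt. A simple "cat" of that file is enough to read it
 * (the exact path depends on where debugfs is mounted).
 */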
2276static ssize_t
2277tracing_read_long(struct file *filp, char __user *ubuf,
2278 size_t cnt, loff_t *ppos)
2279{
2280 unsigned long *p = filp->private_data;
2281 char buf[64];
2282 int r;
2283
2284 r = sprintf(buf, "%ld\n", *p);
4bf39a94
IM
2285
2286 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
2287}
2288
2289static struct file_operations tracing_read_long_fops = {
4bf39a94
IM
2290 .open = tracing_open_generic,
2291 .read = tracing_read_long,
bc0c38d1
SR
2292};
2293#endif
2294
2295static struct dentry *d_tracer;
2296
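/*
 * tracing_init_dentry() returns the debugfs "tracing" directory, creating
 * it on first use and warning only once if that fails. Every control file
 * below is created beneath this directory, so debugfs must be mounted for
 * any of them to be reachable (the mount point is configuration-dependent).
 */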
2297struct dentry *tracing_init_dentry(void)
2298{
2299 static int once;
2300
2301 if (d_tracer)
2302 return d_tracer;
2303
2304 d_tracer = debugfs_create_dir("tracing", NULL);
2305
2306 if (!d_tracer && !once) {
2307 once = 1;
2308 pr_warning("Could not create debugfs directory 'tracing'\n");
2309 return NULL;
2310 }
2311
2312 return d_tracer;
2313}
2314
60a11774
SR
2315#ifdef CONFIG_FTRACE_SELFTEST
2316/* Let selftest have access to static functions in this file */
2317#include "trace_selftest.c"
2318#endif
2319
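/*
 * tracer_init_debugfs() populates the tracing directory with the control
 * files: tracing_enabled, iter_ctrl, latency_trace, trace,
 * available_tracers, current_tracer, tracing_max_latency, tracing_thresh,
 * README, trace_pipe and, with CONFIG_DYNAMIC_FTRACE, dyn_ftrace_total_info.
 * Failure to create an entry is only reported via pr_warning(); setup
 * continues with the remaining files.
 *
 * Example session (a sketch; /sys/kernel/debug is an assumed mount point):
 *
 *   cat  /sys/kernel/debug/tracing/available_tracers
 *   echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *   cat  /sys/kernel/debug/tracing/trace_pipe
 */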
bc0c38d1
SR
2320static __init void tracer_init_debugfs(void)
2321{
2322 struct dentry *d_tracer;
2323 struct dentry *entry;
2324
2325 d_tracer = tracing_init_dentry();
2326
2327 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2328 &global_trace, &tracing_ctrl_fops);
2329 if (!entry)
2330 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2331
2332 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
2333 NULL, &tracing_iter_fops);
2334 if (!entry)
2335 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
2336
2337 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2338 &global_trace, &tracing_lt_fops);
2339 if (!entry)
2340 pr_warning("Could not create debugfs 'latency_trace' entry\n");
2341
2342 entry = debugfs_create_file("trace", 0444, d_tracer,
2343 &global_trace, &tracing_fops);
2344 if (!entry)
2345 pr_warning("Could not create debugfs 'trace' entry\n");
2346
2347 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2348 &global_trace, &show_traces_fops);
2349 if (!entry)
 2350		pr_warning("Could not create debugfs 'available_tracers' entry\n");
2351
2352 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2353 &global_trace, &set_tracer_fops);
2354 if (!entry)
 2355		pr_warning("Could not create debugfs 'current_tracer' entry\n");
2356
2357 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2358 &tracing_max_latency,
2359 &tracing_max_lat_fops);
2360 if (!entry)
2361 pr_warning("Could not create debugfs "
2362 "'tracing_max_latency' entry\n");
2363
2364 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2365 &tracing_thresh, &tracing_max_lat_fops);
2366 if (!entry)
2367 pr_warning("Could not create debugfs "
2368 "'tracing_threash' entry\n");
7bd2f24c
IM
2369 entry = debugfs_create_file("README", 0644, d_tracer,
2370 NULL, &tracing_readme_fops);
2371 if (!entry)
2372 pr_warning("Could not create debugfs 'README' entry\n");
2373
b3806b43
SR
2374 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2375 NULL, &tracing_pipe_fops);
2376 if (!entry)
2377 pr_warning("Could not create debugfs "
2378 "'tracing_threash' entry\n");
bc0c38d1
SR
2379
2380#ifdef CONFIG_DYNAMIC_FTRACE
2381 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2382 &ftrace_update_tot_cnt,
2383 &tracing_read_long_fops);
2384 if (!entry)
2385 pr_warning("Could not create debugfs "
2386 "'dyn_ftrace_total_info' entry\n");
2387#endif
2388}
2389
2390/* dummy tracer used to disable tracing */
2391static struct tracer no_tracer __read_mostly =
2392{
4bf39a94 2393 .name = "none",
bc0c38d1
SR
2394};
2395
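/*
 * trace_alloc_page() grows each per-CPU ring buffer by one page (two pages
 * per CPU with CONFIG_TRACER_MAX_TRACE: one for global_trace, one for
 * max_tr). All pages are allocated up front so a failure can be unwound
 * without touching the live buffers; they are then spliced onto each CPU's
 * trace_pages list, with the PageLRU bit used to tell the two buffers apart.
 */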
4c11d7ae 2396static int trace_alloc_page(void)
bc0c38d1 2397{
4c11d7ae 2398 struct trace_array_cpu *data;
4c11d7ae
SR
2399 struct page *page, *tmp;
2400 LIST_HEAD(pages);
c7aafc54 2401 void *array;
4c11d7ae
SR
2402 int i;
2403
2404 /* first allocate a page for each CPU */
2405 for_each_possible_cpu(i) {
2406 array = (void *)__get_free_page(GFP_KERNEL);
2407 if (array == NULL) {
 2408			printk(KERN_ERR "tracer: failed to allocate page "
2409 "for trace buffer!\n");
2410 goto free_pages;
2411 }
2412
2413 page = virt_to_page(array);
2414 list_add(&page->lru, &pages);
2415
2416/* Only allocate if we are actually using the max trace */
2417#ifdef CONFIG_TRACER_MAX_TRACE
2418 array = (void *)__get_free_page(GFP_KERNEL);
2419 if (array == NULL) {
 2420			printk(KERN_ERR "tracer: failed to allocate page "
2421 "for trace buffer!\n");
2422 goto free_pages;
2423 }
2424 page = virt_to_page(array);
2425 list_add(&page->lru, &pages);
2426#endif
2427 }
2428
 2429	/* Now that we have successfully allocated a page per CPU, add them */
2430 for_each_possible_cpu(i) {
2431 data = global_trace.data[i];
b3806b43 2432 spin_lock_init(&data->lock);
d4c5a2f5 2433 lockdep_set_class(&data->lock, &data->lock_key);
4c11d7ae 2434 page = list_entry(pages.next, struct page, lru);
c7aafc54 2435 list_del_init(&page->lru);
4c11d7ae
SR
2436 list_add_tail(&page->lru, &data->trace_pages);
2437 ClearPageLRU(page);
2438
2439#ifdef CONFIG_TRACER_MAX_TRACE
2440 data = max_tr.data[i];
b3806b43 2441 spin_lock_init(&data->lock);
d4c5a2f5 2442 lockdep_set_class(&data->lock, &data->lock_key);
4c11d7ae 2443 page = list_entry(pages.next, struct page, lru);
c7aafc54 2444 list_del_init(&page->lru);
4c11d7ae
SR
2445 list_add_tail(&page->lru, &data->trace_pages);
2446 SetPageLRU(page);
2447#endif
2448 }
2449 global_trace.entries += ENTRIES_PER_PAGE;
2450
2451 return 0;
2452
2453 free_pages:
2454 list_for_each_entry_safe(page, tmp, &pages, lru) {
c7aafc54 2455 list_del_init(&page->lru);
4c11d7ae
SR
2456 __free_page(page);
2457 }
2458 return -ENOMEM;
bc0c38d1
SR
2459}
2460
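/*
 * tracer_alloc_buffers() is the fs_initcall that brings tracing up: it
 * hands every CPU an initial page for global_trace (and max_tr when
 * CONFIG_TRACER_MAX_TRACE is set), keeps calling trace_alloc_page() until
 * at least trace_nr_entries entries fit, creates the debugfs files,
 * registers the "none" tracer as the default and only then clears
 * tracing_disabled.
 */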
2461__init static int tracer_alloc_buffers(void)
2462{
4c11d7ae
SR
2463 struct trace_array_cpu *data;
2464 void *array;
2465 struct page *page;
2466 int pages = 0;
60a11774 2467 int ret = -ENOMEM;
bc0c38d1
SR
2468 int i;
2469
26994ead
SR
2470 global_trace.ctrl = tracer_enabled;
2471
4c11d7ae 2472 /* Allocate the first page for all buffers */
bc0c38d1 2473 for_each_possible_cpu(i) {
4c11d7ae 2474 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
bc0c38d1
SR
2475 max_tr.data[i] = &per_cpu(max_data, i);
2476
4c11d7ae 2477 array = (void *)__get_free_page(GFP_KERNEL);
bc0c38d1 2478 if (array == NULL) {
4c11d7ae
SR
 2479			printk(KERN_ERR "tracer: failed to allocate page "
2480 "for trace buffer!\n");
bc0c38d1
SR
2481 goto free_buffers;
2482 }
4c11d7ae
SR
2483
 2484		/* initialize the list and add the page backing this array */
2485 INIT_LIST_HEAD(&data->trace_pages);
2486 page = virt_to_page(array);
2487 list_add(&page->lru, &data->trace_pages);
2488 /* use the LRU flag to differentiate the two buffers */
2489 ClearPageLRU(page);
bc0c38d1
SR
2490
2491/* Only allocate if we are actually using the max trace */
2492#ifdef CONFIG_TRACER_MAX_TRACE
4c11d7ae 2493 array = (void *)__get_free_page(GFP_KERNEL);
bc0c38d1 2494 if (array == NULL) {
4c11d7ae
SR
 2495			printk(KERN_ERR "tracer: failed to allocate page "
2496 "for trace buffer!\n");
bc0c38d1
SR
2497 goto free_buffers;
2498 }
4c11d7ae
SR
2499
2500 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
2501 page = virt_to_page(array);
2502 list_add(&page->lru, &max_tr.data[i]->trace_pages);
2503 SetPageLRU(page);
bc0c38d1
SR
2504#endif
2505 }
2506
2507 /*
2508 * Since we allocate by orders of pages, we may be able to
2509 * round up a bit.
2510 */
4c11d7ae 2511 global_trace.entries = ENTRIES_PER_PAGE;
4c11d7ae
SR
2512 pages++;
2513
2514 while (global_trace.entries < trace_nr_entries) {
2515 if (trace_alloc_page())
2516 break;
2517 pages++;
2518 }
89b2f978 2519 max_tr.entries = global_trace.entries;
bc0c38d1 2520
4c11d7ae
SR
2521 pr_info("tracer: %d pages allocated for %ld",
2522 pages, trace_nr_entries);
bc0c38d1
SR
2523 pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
2524 pr_info(" actual entries %ld\n", global_trace.entries);
2525
2526 tracer_init_debugfs();
2527
2528 trace_init_cmdlines();
2529
2530 register_tracer(&no_tracer);
2531 current_trace = &no_tracer;
2532
60a11774
SR
2533 /* All seems OK, enable tracing */
2534 tracing_disabled = 0;
2535
bc0c38d1
SR
2536 return 0;
2537
2538 free_buffers:
2539 for (i-- ; i >= 0; i--) {
4c11d7ae 2540 struct page *page, *tmp;
bc0c38d1
SR
2541 struct trace_array_cpu *data = global_trace.data[i];
2542
c7aafc54 2543 if (data) {
4c11d7ae
SR
2544 list_for_each_entry_safe(page, tmp,
2545 &data->trace_pages, lru) {
c7aafc54 2546 list_del_init(&page->lru);
4c11d7ae
SR
2547 __free_page(page);
2548 }
bc0c38d1
SR
2549 }
2550
2551#ifdef CONFIG_TRACER_MAX_TRACE
2552 data = max_tr.data[i];
c7aafc54 2553 if (data) {
4c11d7ae
SR
2554 list_for_each_entry_safe(page, tmp,
2555 &data->trace_pages, lru) {
c7aafc54 2556 list_del_init(&page->lru);
4c11d7ae
SR
2557 __free_page(page);
2558 }
bc0c38d1
SR
2559 }
2560#endif
2561 }
60a11774 2562 return ret;
bc0c38d1 2563}
60a11774 2564fs_initcall(tracer_alloc_buffers);