/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS      (RB_FL_OVERWRITE)

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, but concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * Kill all tracing for good (never come back).
 * tracing_disabled is initialized to 1, and is set to zero only once,
 * when the initialization of the tracer succeeds. Nothing else ever
 * sets it back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

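/*
 * ftrace_disable_cpu()/ftrace_enable_cpu() bracket sections that read
 * or reset the ring buffer, so that the function tracer does not
 * recurse into the buffer on this CPU while it is being read (see the
 * check of ftrace_cpu_disabled in trace_function()).
 */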
static inline void ftrace_disable_cpu(void)
{
        preempt_disable();
        local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
        local_dec(&__get_cpu_var(ftrace_cpu_disabled));
        preempt_enable();
}

static cpumask_var_t __read_mostly      tracing_buffer_mask;

/* Define which cpu buffers are currently read in trace_pipe */
static cpumask_var_t                    tracing_reader_cpumask;

#define for_each_tracing_cpu(cpu)       \
        for_each_cpu(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define BOOTUP_TRACER_SIZE              100
static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_ftrace(char *str)
{
        /* strlcpy() guarantees NUL termination, unlike strncpy() */
        strlcpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = 1;
        return 1;
}
__setup("ftrace=", set_ftrace);
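
/* Example: booting with "ftrace=function_graph" selects that tracer early. */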

static int __init set_ftrace_dump_on_oops(char *str)
{
        ftrace_dump_on_oops = 1;
        return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptors of the pages in memory are used to hold this
 * linked list, by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array       global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
                                 struct ftrace_event_call *call, void *rec,
                                 struct ring_buffer_event *event)
{
        return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!global_trace.buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
        ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

        return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while they continue examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array       max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int                      tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
        return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set low: 16384 entries. If a dump
 * on oops happens, we do not want to wait for a huge amount of
 * output. This is configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer            *current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify allocating the buffers
 * used when reading tracer names from userspace. We keep track of
 * the longest registered tracer name.
 */
static int                      max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME;

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. It is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
        /*
         * The runqueue_is_locked() can fail, but this is the best we
         * have for now:
         */
        if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
                wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries cannot be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
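
/*
 * Example (boot command line): memparse() accepts K/M/G suffixes,
 * so "trace_buf_size=2M" and "trace_buf_size=2097152" are equivalent.
 */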

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "sched-tree",
        "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        "latency-format",
        "sleep-time",
        "graph-time",
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
} trace_clocks[] = {
        { trace_clock_local,    "local" },
        { trace_clock_global,   "global" },
};

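/*
 * trace_clock_id indexes the trace_clocks[] array above. It is
 * normally changed at run time by writing "local" or "global" to
 * the trace_clock file in the tracing debugfs directory.
 */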
int trace_clock_id;

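/*
 * trace_seq_to_user - copy the contents of a trace_seq to user space
 *
 * Returns the number of bytes copied (consuming them from the seq),
 * 0 if @cnt is zero, -EBUSY if everything up to s->len has already
 * been read, or -EFAULT if nothing at all could be copied.
 */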
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (!cnt)
                return 0;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret == cnt)
                return -EFAULT;

        cnt -= ret;

        s->readpos += cnt;
        return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;
        void *ret;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = memcpy(buf, s->buffer + s->readpos, cnt);
        if (!ret)
                return -EFAULT;

        s->readpos += cnt;
        return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr, so
 * it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
 */
static raw_spinlock_t ftrace_max_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly     tracing_max_latency;
unsigned long __read_mostly     tracing_thresh;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];
        struct trace_array_cpu *max_data;

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        max_data = max_tr.data[cpu];
        max_data->saved_latency = tracing_max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        /* copy into max_data, not the live data, which keeps tracing */
        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        max_data->uid = task_uid(tsk);
        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf = tr->buffer;

        if (trace_stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);

        tr->buffer = max_tr.buffer;
        max_tr.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (trace_stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);

        ftrace_disable_cpu();

        ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk(&max_tr, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        ftrace_enable_cpu();

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        /*
         * When this gets called we hold the BKL which means that
         * preemption is disabled. Various trace selftests however
         * need to disable and enable preemption for successful tests.
         * So we drop the BKL here and grab it after the tests again.
         */
        unlock_kernel();
        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Trace %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
        if (!type->wait_pipe)
                type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest && !tracing_selftest_disabled) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array *tr = &global_trace;

                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
                tracing_reset_online_cpus(tr);

                current_trace = type;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
                tracing_reset_online_cpus(tr);

                printk(KERN_CONT "PASSED\n");
        }
#endif

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break them */
        tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        lock_kernel();
        return ret;
}

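/*
 * Example: a minimal tracer plugin. This is an illustrative sketch only
 * (the "example" name and the trivial callbacks are hypothetical, not
 * part of this file), compiled out via #if 0.
 */
#if 0
static int example_trace_init(struct trace_array *tr)
{
        /* the init callback sets up state and starts tracing */
        return 0;
}

static void example_trace_reset(struct trace_array *tr)
{
        /* the reset callback undoes whatever init set up */
}

static struct tracer example_tracer __read_mostly = {
        .name   = "example",
        .init   = example_trace_init,
        .reset  = example_trace_reset,
};

static int __init init_example_tracer(void)
{
        return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif
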
void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Trace %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;

        if (type == current_trace && tracer_enabled) {
                tracer_enabled = 0;
                tracing_stop();
                if (current_trace->stop)
                        current_trace->stop(&global_trace);
                current_trace = &nop_trace;
        }

        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}

static void __tracing_reset(struct trace_array *tr, int cpu)
{
        ftrace_disable_cpu();
        ring_buffer_reset_cpu(tr->buffer, cpu);
        ftrace_enable_cpu();
}

void tracing_reset(struct trace_array *tr, int cpu)
{
        struct ring_buffer *buffer = tr->buffer;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();
        __tracing_reset(tr, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
        struct ring_buffer *buffer = tr->buffer;
        int cpu;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                __tracing_reset(tr, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
        tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
        tracing_reset_online_cpus(&global_trace);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
        tracing_disabled = 1;
        ftrace_stop();
        tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        spin_lock_irqsave(&tracing_start_lock, flags);
        if (--trace_stop_count) {
                if (trace_stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        trace_stop_count = 0;
                }
                goto out;
        }

        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

        buffer = max_tr.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

        ftrace_start();
 out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * A lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        ftrace_stop();
        spin_lock_irqsave(&tracing_start_lock, flags);
        if (trace_stop_count++)
                goto out;

        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

        buffer = max_tr.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

 out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned pid, idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!__raw_spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx == NO_CMDLINE_MAP) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                /*
                 * Check whether the cmdline buffer at idx has a pid
                 * mapped. We are going to overwrite that entry so we
                 * need to clear the map_pid_to_cmdline. Otherwise we
                 * would read the new comm for the old pid.
                 */
                pid = map_cmdline_to_pid[idx];
                if (pid != NO_CMDLINE_MAP)
                        map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

                map_cmdline_to_pid[idx] = tsk->pid;
                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        __raw_spin_unlock(&trace_cmdline_lock);
}

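/*
 * trace_find_cmdline - look up the comm saved for a pid
 *
 * Copies the saved comm for @pid into @comm. Pid 0 maps to "<idle>";
 * pids that are out of range, were never recorded, or whose slot in
 * the cmdline cache has been reused map to "<...>".
 */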
void trace_find_cmdline(int pid, char comm[])
{
        unsigned map;

        if (!pid) {
                strcpy(comm, "<idle>");
                return;
        }

        if (pid > PID_MAX_DEFAULT) {
                strcpy(comm, "<...>");
                return;
        }

        preempt_disable();
        __raw_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map != NO_CMDLINE_MAP)
                strcpy(comm, saved_cmdlines[map]);
        else
                strcpy(comm, "<...>");

        __raw_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
            !tracing_is_on())
                return;

        trace_save_cmdline(tsk);
}

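/*
 * tracing_generic_entry_update - fill in an entry's common header
 *
 * Records the pid/tgid of current, the preemption count, and the
 * TRACE_FLAG_* bits derived from @flags and @pc (irqs off, hardirq,
 * softirq, need-resched).
 */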
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                             int pc)
{
        struct task_struct *tsk = current;

        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
        entry->tgid                     = (tsk) ? tsk->tgid : 0;
        entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
                TRACE_FLAG_IRQS_NOSUPPORT |
#endif
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

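/*
 * trace_buffer_lock_reserve - reserve an event in the ring buffer
 *
 * Reserves @len bytes in @buffer and, on success, fills in the common
 * header via tracing_generic_entry_update(). The caller writes the
 * type-specific payload and then commits (or discards) the event;
 * trace_function() below shows the typical reserve/fill/commit pattern.
 */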
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned long flags, int pc)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL) {
                struct trace_entry *ent = ring_buffer_event_data(event);

                tracing_generic_entry_update(ent, flags, pc);
                ent->type = type;
        }

        return event;
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
                             struct ring_buffer_event *event,
                             unsigned long flags, int pc,
                             int wake)
{
        ring_buffer_unlock_commit(buffer, event);

        ftrace_trace_stack(buffer, flags, 6, pc);
        ftrace_trace_userstack(buffer, flags, pc);

        if (wake)
                trace_wake_up();
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
                                  int type, unsigned long len,
                                  unsigned long flags, int pc)
{
        *current_rb = global_trace.buffer;
        return trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
                                       struct ring_buffer_event *event,
                                       unsigned long flags, int pc)
{
        __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event)
{
        ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
{
        struct ftrace_event_call *call = &event_function;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;

        /* If we are reading the ring buffer, don't trace */
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
                                          flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;

        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc)
{
        struct ftrace_event_call *call = &event_kernel_stack;
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;

        event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        memset(&entry->caller, 0, sizeof(entry->caller));

        trace.nr_entries        = 0;
        trace.max_entries       = FTRACE_STACK_ENTRIES;
        trace.skip              = skip;
        trace.entries           = entry->caller;

        save_stack_trace(&trace);
        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
                        int skip, int pc)
{
        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;

        __ftrace_trace_stack(buffer, flags, skip, pc);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                   int pc)
{
        __ftrace_trace_stack(tr->buffer, flags, skip, pc);
}

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_user_stack;
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
        struct stack_trace trace;

        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);

        memset(&entry->caller, 0, sizeof(entry->caller));

        trace.nr_entries        = 0;
        trace.max_entries       = FTRACE_STACK_ENTRIES;
        trace.skip              = 0;
        trace.entries           = entry->caller;

        save_stack_trace_user(&trace);
        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
        ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

static void
ftrace_trace_special(void *__tr,
                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
                     int pc)
{
        struct ring_buffer_event *event;
        struct trace_array *tr = __tr;
        struct ring_buffer *buffer = tr->buffer;
        struct special_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
                                          sizeof(*entry), 0, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->arg1                     = arg1;
        entry->arg2                     = arg2;
        entry->arg3                     = arg3;
        trace_buffer_unlock_commit(buffer, event, 0, pc);
}

void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (tracing_disabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (likely(atomic_inc_return(&data->disabled) == 1))
                ftrace_trace_special(tr, arg1, arg2, arg3, pc);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

/**
 * trace_vbprintk - write a binary (bprint) message into the tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
        static raw_spinlock_t trace_buf_lock =
                (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        static u32 trace_buf[TRACE_BUF_SIZE];

        struct ftrace_event_call *call = &event_bprint;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        struct bprint_entry *entry;
        unsigned long flags;
        int disable;
        int resched;
        int cpu, len = 0, size, pc;

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        /* Don't pollute graph traces with trace_vprintk internals */
        pause_graph_tracing();

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        disable = atomic_inc_return(&data->disabled);
        if (unlikely(disable != 1))
                goto out;

        /* Lockdep uses trace_printk for lock tracing */
        local_irq_save(flags);
        __raw_spin_lock(&trace_buf_lock);
        len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

        if (len > TRACE_BUF_SIZE || len < 0)
                goto out_unlock;

        size = sizeof(*entry) + sizeof(u32) * len;
        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
                                          flags, pc);
        if (!event)
                goto out_unlock;
        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->fmt                      = fmt;

        memcpy(entry->buf, trace_buf, sizeof(u32) * len);
        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);

out_unlock:
        __raw_spin_unlock(&trace_buf_lock);
        local_irq_restore(flags);

out:
        atomic_dec_return(&data->disabled);
        ftrace_preempt_enable(resched);
        unpause_graph_tracing();

        return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

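/*
 * Example: with a constant format string, a trace_printk() call such as
 * the (hypothetical) one below is normally routed to trace_vbprintk(),
 * which stores the format pointer plus the binary arguments instead of
 * the formatted string. Compiled out via #if 0.
 */
#if 0
trace_printk("read %d bytes from cpu %d\n", len, cpu);
#endif
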
int trace_array_printk(struct trace_array *tr,
                       unsigned long ip, const char *fmt, ...)
{
        int ret;
        va_list ap;

        if (!(trace_flags & TRACE_ITER_PRINTK))
                return 0;

        va_start(ap, fmt);
        ret = trace_array_vprintk(tr, ip, fmt, ap);
        va_end(ap);
        return ret;
}

int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
{
        static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
        static char trace_buf[TRACE_BUF_SIZE];

        struct ftrace_event_call *call = &event_print;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_array_cpu *data;
        int cpu, len = 0, size, pc;
        struct print_entry *entry;
        unsigned long irq_flags;
        int disable;

        if (tracing_disabled || tracing_selftest_running)
                return 0;

        pc = preempt_count();
        preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        disable = atomic_inc_return(&data->disabled);
        if (unlikely(disable != 1))
                goto out;

        pause_graph_tracing();
        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&trace_buf_lock);
        len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

        len = min(len, TRACE_BUF_SIZE-1);
        trace_buf[len] = 0;

        size = sizeof(*entry) + len + 1;
        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                          irq_flags, pc);
        if (!event)
                goto out_unlock;
        entry = ring_buffer_event_data(event);
        entry->ip                       = ip;

        memcpy(&entry->buf, trace_buf, len);
        entry->buf[len] = 0;
        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);

 out_unlock:
        __raw_spin_unlock(&trace_buf_lock);
        raw_local_irq_restore(irq_flags);
        unpause_graph_tracing();
 out:
        atomic_dec_return(&data->disabled);
        preempt_enable_notrace();

        return len;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
        /* forward the va_list to the vprintk variant, not the variadic one */
        return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
};

static void trace_iterator_increment(struct trace_iterator *iter)
{
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();

        iter->idx++;
        if (iter->buffer_iter[iter->cpu])
                ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

        ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();

        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
        else
                event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

        ftrace_enable_cpu();

        return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
        struct ring_buffer *buffer = iter->tr->buffer;
        struct trace_entry *ent, *next = NULL;
        int cpu_file = iter->cpu_file;
        u64 next_ts = 0, ts;
        int next_cpu = -1;
        int cpu;

        /*
         * If we are in a per_cpu trace file, don't bother iterating
         * over all the cpus; peek directly at that cpu's buffer.
         */
        if (cpu_file > TRACE_PIPE_ALL_CPU) {
                if (ring_buffer_empty_cpu(buffer, cpu_file))
                        return NULL;
                ent = peek_next_entry(iter, cpu_file, ent_ts);
                if (ent_cpu)
                        *ent_cpu = cpu_file;

                return ent;
        }

        for_each_tracing_cpu(cpu) {

                if (ring_buffer_empty_cpu(buffer, cpu))
                        continue;

                ent = peek_next_entry(iter, cpu, &ts);

                /*
                 * Pick the entry with the smallest timestamp:
                 */
                if (ent && (!next || ts < next_ts)) {
                        next = ent;
                        next_cpu = cpu;
                        next_ts = ts;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        if (ent_ts)
                *ent_ts = next_ts;

        return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts)
{
        return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

        if (iter->ent)
                trace_iterator_increment(iter);

        return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();
        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
        ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        int i = (int)*pos;
        void *ent;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        return ent;
}

static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
        struct trace_array *tr = iter->tr;
        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter;
        unsigned long entries = 0;
        u64 ts;

        tr->data[cpu]->skipped_entries = 0;

        if (!iter->buffer_iter[cpu])
                return;

        buf_iter = iter->buffer_iter[cpu];
        ring_buffer_iter_reset(buf_iter);

        /*
         * With the max latency tracers, a reset may never have taken
         * place on a cpu. This shows up as timestamps that lie before
         * the start of the buffer.
         */
        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
                if (ts >= iter->tr->time_start)
                        break;
                entries++;
                ring_buffer_read(buf_iter, NULL);
        }

        tr->data[cpu]->skipped_entries = entries;
}

/*
 * No necessary locking here. The worst thing which can
 * happen is losing events consumed at the same time
 * by a trace_pipe reader.
 * Other than that, we don't risk crashing the ring buffer
 * because it serializes the readers.
 *
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        static struct tracer *old_tracer;
        int cpu_file = iter->cpu_file;
        void *p = NULL;
        loff_t l = 0;
        int cpu;

        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
        if (unlikely(old_tracer != current_trace && current_trace)) {
                old_tracer = current_trace;
                *iter->trace = *current_trace;
        }
        mutex_unlock(&trace_types_lock);

        atomic_inc(&trace_record_cmdline_disabled);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;

                ftrace_disable_cpu();

                if (cpu_file == TRACE_PIPE_ALL_CPU) {
                        for_each_tracing_cpu(cpu)
                                tracing_iter_reset(iter, cpu);
                } else
                        tracing_iter_reset(iter, cpu_file);

                ftrace_enable_cpu();

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos - 1;
                p = s_next(m, p, &l);
        }

        trace_event_read_lock();
        return p;
}

static void s_stop(struct seq_file *m, void *p)
{
        atomic_dec(&trace_record_cmdline_disabled);
        trace_event_read_unlock();
}


static void print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                  _------=> CPU#            \n");
        seq_puts(m, "#                 / _-----=> irqs-off        \n");
        seq_puts(m, "#                | / _----=> need-resched    \n");
        seq_puts(m, "#                || / _---=> hardirq/softirq \n");
        seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#                |||| /                      \n");
        seq_puts(m, "#                |||||     delay             \n");
        seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |       |          |         |\n");
}

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long entries = 0;
        unsigned long total = 0;
        unsigned long count;
        const char *name = "preemption";
        int cpu;

        if (type)
                name = type->name;

        for_each_tracing_cpu(cpu) {
                count = ring_buffer_entries_cpu(tr->buffer, cpu);
                /*
                 * If this buffer has skipped entries, then we hold all
                 * entries for the trace and we need to ignore the
                 * ones before the time stamp.
                 */
                if (tr->data[cpu]->skipped_entries) {
                        count -= tr->data[cpu]->skipped_entries;
                        /* total is the same as the entries */
                        total += count;
                } else
                        total += count +
                                ring_buffer_overrun_cpu(tr->buffer, cpu);
                entries += count;
        }

        seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "# -----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "#    -----------------\n");
        seq_printf(m, "#    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "#    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, "#  => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n#  => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n#\n");
        }

        seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;

        if (!(trace_flags & TRACE_ITER_ANNOTATE))
                return;

        if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
                return;

        if (cpumask_test_cpu(iter->cpu, iter->started))
                return;

        if (iter->tr->data[iter->cpu]->skipped_entries)
                return;

        cpumask_set_cpu(iter->cpu, iter->started);

        /* Don't print the started-cpu annotation for the first entry of the trace */
        if (iter->idx > 1)
                trace_seq_printf(s, "##### CPU %u buffer started ####\n",
                                iter->cpu);
}
1649
1650 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1651 {
1652         struct trace_seq *s = &iter->seq;
1653         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1654         struct trace_entry *entry;
1655         struct trace_event *event;
1656
1657         entry = iter->ent;
1658
1659         test_cpu_buff_start(iter);
1660
1661         event = ftrace_find_event(entry->type);
1662
1663         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1664                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1665                         if (!trace_print_lat_context(iter))
1666                                 goto partial;
1667                 } else {
1668                         if (!trace_print_context(iter))
1669                                 goto partial;
1670                 }
1671         }
1672
1673         if (event)
1674                 return event->trace(iter, sym_flags);
1675
1676         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
1677                 goto partial;
1678
1679         return TRACE_TYPE_HANDLED;
1680 partial:
1681         return TRACE_TYPE_PARTIAL_LINE;
1682 }
1683
1684 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1685 {
1686         struct trace_seq *s = &iter->seq;
1687         struct trace_entry *entry;
1688         struct trace_event *event;
1689
1690         entry = iter->ent;
1691
1692         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1693                 if (!trace_seq_printf(s, "%d %d %llu ",
1694                                       entry->pid, iter->cpu, iter->ts))
1695                         goto partial;
1696         }
1697
1698         event = ftrace_find_event(entry->type);
1699         if (event)
1700                 return event->raw(iter, 0);
1701
1702         if (!trace_seq_printf(s, "%d ?\n", entry->type))
1703                 goto partial;
1704
1705         return TRACE_TYPE_HANDLED;
1706 partial:
1707         return TRACE_TYPE_PARTIAL_LINE;
1708 }
1709
1710 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1711 {
1712         struct trace_seq *s = &iter->seq;
1713         unsigned char newline = '\n';
1714         struct trace_entry *entry;
1715         struct trace_event *event;
1716
1717         entry = iter->ent;
1718
1719         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1720                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1721                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1722                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
1723         }
1724
1725         event = ftrace_find_event(entry->type);
1726         if (event) {
1727                 enum print_line_t ret = event->hex(iter, 0);
1728                 if (ret != TRACE_TYPE_HANDLED)
1729                         return ret;
1730         }
1731
1732         SEQ_PUT_FIELD_RET(s, newline);
1733
1734         return TRACE_TYPE_HANDLED;
1735 }
1736
1737 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1738 {
1739         struct trace_seq *s = &iter->seq;
1740         struct trace_entry *entry;
1741         struct trace_event *event;
1742
1743         entry = iter->ent;
1744
1745         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1746                 SEQ_PUT_FIELD_RET(s, entry->pid);
1747                 SEQ_PUT_FIELD_RET(s, iter->cpu);
1748                 SEQ_PUT_FIELD_RET(s, iter->ts);
1749         }
1750
1751         event = ftrace_find_event(entry->type);
1752         return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
1753 }
1754
1755 static int trace_empty(struct trace_iterator *iter)
1756 {
1757         int cpu;
1758
1759         /* If we are looking at one CPU buffer, only check that one */
1760         if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
1761                 cpu = iter->cpu_file;
1762                 if (iter->buffer_iter[cpu]) {
1763                         if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1764                                 return 0;
1765                 } else {
1766                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1767                                 return 0;
1768                 }
1769                 return 1;
1770         }
1771
1772         for_each_tracing_cpu(cpu) {
1773                 if (iter->buffer_iter[cpu]) {
1774                         if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1775                                 return 0;
1776                 } else {
1777                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1778                                 return 0;
1779                 }
1780         }
1781
1782         return 1;
1783 }
1784
1785 /* Called with trace_event_read_lock() held. */
1786 static enum print_line_t print_trace_line(struct trace_iterator *iter)
1787 {
1788         enum print_line_t ret;
1789
1790         if (iter->trace && iter->trace->print_line) {
1791                 ret = iter->trace->print_line(iter);
1792                 if (ret != TRACE_TYPE_UNHANDLED)
1793                         return ret;
1794         }
1795
1796         if (iter->ent->type == TRACE_BPRINT &&
1797                         trace_flags & TRACE_ITER_PRINTK &&
1798                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
1799                 return trace_print_bprintk_msg_only(iter);
1800
1801         if (iter->ent->type == TRACE_PRINT &&
1802                         trace_flags & TRACE_ITER_PRINTK &&
1803                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
1804                 return trace_print_printk_msg_only(iter);
1805
1806         if (trace_flags & TRACE_ITER_BIN)
1807                 return print_bin_fmt(iter);
1808
1809         if (trace_flags & TRACE_ITER_HEX)
1810                 return print_hex_fmt(iter);
1811
1812         if (trace_flags & TRACE_ITER_RAW)
1813                 return print_raw_fmt(iter);
1814
1815         return print_trace_fmt(iter);
1816 }
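
/*
 * Note the precedence in print_trace_line() above: a tracer's own
 * print_line() callback wins first, then the printk-msg-only
 * shortcuts, then the bin/hex/raw output flags, and finally the
 * default human-readable format from print_trace_fmt().
 */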
1817
1818 static int s_show(struct seq_file *m, void *v)
1819 {
1820         struct trace_iterator *iter = v;
1821
1822         if (iter->ent == NULL) {
1823                 if (iter->tr) {
1824                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
1825                         seq_puts(m, "#\n");
1826                 }
1827                 if (iter->trace && iter->trace->print_header)
1828                         iter->trace->print_header(m);
1829                 else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1830                         /* print nothing if the buffers are empty */
1831                         if (trace_empty(iter))
1832                                 return 0;
1833                         print_trace_header(m, iter);
1834                         if (!(trace_flags & TRACE_ITER_VERBOSE))
1835                                 print_lat_help_header(m);
1836                 } else {
1837                         if (!(trace_flags & TRACE_ITER_VERBOSE))
1838                                 print_func_help_header(m);
1839                 }
1840         } else {
1841                 print_trace_line(iter);
1842                 trace_print_seq(m, &iter->seq);
1843         }
1844
1845         return 0;
1846 }
1847
1848 static struct seq_operations tracer_seq_ops = {
1849         .start          = s_start,
1850         .next           = s_next,
1851         .stop           = s_stop,
1852         .show           = s_show,
1853 };
1854
1855 static struct trace_iterator *
1856 __tracing_open(struct inode *inode, struct file *file)
1857 {
1858         long cpu_file = (long) inode->i_private;
1859         void *fail_ret = ERR_PTR(-ENOMEM);
1860         struct trace_iterator *iter;
1861         struct seq_file *m;
1862         int cpu, ret;
1863
1864         if (tracing_disabled)
1865                 return ERR_PTR(-ENODEV);
1866
1867         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1868         if (!iter)
1869                 return ERR_PTR(-ENOMEM);
1870
1871         /*
1872          * We make a copy of the current tracer to avoid concurrent
1873          * changes on it while we are reading.
1874          */
1875         mutex_lock(&trace_types_lock);
1876         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
1877         if (!iter->trace)
1878                 goto fail;
1879
1880         if (current_trace)
1881                 *iter->trace = *current_trace;
1882
1883         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
1884                 goto fail;
1885
1886         cpumask_clear(iter->started);
1887
1888         if (current_trace && current_trace->print_max)
1889                 iter->tr = &max_tr;
1890         else
1891                 iter->tr = &global_trace;
1892         iter->pos = -1;
1893         mutex_init(&iter->mutex);
1894         iter->cpu_file = cpu_file;
1895
1896         /* Notify the tracer early; before we stop tracing. */
1897         if (iter->trace && iter->trace->open)
1898                 iter->trace->open(iter);
1899
1900         /* Annotate start of buffers if we had overruns */
1901         if (ring_buffer_overruns(iter->tr->buffer))
1902                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
1903
1904         /* stop the trace while dumping */
1905         tracing_stop();
1906
1907         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
1908                 for_each_tracing_cpu(cpu) {
1909
1910                         iter->buffer_iter[cpu] =
1911                                 ring_buffer_read_start(iter->tr->buffer, cpu);
1912                         tracing_iter_reset(iter, cpu);
1913                 }
1914         } else {
1915                 cpu = iter->cpu_file;
1916                 iter->buffer_iter[cpu] =
1917                                 ring_buffer_read_start(iter->tr->buffer, cpu);
1918                 tracing_iter_reset(iter, cpu);
1919         }
1920
1921         ret = seq_open(file, &tracer_seq_ops);
1922         if (ret < 0) {
1923                 fail_ret = ERR_PTR(ret);
1924                 goto fail_buffer;
1925         }
1926
1927         m = file->private_data;
1928         m->private = iter;
1929
1930         mutex_unlock(&trace_types_lock);
1931
1932         return iter;
1933
1934  fail_buffer:
1935         for_each_tracing_cpu(cpu) {
1936                 if (iter->buffer_iter[cpu])
1937                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
1938         }
1939         free_cpumask_var(iter->started);
1940         tracing_start();
1941  fail:
1942         mutex_unlock(&trace_types_lock);
1943         kfree(iter->trace);
1944         kfree(iter);
1945
1946         return fail_ret;
1947 }
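
/*
 * Note that __tracing_open() stops tracing for the duration of the
 * dump; tracing_release() below calls tracing_start() again when the
 * file is closed.
 */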
1948
1949 int tracing_open_generic(struct inode *inode, struct file *filp)
1950 {
1951         if (tracing_disabled)
1952                 return -ENODEV;
1953
1954         filp->private_data = inode->i_private;
1955         return 0;
1956 }
1957
1958 static int tracing_release(struct inode *inode, struct file *file)
1959 {
1960         struct seq_file *m = (struct seq_file *)file->private_data;
1961         struct trace_iterator *iter;
1962         int cpu;
1963
1964         if (!(file->f_mode & FMODE_READ))
1965                 return 0;
1966
1967         iter = m->private;
1968
1969         mutex_lock(&trace_types_lock);
1970         for_each_tracing_cpu(cpu) {
1971                 if (iter->buffer_iter[cpu])
1972                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
1973         }
1974
1975         if (iter->trace && iter->trace->close)
1976                 iter->trace->close(iter);
1977
1978         /* re-enable tracing if it was previously enabled */
1979         tracing_start();
1980         mutex_unlock(&trace_types_lock);
1981
1982         seq_release(inode, file);
1983         mutex_destroy(&iter->mutex);
1984         free_cpumask_var(iter->started);
1985         kfree(iter->trace);
1986         kfree(iter);
1987         return 0;
1988 }
1989
1990 static int tracing_open(struct inode *inode, struct file *file)
1991 {
1992         struct trace_iterator *iter;
1993         int ret = 0;
1994
1995         /* If this file was open for write, then erase contents */
1996         if ((file->f_mode & FMODE_WRITE) &&
1997             (file->f_flags & O_TRUNC)) {
1998                 long cpu = (long) inode->i_private;
1999
2000                 if (cpu == TRACE_PIPE_ALL_CPU)
2001                         tracing_reset_online_cpus(&global_trace);
2002                 else
2003                         tracing_reset(&global_trace, cpu);
2004         }
2005
2006         if (file->f_mode & FMODE_READ) {
2007                 iter = __tracing_open(inode, file);
2008                 if (IS_ERR(iter))
2009                         ret = PTR_ERR(iter);
2010                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2011                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
2012         }
2013         return ret;
2014 }
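
/*
 * Opening "trace" for writing with O_TRUNC is how user-space clears
 * the buffers, e.g. (path as in the mini-HOWTO further down):
 *
 *   # echo > /sys/kernel/debug/tracing/trace
 */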
2015
2016 static void *
2017 t_next(struct seq_file *m, void *v, loff_t *pos)
2018 {
2019         struct tracer *t = v;
2020
2021         (*pos)++;
2022
2023         if (t)
2024                 t = t->next;
2025
2026         return t;
2027 }
2028
2029 static void *t_start(struct seq_file *m, loff_t *pos)
2030 {
2031         struct tracer *t;
2032         loff_t l = 0;
2033
2034         mutex_lock(&trace_types_lock);
2035         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2036                 ;
2037
2038         return t;
2039 }
2040
2041 static void t_stop(struct seq_file *m, void *p)
2042 {
2043         mutex_unlock(&trace_types_lock);
2044 }
2045
2046 static int t_show(struct seq_file *m, void *v)
2047 {
2048         struct tracer *t = v;
2049
2050         if (!t)
2051                 return 0;
2052
2053         seq_printf(m, "%s", t->name);
2054         if (t->next)
2055                 seq_putc(m, ' ');
2056         else
2057                 seq_putc(m, '\n');
2058
2059         return 0;
2060 }
2061
2062 static struct seq_operations show_traces_seq_ops = {
2063         .start          = t_start,
2064         .next           = t_next,
2065         .stop           = t_stop,
2066         .show           = t_show,
2067 };
2068
2069 static int show_traces_open(struct inode *inode, struct file *file)
2070 {
2071         if (tracing_disabled)
2072                 return -ENODEV;
2073
2074         return seq_open(file, &show_traces_seq_ops);
2075 }
2076
2077 static ssize_t
2078 tracing_write_stub(struct file *filp, const char __user *ubuf,
2079                    size_t count, loff_t *ppos)
2080 {
2081         return count;
2082 }
2083
2084 static const struct file_operations tracing_fops = {
2085         .open           = tracing_open,
2086         .read           = seq_read,
2087         .write          = tracing_write_stub,
2088         .llseek         = seq_lseek,
2089         .release        = tracing_release,
2090 };
2091
2092 static const struct file_operations show_traces_fops = {
2093         .open           = show_traces_open,
2094         .read           = seq_read,
2095         .release        = seq_release,
2096 };
2097
2098 /*
2099  * Only trace on a CPU if the bitmask is set:
2100  */
2101 static cpumask_var_t tracing_cpumask;
2102
2103 /*
2104  * The tracer itself will not take this lock, but we still want
2105  * to provide a consistent cpumask to user-space:
2106  */
2107 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2108
2109 /*
2110  * Temporary storage for the character representation of the
2111  * CPU bitmask (and one more byte for the newline):
2112  */
2113 static char mask_str[NR_CPUS + 1];
2114
2115 static ssize_t
2116 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2117                      size_t count, loff_t *ppos)
2118 {
2119         int len;
2120
2121         mutex_lock(&tracing_cpumask_update_lock);
2122
2123         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2124         if (count - len < 2) {
2125                 count = -EINVAL;
2126                 goto out_err;
2127         }
2128         len += sprintf(mask_str + len, "\n");
2129         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2130
2131 out_err:
2132         mutex_unlock(&tracing_cpumask_update_lock);
2133
2134         return count;
2135 }
2136
2137 static ssize_t
2138 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2139                       size_t count, loff_t *ppos)
2140 {
2141         int err, cpu;
2142         cpumask_var_t tracing_cpumask_new;
2143
2144         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2145                 return -ENOMEM;
2146
2147         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2148         if (err)
2149                 goto err_unlock;
2150
2151         mutex_lock(&tracing_cpumask_update_lock);
2152
2153         local_irq_disable();
2154         __raw_spin_lock(&ftrace_max_lock);
2155         for_each_tracing_cpu(cpu) {
2156                 /*
2157                  * Increase/decrease the disabled counter if we are
2158                  * about to flip a bit in the cpumask:
2159                  */
2160                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2161                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2162                         atomic_inc(&global_trace.data[cpu]->disabled);
2163                 }
2164                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2165                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2166                         atomic_dec(&global_trace.data[cpu]->disabled);
2167                 }
2168         }
2169         __raw_spin_unlock(&ftrace_max_lock);
2170         local_irq_enable();
2171
2172         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2173
2174         mutex_unlock(&tracing_cpumask_update_lock);
2175         free_cpumask_var(tracing_cpumask_new);
2176
2177         return count;
2178
2179 err_unlock:
2180         free_cpumask_var(tracing_cpumask_new);
2181
2182         return err;
2183 }
2184
2185 static const struct file_operations tracing_cpumask_fops = {
2186         .open           = tracing_open_generic,
2187         .read           = tracing_cpumask_read,
2188         .write          = tracing_cpumask_write,
2189 };
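
/*
 * The mask is exchanged as a hex string (cpumask_scnprintf() on read,
 * cpumask_parse_user() on write). For example, to trace only CPUs 0
 * and 1:
 *
 *   # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 */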
2190
2191 static ssize_t
2192 tracing_trace_options_read(struct file *filp, char __user *ubuf,
2193                        size_t cnt, loff_t *ppos)
2194 {
2195         struct tracer_opt *trace_opts;
2196         u32 tracer_flags;
2197         int len = 0;
2198         char *buf;
2199         int r = 0;
2200         int i;
2201
2202
2203         /* calculate max size */
2204         for (i = 0; trace_options[i]; i++) {
2205                 len += strlen(trace_options[i]);
2206                 len += 3; /* "no" and newline */
2207         }
2208
2209         mutex_lock(&trace_types_lock);
2210         tracer_flags = current_trace->flags->val;
2211         trace_opts = current_trace->flags->opts;
2212
2213         /*
2214          * Increase the size with the names of the options specific
2215          * to the current tracer.
2216          */
2217         for (i = 0; trace_opts[i].name; i++) {
2218                 len += strlen(trace_opts[i].name);
2219                 len += 3; /* "no" and newline */
2220         }
2221
2222         /* +1 for \0 */
2223         buf = kmalloc(len + 1, GFP_KERNEL);
2224         if (!buf) {
2225                 mutex_unlock(&trace_types_lock);
2226                 return -ENOMEM;
2227         }
2228
2229         for (i = 0; trace_options[i]; i++) {
2230                 if (trace_flags & (1 << i))
2231                         r += sprintf(buf + r, "%s\n", trace_options[i]);
2232                 else
2233                         r += sprintf(buf + r, "no%s\n", trace_options[i]);
2234         }
2235
2236         for (i = 0; trace_opts[i].name; i++) {
2237                 if (tracer_flags & trace_opts[i].bit)
2238                         r += sprintf(buf + r, "%s\n",
2239                                 trace_opts[i].name);
2240                 else
2241                         r += sprintf(buf + r, "no%s\n",
2242                                 trace_opts[i].name);
2243         }
2244         mutex_unlock(&trace_types_lock);
2245
2246         WARN_ON(r >= len + 1);
2247
2248         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2249
2250         kfree(buf);
2251         return r;
2252 }
2253
2254 /* Try to assign a tracer specific option */
2255 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2256 {
2257         struct tracer_flags *tracer_flags = trace->flags;
2258         struct tracer_opt *opts = NULL;
2259         int ret = 0, i = 0;
2260         int len;
2261
2262         for (i = 0; tracer_flags->opts[i].name; i++) {
2263                 opts = &tracer_flags->opts[i];
2264                 len = strlen(opts->name);
2265
2266                 if (strncmp(cmp, opts->name, len) == 0) {
2267                         ret = trace->set_flag(tracer_flags->val,
2268                                 opts->bit, !neg);
2269                         break;
2270                 }
2271         }
2272         /* Not found */
2273         if (!tracer_flags->opts[i].name)
2274                 return -EINVAL;
2275
2276         /* Refused to handle */
2277         if (ret)
2278                 return ret;
2279
2280         if (neg)
2281                 tracer_flags->val &= ~opts->bit;
2282         else
2283                 tracer_flags->val |= opts->bit;
2284
2285         return 0;
2286 }
2287
2288 static void set_tracer_flags(unsigned int mask, int enabled)
2289 {
2290         /* do nothing if flag is already set */
2291         if (!!(trace_flags & mask) == !!enabled)
2292                 return;
2293
2294         if (enabled)
2295                 trace_flags |= mask;
2296         else
2297                 trace_flags &= ~mask;
2298 }
2299
2300 static ssize_t
2301 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2302                         size_t cnt, loff_t *ppos)
2303 {
2304         char buf[64];
2305         char *cmp = buf;
2306         int neg = 0;
2307         int ret;
2308         int i;
2309
2310         if (cnt >= sizeof(buf))
2311                 return -EINVAL;
2312
2313         if (copy_from_user(&buf, ubuf, cnt))
2314                 return -EFAULT;
2315
2316         buf[cnt] = 0;
2317
2318         if (strncmp(buf, "no", 2) == 0) {
2319                 neg = 1;
2320                 cmp += 2;
2321         }
2322
2323         for (i = 0; trace_options[i]; i++) {
2324                 int len = strlen(trace_options[i]);
2325
2326                 if (strncmp(cmp, trace_options[i], len) == 0) {
2327                         set_tracer_flags(1 << i, !neg);
2328                         break;
2329                 }
2330         }
2331
2332         /* If no option could be set, test the specific tracer options */
2333         if (!trace_options[i]) {
2334                 mutex_lock(&trace_types_lock);
2335                 ret = set_tracer_option(current_trace, cmp, neg);
2336                 mutex_unlock(&trace_types_lock);
2337                 if (ret)
2338                         return ret;
2339         }
2340
2341         filp->f_pos += cnt;
2342
2343         return cnt;
2344 }
2345
2346 static const struct file_operations tracing_iter_fops = {
2347         .open           = tracing_open_generic,
2348         .read           = tracing_trace_options_read,
2349         .write          = tracing_trace_options_write,
2350 };
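
/*
 * Options are toggled by writing their name, prefixed with "no" to
 * clear them, exactly as tracing_trace_options_write() parses above:
 *
 *   # echo print-parent > /sys/kernel/debug/tracing/trace_options
 *   # echo nosym-offset > /sys/kernel/debug/tracing/trace_options
 */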
2351
2352 static const char readme_msg[] =
2353         "tracing mini-HOWTO:\n\n"
2354         "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2355         "# cat /sys/kernel/debug/tracing/available_tracers\n"
2356         "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
2357         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2358         "nop\n"
2359         "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
2360         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2361         "sched_switch\n"
2362         "# cat /sys/kernel/debug/tracing/trace_options\n"
2363         "noprint-parent nosym-offset nosym-addr noverbose\n"
2364         "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2365         "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
2366         "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2367         "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
2368 ;
2369
2370 static ssize_t
2371 tracing_readme_read(struct file *filp, char __user *ubuf,
2372                        size_t cnt, loff_t *ppos)
2373 {
2374         return simple_read_from_buffer(ubuf, cnt, ppos,
2375                                         readme_msg, strlen(readme_msg));
2376 }
2377
2378 static const struct file_operations tracing_readme_fops = {
2379         .open           = tracing_open_generic,
2380         .read           = tracing_readme_read,
2381 };
2382
2383 static ssize_t
2384 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2385                                 size_t cnt, loff_t *ppos)
2386 {
2387         char *buf_comm;
2388         char *file_buf;
2389         char *buf;
2390         int len = 0;
2391         int pid;
2392         int i;
2393
2394         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2395         if (!file_buf)
2396                 return -ENOMEM;
2397
2398         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2399         if (!buf_comm) {
2400                 kfree(file_buf);
2401                 return -ENOMEM;
2402         }
2403
2404         buf = file_buf;
2405
2406         for (i = 0; i < SAVED_CMDLINES; i++) {
2407                 int r;
2408
2409                 pid = map_cmdline_to_pid[i];
2410                 if (pid == -1 || pid == NO_CMDLINE_MAP)
2411                         continue;
2412
2413                 trace_find_cmdline(pid, buf_comm);
2414                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2415                 buf += r;
2416                 len += r;
2417         }
2418
2419         len = simple_read_from_buffer(ubuf, cnt, ppos,
2420                                       file_buf, len);
2421
2422         kfree(file_buf);
2423         kfree(buf_comm);
2424
2425         return len;
2426 }
2427
2428 static const struct file_operations tracing_saved_cmdlines_fops = {
2429         .open           = tracing_open_generic,
2430         .read           = tracing_saved_cmdlines_read,
2431 };
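
/*
 * Each saved_cmdlines line is "<pid> <comm>", as built by the
 * sprintf() above; e.g. (pids and names are illustrative):
 *
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines
 *   1 init
 *   2894 bash
 */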
2432
2433 static ssize_t
2434 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2435                   size_t cnt, loff_t *ppos)
2436 {
2437         char buf[64];
2438         int r;
2439
2440         r = sprintf(buf, "%u\n", tracer_enabled);
2441         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2442 }
2443
2444 static ssize_t
2445 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2446                    size_t cnt, loff_t *ppos)
2447 {
2448         struct trace_array *tr = filp->private_data;
2449         char buf[64];
2450         unsigned long val;
2451         int ret;
2452
2453         if (cnt >= sizeof(buf))
2454                 return -EINVAL;
2455
2456         if (copy_from_user(&buf, ubuf, cnt))
2457                 return -EFAULT;
2458
2459         buf[cnt] = 0;
2460
2461         ret = strict_strtoul(buf, 10, &val);
2462         if (ret < 0)
2463                 return ret;
2464
2465         val = !!val;
2466
2467         mutex_lock(&trace_types_lock);
2468         if (tracer_enabled ^ val) {
2469                 if (val) {
2470                         tracer_enabled = 1;
2471                         if (current_trace->start)
2472                                 current_trace->start(tr);
2473                         tracing_start();
2474                 } else {
2475                         tracer_enabled = 0;
2476                         tracing_stop();
2477                         if (current_trace->stop)
2478                                 current_trace->stop(tr);
2479                 }
2480         }
2481         mutex_unlock(&trace_types_lock);
2482
2483         filp->f_pos += cnt;
2484
2485         return cnt;
2486 }
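
/*
 * Any non-zero write enables tracing and zero disables it (the value
 * is normalized by the !!val above):
 *
 *   # echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 */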
2487
2488 static ssize_t
2489 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2490                        size_t cnt, loff_t *ppos)
2491 {
2492         char buf[max_tracer_type_len+2];
2493         int r;
2494
2495         mutex_lock(&trace_types_lock);
2496         if (current_trace)
2497                 r = sprintf(buf, "%s\n", current_trace->name);
2498         else
2499                 r = sprintf(buf, "\n");
2500         mutex_unlock(&trace_types_lock);
2501
2502         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2503 }
2504
2505 int tracer_init(struct tracer *t, struct trace_array *tr)
2506 {
2507         tracing_reset_online_cpus(tr);
2508         return t->init(tr);
2509 }
2510
2511 static int tracing_resize_ring_buffer(unsigned long size)
2512 {
2513         int ret;
2514
2515         /*
2516          * If kernel or user changes the size of the ring buffer
2517          * we use the size that was given, and we can forget about
2518          * expanding it later.
2519          */
2520         ring_buffer_expanded = 1;
2521
2522         ret = ring_buffer_resize(global_trace.buffer, size);
2523         if (ret < 0)
2524                 return ret;
2525
2526         ret = ring_buffer_resize(max_tr.buffer, size);
2527         if (ret < 0) {
2528                 int r;
2529
2530                 r = ring_buffer_resize(global_trace.buffer,
2531                                        global_trace.entries);
2532                 if (r < 0) {
2533                         /*
2534                          * AARGH! We are left with a
2535                          * different-sized max buffer!
2536                          * The max buffer is our "snapshot" buffer.
2537                          * When a tracer needs a snapshot (one of the
2538                          * latency tracers), it swaps the max buffer
2539                          * with the saved snapshot. We succeeded in
2540                          * updating the size of the main buffer, but failed
2541                          * to update the size of the max buffer. But when we
2542                          * tried to reset the main buffer to its original
2543                          * size, we failed there too. This is very unlikely
2544                          * to happen, but if it does, warn and kill all
2545                          * tracing.
2546                          */
2547                         WARN_ON(1);
2548                         tracing_disabled = 1;
2549                 }
2550                 return ret;
2551         }
2552
2553         global_trace.entries = size;
2554
2555         return ret;
2556 }
2557
2558 /**
2559  * tracing_update_buffers - used by tracing facility to expand ring buffers
2560  *
2561  * To save memory when tracing is never used on a system that has it
2562  * configured in, the ring buffers are set to a minimum size. Once a
2563  * user starts to use the tracing facility, they need to grow to
2564  * their default size.
2565  *
2566  * This function is to be called when a tracer is about to be used.
2567  */
2568 int tracing_update_buffers(void)
2569 {
2570         int ret = 0;
2571
2572         mutex_lock(&trace_types_lock);
2573         if (!ring_buffer_expanded)
2574                 ret = tracing_resize_ring_buffer(trace_buf_size);
2575         mutex_unlock(&trace_types_lock);
2576
2577         return ret;
2578 }
2579
2580 struct trace_option_dentry;
2581
2582 static struct trace_option_dentry *
2583 create_trace_option_files(struct tracer *tracer);
2584
2585 static void
2586 destroy_trace_option_files(struct trace_option_dentry *topts);
2587
2588 static int tracing_set_tracer(const char *buf)
2589 {
2590         static struct trace_option_dentry *topts;
2591         struct trace_array *tr = &global_trace;
2592         struct tracer *t;
2593         int ret = 0;
2594
2595         mutex_lock(&trace_types_lock);
2596
2597         if (!ring_buffer_expanded) {
2598                 ret = tracing_resize_ring_buffer(trace_buf_size);
2599                 if (ret < 0)
2600                         goto out;
2601                 ret = 0;
2602         }
2603
2604         for (t = trace_types; t; t = t->next) {
2605                 if (strcmp(t->name, buf) == 0)
2606                         break;
2607         }
2608         if (!t) {
2609                 ret = -EINVAL;
2610                 goto out;
2611         }
2612         if (t == current_trace)
2613                 goto out;
2614
2615         trace_branch_disable();
2616         if (current_trace && current_trace->reset)
2617                 current_trace->reset(tr);
2618
2619         destroy_trace_option_files(topts);
2620
2621         current_trace = t;
2622
2623         topts = create_trace_option_files(current_trace);
2624
2625         if (t->init) {
2626                 ret = tracer_init(t, tr);
2627                 if (ret)
2628                         goto out;
2629         }
2630
2631         trace_branch_enable(tr);
2632  out:
2633         mutex_unlock(&trace_types_lock);
2634
2635         return ret;
2636 }
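
/*
 * Note that selecting a tracer is also what first expands the ring
 * buffer from its boot-time minimum to trace_buf_size (see the
 * ring_buffer_expanded check above).
 */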
2637
2638 static ssize_t
2639 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2640                         size_t cnt, loff_t *ppos)
2641 {
2642         char buf[max_tracer_type_len+1];
2643         int i;
2644         size_t ret;
2645         int err;
2646
2647         ret = cnt;
2648
2649         if (cnt > max_tracer_type_len)
2650                 cnt = max_tracer_type_len;
2651
2652         if (copy_from_user(&buf, ubuf, cnt))
2653                 return -EFAULT;
2654
2655         buf[cnt] = 0;
2656
2657         /* Strip trailing whitespace. */
2658         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2659                 buf[i] = 0;
2660
2661         err = tracing_set_tracer(buf);
2662         if (err)
2663                 return err;
2664
2665         filp->f_pos += ret;
2666
2667         return ret;
2668 }
2669
2670 static ssize_t
2671 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2672                      size_t cnt, loff_t *ppos)
2673 {
2674         unsigned long *ptr = filp->private_data;
2675         char buf[64];
2676         int r;
2677
2678         r = snprintf(buf, sizeof(buf), "%ld\n",
2679                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2680         if (r > sizeof(buf))
2681                 r = sizeof(buf);
2682         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2683 }
2684
2685 static ssize_t
2686 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2687                       size_t cnt, loff_t *ppos)
2688 {
2689         unsigned long *ptr = filp->private_data;
2690         char buf[64];
2691         unsigned long val;
2692         int ret;
2693
2694         if (cnt >= sizeof(buf))
2695                 return -EINVAL;
2696
2697         if (copy_from_user(&buf, ubuf, cnt))
2698                 return -EFAULT;
2699
2700         buf[cnt] = 0;
2701
2702         ret = strict_strtoul(buf, 10, &val);
2703         if (ret < 0)
2704                 return ret;
2705
2706         *ptr = val * 1000;
2707
2708         return cnt;
2709 }
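
/*
 * tracing_max_latency is read and written in microseconds, while the
 * stored value is in nanoseconds (hence nsecs_to_usecs() above and
 * the "val * 1000" on write). Writing 0 resets the recorded maximum:
 *
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */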
2710
2711 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2712 {
2713         long cpu_file = (long) inode->i_private;
2714         struct trace_iterator *iter;
2715         int ret = 0;
2716
2717         if (tracing_disabled)
2718                 return -ENODEV;
2719
2720         mutex_lock(&trace_types_lock);
2721
2722         /* We only allow one reader per cpu */
2723         if (cpu_file == TRACE_PIPE_ALL_CPU) {
2724                 if (!cpumask_empty(tracing_reader_cpumask)) {
2725                         ret = -EBUSY;
2726                         goto out;
2727                 }
2728                 cpumask_setall(tracing_reader_cpumask);
2729         } else {
2730                 if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
2731                         cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
2732                 else {
2733                         ret = -EBUSY;
2734                         goto out;
2735                 }
2736         }
2737
2738         /* create a buffer to store the information to pass to userspace */
2739         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2740         if (!iter) {
2741                 ret = -ENOMEM;
2742                 goto out;
2743         }
2744
2745         /*
2746          * We make a copy of the current tracer to avoid concurrent
2747          * changes on it while we are reading.
2748          */
2749         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
2750         if (!iter->trace) {
2751                 ret = -ENOMEM;
2752                 goto fail;
2753         }
2754         if (current_trace)
2755                 *iter->trace = *current_trace;
2756
2757         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
2758                 ret = -ENOMEM;
2759                 goto fail;
2760         }
2761
2762         /* trace pipe does not show start of buffer */
2763         cpumask_setall(iter->started);
2764
2765         if (trace_flags & TRACE_ITER_LATENCY_FMT)
2766                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2767
2768         iter->cpu_file = cpu_file;
2769         iter->tr = &global_trace;
2770         mutex_init(&iter->mutex);
2771         filp->private_data = iter;
2772
2773         if (iter->trace->pipe_open)
2774                 iter->trace->pipe_open(iter);
2775
2776 out:
2777         mutex_unlock(&trace_types_lock);
2778         return ret;
2779
2780 fail:
2781         kfree(iter->trace);
2782         kfree(iter);
2783         mutex_unlock(&trace_types_lock);
2784         return ret;
2785 }
2786
2787 static int tracing_release_pipe(struct inode *inode, struct file *file)
2788 {
2789         struct trace_iterator *iter = file->private_data;
2790
2791         mutex_lock(&trace_types_lock);
2792
2793         if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
2794                 cpumask_clear(tracing_reader_cpumask);
2795         else
2796                 cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
2797
2798         mutex_unlock(&trace_types_lock);
2799
2800         free_cpumask_var(iter->started);
2801         mutex_destroy(&iter->mutex);
2802         kfree(iter->trace);
2803         kfree(iter);
2804
2805         return 0;
2806 }
2807
2808 static unsigned int
2809 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2810 {
2811         struct trace_iterator *iter = filp->private_data;
2812
2813         if (trace_flags & TRACE_ITER_BLOCK) {
2814                 /*
2815                  * Always select as readable when in blocking mode
2816                  */
2817                 return POLLIN | POLLRDNORM;
2818         } else {
2819                 if (!trace_empty(iter))
2820                         return POLLIN | POLLRDNORM;
2821                 poll_wait(filp, &trace_wait, poll_table);
2822                 if (!trace_empty(iter))
2823                         return POLLIN | POLLRDNORM;
2824
2825                 return 0;
2826         }
2827 }
2828
2829
2830 void default_wait_pipe(struct trace_iterator *iter)
2831 {
2832         DEFINE_WAIT(wait);
2833
2834         prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
2835
2836         if (trace_empty(iter))
2837                 schedule();
2838
2839         finish_wait(&trace_wait, &wait);
2840 }
2841
2842 /*
2843  * This is a makeshift waitqueue.
2844  * A tracer might use this callback in some rare cases:
2845  *
2846  *  1) the current tracer might hold the runqueue lock when it wakes up
2847  *     a reader, hence a deadlock (sched, function, and function graph tracers)
2848  *  2) the function tracers trace all functions, and we don't want
2849  *     the overhead of calling wake_up and friends
2850  *     (and of tracing them too)
2851  *
2852  *     Anyway, this really is a very primitive wakeup.
2853  */
2854 void poll_wait_pipe(struct trace_iterator *iter)
2855 {
2856         set_current_state(TASK_INTERRUPTIBLE);
2857         /* sleep for 100 msecs, and try again. */
2858         schedule_timeout(HZ / 10);
2859 }
2860
2861 /* Must be called with iter->mutex held. */
2862 static int tracing_wait_pipe(struct file *filp)
2863 {
2864         struct trace_iterator *iter = filp->private_data;
2865
2866         while (trace_empty(iter)) {
2867
2868                 if ((filp->f_flags & O_NONBLOCK)) {
2869                         return -EAGAIN;
2870                 }
2871
2872                 mutex_unlock(&iter->mutex);
2873
2874                 iter->trace->wait_pipe(iter);
2875
2876                 mutex_lock(&iter->mutex);
2877
2878                 if (signal_pending(current))
2879                         return -EINTR;
2880
2881                 /*
2882                  * We block until we read something and tracing is disabled.
2883                  * We still block if tracing is disabled but we have not yet
2884                  * read anything. This allows a user to cat this file and
2885                  * then enable tracing. But after we have read something,
2886                  * we give an EOF when tracing is disabled again.
2887                  *
2888                  * iter->pos will be 0 if we haven't read anything.
2889                  */
2890                 if (!tracer_enabled && iter->pos)
2891                         break;
2892         }
2893
2894         return 1;
2895 }
2896
2897 /*
2898  * Consumer reader.
2899  */
2900 static ssize_t
2901 tracing_read_pipe(struct file *filp, char __user *ubuf,
2902                   size_t cnt, loff_t *ppos)
2903 {
2904         struct trace_iterator *iter = filp->private_data;
2905         static struct tracer *old_tracer;
2906         ssize_t sret;
2907
2908         /* return any leftover data */
2909         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2910         if (sret != -EBUSY)
2911                 return sret;
2912
2913         trace_seq_init(&iter->seq);
2914
2915         /* copy the tracer to avoid using a global lock all around */
2916         mutex_lock(&trace_types_lock);
2917         if (unlikely(old_tracer != current_trace && current_trace)) {
2918                 old_tracer = current_trace;
2919                 *iter->trace = *current_trace;
2920         }
2921         mutex_unlock(&trace_types_lock);
2922
2923         /*
2924          * Avoid more than one consumer on a single file descriptor
2925          * This is just a matter of traces coherency, the ring buffer itself
2926          * is protected.
2927          */
2928         mutex_lock(&iter->mutex);
2929         if (iter->trace->read) {
2930                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2931                 if (sret)
2932                         goto out;
2933         }
2934
2935 waitagain:
2936         sret = tracing_wait_pipe(filp);
2937         if (sret <= 0)
2938                 goto out;
2939
2940         /* stop when tracing is finished */
2941         if (trace_empty(iter)) {
2942                 sret = 0;
2943                 goto out;
2944         }
2945
2946         if (cnt >= PAGE_SIZE)
2947                 cnt = PAGE_SIZE - 1;
2948
2949         /* reset all but tr, trace, and overruns */
2950         memset(&iter->seq, 0,
2951                sizeof(struct trace_iterator) -
2952                offsetof(struct trace_iterator, seq));
2953         iter->pos = -1;
2954
2955         trace_event_read_lock();
2956         while (find_next_entry_inc(iter) != NULL) {
2957                 enum print_line_t ret;
2958                 int len = iter->seq.len;
2959
2960                 ret = print_trace_line(iter);
2961                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2962                         /* don't print partial lines */
2963                         iter->seq.len = len;
2964                         break;
2965                 }
2966                 if (ret != TRACE_TYPE_NO_CONSUME)
2967                         trace_consume(iter);
2968
2969                 if (iter->seq.len >= cnt)
2970                         break;
2971         }
2972         trace_event_read_unlock();
2973
2974         /* Now copy what we have to the user */
2975         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2976         if (iter->seq.readpos >= iter->seq.len)
2977                 trace_seq_init(&iter->seq);
2978
2979         /*
2980          * If there was nothing to send to the user, in spite of consuming
2981          * trace entries, go back to wait for more entries.
2982          */
2983         if (sret == -EBUSY)
2984                 goto waitagain;
2985
2986 out:
2987         mutex_unlock(&iter->mutex);
2988
2989         return sret;
2990 }
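
/*
 * A minimal user-space consumer of trace_pipe might look like the
 * sketch below (illustrative only). Reads block until entries are
 * available, per tracing_wait_pipe() above, and each read consumes
 * what it returns:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/debug/tracing/trace_pipe",
 *			      O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 */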
2991
2992 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
2993                                      struct pipe_buffer *buf)
2994 {
2995         __free_page(buf->page);
2996 }
2997
2998 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
2999                                      unsigned int idx)
3000 {
3001         __free_page(spd->pages[idx]);
3002 }
3003
3004 static struct pipe_buf_operations tracing_pipe_buf_ops = {
3005         .can_merge              = 0,
3006         .map                    = generic_pipe_buf_map,
3007         .unmap                  = generic_pipe_buf_unmap,
3008         .confirm                = generic_pipe_buf_confirm,
3009         .release                = tracing_pipe_buf_release,
3010         .steal                  = generic_pipe_buf_steal,
3011         .get                    = generic_pipe_buf_get,
3012 };
3013
3014 static size_t
3015 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3016 {
3017         size_t count;
3018         int ret;
3019
3020         /* Seq buffer is page-sized, exactly what we need. */
3021         for (;;) {
3022                 count = iter->seq.len;
3023                 ret = print_trace_line(iter);
3024                 count = iter->seq.len - count;
3025                 if (rem < count) {
3026                         rem = 0;
3027                         iter->seq.len -= count;
3028                         break;
3029                 }
3030                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3031                         iter->seq.len -= count;
3032                         break;
3033                 }
3034
3035                 if (ret != TRACE_TYPE_NO_CONSUME)
3036                         trace_consume(iter);
3037                 rem -= count;
3038                 if (!find_next_entry_inc(iter)) {
3039                         rem = 0;
3040                         iter->ent = NULL;
3041                         break;
3042                 }
3043         }
3044
3045         return rem;
3046 }
3047
3048 static ssize_t tracing_splice_read_pipe(struct file *filp,
3049                                         loff_t *ppos,
3050                                         struct pipe_inode_info *pipe,
3051                                         size_t len,
3052                                         unsigned int flags)
3053 {
3054         struct page *pages[PIPE_BUFFERS];
3055         struct partial_page partial[PIPE_BUFFERS];
3056         struct trace_iterator *iter = filp->private_data;
3057         struct splice_pipe_desc spd = {
3058                 .pages          = pages,
3059                 .partial        = partial,
3060                 .nr_pages       = 0, /* This gets updated below. */
3061                 .flags          = flags,
3062                 .ops            = &tracing_pipe_buf_ops,
3063                 .spd_release    = tracing_spd_release_pipe,
3064         };
3065         static struct tracer *old_tracer;
3066         ssize_t ret;
3067         size_t rem;
3068         unsigned int i;
3069
3070         /* copy the tracer to avoid using a global lock all around */
3071         mutex_lock(&trace_types_lock);
3072         if (unlikely(old_tracer != current_trace && current_trace)) {
3073                 old_tracer = current_trace;
3074                 *iter->trace = *current_trace;
3075         }
3076         mutex_unlock(&trace_types_lock);
3077
3078         mutex_lock(&iter->mutex);
3079
3080         if (iter->trace->splice_read) {
3081                 ret = iter->trace->splice_read(iter, filp,
3082                                                ppos, pipe, len, flags);
3083                 if (ret)
3084                         goto out_err;
3085         }
3086
3087         ret = tracing_wait_pipe(filp);
3088         if (ret <= 0)
3089                 goto out_err;
3090
3091         if (!iter->ent && !find_next_entry_inc(iter)) {
3092                 ret = -EFAULT;
3093                 goto out_err;
3094         }
3095
3096         trace_event_read_lock();
3097
3098         /* Fill as many pages as possible. */
3099         for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
3100                 pages[i] = alloc_page(GFP_KERNEL);
3101                 if (!pages[i])
3102                         break;
3103
3104                 rem = tracing_fill_pipe_page(rem, iter);
3105
3106                 /* Copy the data into the page, so we can start over. */
3107                 ret = trace_seq_to_buffer(&iter->seq,
3108                                           page_address(pages[i]),
3109                                           iter->seq.len);
3110                 if (ret < 0) {
3111                         __free_page(pages[i]);
3112                         break;
3113                 }
3114                 partial[i].offset = 0;
3115                 partial[i].len = iter->seq.len;
3116
3117                 trace_seq_init(&iter->seq);
3118         }
3119
3120         trace_event_read_unlock();
3121         mutex_unlock(&iter->mutex);
3122
3123         spd.nr_pages = i;
3124
3125         return splice_to_pipe(pipe, &spd);
3126
3127 out_err:
3128         mutex_unlock(&iter->mutex);
3129
3130         return ret;
3131 }
3132
3133 static ssize_t
3134 tracing_entries_read(struct file *filp, char __user *ubuf,
3135                      size_t cnt, loff_t *ppos)
3136 {
3137         struct trace_array *tr = filp->private_data;
3138         char buf[96];
3139         int r;
3140
3141         mutex_lock(&trace_types_lock);
3142         if (!ring_buffer_expanded)
3143                 r = sprintf(buf, "%lu (expanded: %lu)\n",
3144                             tr->entries >> 10,
3145                             trace_buf_size >> 10);
3146         else
3147                 r = sprintf(buf, "%lu\n", tr->entries >> 10);
3148         mutex_unlock(&trace_types_lock);
3149
3150         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3151 }
3152
3153 static ssize_t
3154 tracing_entries_write(struct file *filp, const char __user *ubuf,
3155                       size_t cnt, loff_t *ppos)
3156 {
3157         unsigned long val;
3158         char buf[64];
3159         int ret, cpu;
3160
3161         if (cnt >= sizeof(buf))
3162                 return -EINVAL;
3163
3164         if (copy_from_user(&buf, ubuf, cnt))
3165                 return -EFAULT;
3166
3167         buf[cnt] = 0;
3168
3169         ret = strict_strtoul(buf, 10, &val);
3170         if (ret < 0)
3171                 return ret;
3172
3173         /* must have at least 1 entry */
3174         if (!val)
3175                 return -EINVAL;
3176
3177         mutex_lock(&trace_types_lock);
3178
3179         tracing_stop();
3180
3181         /* disable all cpu buffers */
3182         for_each_tracing_cpu(cpu) {
3183                 if (global_trace.data[cpu])
3184                         atomic_inc(&global_trace.data[cpu]->disabled);
3185                 if (max_tr.data[cpu])
3186                         atomic_inc(&max_tr.data[cpu]->disabled);
3187         }
3188
3189         /* value is in KB */
3190         val <<= 10;
3191
3192         if (val != global_trace.entries) {
3193                 ret = tracing_resize_ring_buffer(val);
3194                 if (ret < 0) {
3195                         cnt = ret;
3196                         goto out;
3197                 }
3198         }
3199
3200         filp->f_pos += cnt;
3201
3202         /* If check pages failed, return ENOMEM */
3203         if (tracing_disabled)
3204                 cnt = -ENOMEM;
3205  out:
3206         for_each_tracing_cpu(cpu) {
3207                 if (global_trace.data[cpu])
3208                         atomic_dec(&global_trace.data[cpu]->disabled);
3209                 if (max_tr.data[cpu])
3210                         atomic_dec(&max_tr.data[cpu]->disabled);
3211         }
3212
3213         tracing_start();
3214         max_tr.entries = global_trace.entries;
3215         mutex_unlock(&trace_types_lock);
3216
3217         return cnt;
3218 }
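
/*
 * The size is exchanged in kilobytes (note the ">> 10" on read and
 * "val <<= 10" above). Assuming the usual buffer_size_kb file name,
 * this asks for 4MB buffers:
 *
 *   # echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 */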
3219
3220 static int mark_printk(const char *fmt, ...)
3221 {
3222         int ret;
3223         va_list args;
3224         va_start(args, fmt);
3225         ret = trace_vprintk(0, fmt, args);
3226         va_end(args);
3227         return ret;
3228 }
3229
3230 static ssize_t
3231 tracing_mark_write(struct file *filp, const char __user *ubuf,
3232                                         size_t cnt, loff_t *fpos)
3233 {
3234         char *buf;
3235         char *end;
3236
3237         if (tracing_disabled)
3238                 return -EINVAL;
3239
3240         if (cnt > TRACE_BUF_SIZE)
3241                 cnt = TRACE_BUF_SIZE;
3242
3243         buf = kmalloc(cnt + 1, GFP_KERNEL);
3244         if (buf == NULL)
3245                 return -ENOMEM;
3246
3247         if (copy_from_user(buf, ubuf, cnt)) {
3248                 kfree(buf);
3249                 return -EFAULT;
3250         }
3251
3252         /* Cut at the first NUL or newline. */
3253         buf[cnt] = '\0';
3254         end = strchr(buf, '\n');
3255         if (end)
3256                 *end = '\0';
3257
3258         cnt = mark_printk("%s\n", buf);
3259         kfree(buf);
3260         *fpos += cnt;
3261
3262         return cnt;
3263 }
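
/*
 * Writes to trace_marker are injected into the trace via
 * trace_vprintk(), so a trace can be annotated from a shell:
 *
 *   # echo "hit the interesting spot" > /sys/kernel/debug/tracing/trace_marker
 */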
3264
3265 static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
3266                                   size_t cnt, loff_t *ppos)
3267 {
3268         char buf[64];
3269         int bufiter = 0;
3270         int i;
3271
3272         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3273                 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
3274                         "%s%s%s%s", i ? " " : "",
3275                         i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3276                         i == trace_clock_id ? "]" : "");
3277         bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
3278
3279         return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
3280 }
3281
3282 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3283                                    size_t cnt, loff_t *fpos)
3284 {
3285         char buf[64];
3286         const char *clockstr;
3287         int i;
3288
3289         if (cnt >= sizeof(buf))
3290                 return -EINVAL;
3291
3292         if (copy_from_user(&buf, ubuf, cnt))
3293                 return -EFAULT;
3294
3295         buf[cnt] = 0;
3296
3297         clockstr = strstrip(buf);
3298
3299         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3300                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3301                         break;
3302         }
3303         if (i == ARRAY_SIZE(trace_clocks))
3304                 return -EINVAL;
3305
3306         trace_clock_id = i;
3307
3308         mutex_lock(&trace_types_lock);
3309
3310         ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3311         if (max_tr.buffer)
3312                 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3313
3314         mutex_unlock(&trace_types_lock);
3315
3316         *fpos += cnt;
3317
3318         return cnt;
3319 }
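
/*
 * Reading trace_clock lists the available clocks with the active one
 * in brackets; writing a listed name switches both the global and max
 * buffers. E.g., assuming the "local" and "global" clocks:
 *
 *   # cat /sys/kernel/debug/tracing/trace_clock
 *   [local] global
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 */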
3320
3321 static const struct file_operations tracing_max_lat_fops = {
3322         .open           = tracing_open_generic,
3323         .read           = tracing_max_lat_read,
3324         .write          = tracing_max_lat_write,
3325 };
3326
3327 static const struct file_operations tracing_ctrl_fops = {
3328         .open           = tracing_open_generic,
3329         .read           = tracing_ctrl_read,
3330         .write          = tracing_ctrl_write,
3331 };
3332
3333 static const struct file_operations set_tracer_fops = {
3334         .open           = tracing_open_generic,
3335         .read           = tracing_set_trace_read,
3336         .write          = tracing_set_trace_write,
3337 };
3338
3339 static const struct file_operations tracing_pipe_fops = {
3340         .open           = tracing_open_pipe,
3341         .poll           = tracing_poll_pipe,
3342         .read           = tracing_read_pipe,
3343         .splice_read    = tracing_splice_read_pipe,
3344         .release        = tracing_release_pipe,
3345 };
3346
3347 static const struct file_operations tracing_entries_fops = {
3348         .open           = tracing_open_generic,
3349         .read           = tracing_entries_read,
3350         .write          = tracing_entries_write,
3351 };
3352
3353 static const struct file_operations tracing_mark_fops = {
3354         .open           = tracing_open_generic,
3355         .write          = tracing_mark_write,
3356 };
3357
3358 static const struct file_operations trace_clock_fops = {
3359         .open           = tracing_open_generic,
3360         .read           = tracing_clock_read,
3361         .write          = tracing_clock_write,
3362 };
3363
3364 struct ftrace_buffer_info {
3365         struct trace_array      *tr;
3366         void                    *spare;
3367         int                     cpu;
3368         unsigned int            read;
3369 };
3370
3371 static int tracing_buffers_open(struct inode *inode, struct file *filp)
3372 {
3373         int cpu = (int)(long)inode->i_private;
3374         struct ftrace_buffer_info *info;
3375
3376         if (tracing_disabled)
3377                 return -ENODEV;
3378
3379         info = kzalloc(sizeof(*info), GFP_KERNEL);
3380         if (!info)
3381                 return -ENOMEM;
3382
3383         info->tr        = &global_trace;
3384         info->cpu       = cpu;
3385         info->spare     = NULL;
3386         /* Force reading ring buffer for first read */
3387         info->read      = (unsigned int)-1;
3388
3389         filp->private_data = info;
3390
3391         return nonseekable_open(inode, filp);
3392 }
3393
3394 static ssize_t
3395 tracing_buffers_read(struct file *filp, char __user *ubuf,
3396                      size_t count, loff_t *ppos)
3397 {
3398         struct ftrace_buffer_info *info = filp->private_data;
3399         unsigned int pos;
3400         ssize_t ret;
3401         size_t size;
3402
3403         if (!count)
3404                 return 0;
3405
3406         if (!info->spare)
3407                 info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
3408         if (!info->spare)
3409                 return -ENOMEM;
3410
3411         /* Do we have previous read data to read? */
3412         if (info->read < PAGE_SIZE)
3413                 goto read;
3414
3415         info->read = 0;
3416
3417         ret = ring_buffer_read_page(info->tr->buffer,
3418                                     &info->spare,
3419                                     count,
3420                                     info->cpu, 0);
3421         if (ret < 0)
3422                 return 0;
3423
3424         pos = ring_buffer_page_len(info->spare);
3425
3426         if (pos < PAGE_SIZE)
3427                 memset(info->spare + pos, 0, PAGE_SIZE - pos);
3428
3429 read:
3430         size = PAGE_SIZE - info->read;
3431         if (size > count)
3432                 size = count;
3433
3434         ret = copy_to_user(ubuf, info->spare + info->read, size);
3435         if (ret == size)
3436                 return -EFAULT;
3437         size -= ret;
3438
3439         *ppos += size;
3440         info->read += size;
3441
3442         return size;
3443 }
3444
3445 static int tracing_buffers_release(struct inode *inode, struct file *file)
3446 {
3447         struct ftrace_buffer_info *info = file->private_data;
3448
3449         if (info->spare)
3450                 ring_buffer_free_read_page(info->tr->buffer, info->spare);
3451         kfree(info);
3452
3453         return 0;
3454 }
3455
3456 struct buffer_ref {
3457         struct ring_buffer      *buffer;
3458         void                    *page;
3459         int                     ref;
3460 };
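
/*
 * Lifecycle note: each spliced page starts with ref == 1 (set in
 * tracing_buffers_splice_read() below). buffer_pipe_buf_get() bumps it
 * for every additional pipe reference; buffer_pipe_buf_release() and
 * buffer_spd_release() drop it, and whoever drops the last reference
 * hands the page back to the ring buffer.
 */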
3461
3462 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
3463                                     struct pipe_buffer *buf)
3464 {
3465         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3466
3467         if (--ref->ref)
3468                 return;
3469
3470         ring_buffer_free_read_page(ref->buffer, ref->page);
3471         kfree(ref);
3472         buf->private = 0;
3473 }
3474
3475 static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
3476                                  struct pipe_buffer *buf)
3477 {
3478         return 1;
3479 }
3480
3481 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3482                                 struct pipe_buffer *buf)
3483 {
3484         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3485
3486         ref->ref++;
3487 }
3488
3489 /* Pipe buffer operations for ring buffer pages spliced to a pipe. */
3490 static struct pipe_buf_operations buffer_pipe_buf_ops = {
3491         .can_merge              = 0,
3492         .map                    = generic_pipe_buf_map,
3493         .unmap                  = generic_pipe_buf_unmap,
3494         .confirm                = generic_pipe_buf_confirm,
3495         .release                = buffer_pipe_buf_release,
3496         .steal                  = buffer_pipe_buf_steal,
3497         .get                    = buffer_pipe_buf_get,
3498 };
3499
3500 /*
3501  * Callback from splice_to_pipe(): release the pages left in the spd
3502  * if we errored out while filling the pipe.
3503  */
3504 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
3505 {
3506         struct buffer_ref *ref =
3507                 (struct buffer_ref *)spd->partial[i].private;
3508
3509         if (--ref->ref)
3510                 return;
3511
3512         ring_buffer_free_read_page(ref->buffer, ref->page);
3513         kfree(ref);
3514         spd->partial[i].private = 0;
3515 }
3516
3517 static ssize_t
3518 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3519                             struct pipe_inode_info *pipe, size_t len,
3520                             unsigned int flags)
3521 {
3522         struct ftrace_buffer_info *info = file->private_data;
3523         struct partial_page partial[PIPE_BUFFERS];
3524         struct page *pages[PIPE_BUFFERS];
3525         struct splice_pipe_desc spd = {
3526                 .pages          = pages,
3527                 .partial        = partial,
3528                 .flags          = flags,
3529                 .ops            = &buffer_pipe_buf_ops,
3530                 .spd_release    = buffer_spd_release,
3531         };
3532         struct buffer_ref *ref;
3533         int entries, size, i;
3534         ssize_t ret;
3535
3536         if (*ppos & (PAGE_SIZE - 1)) {
3537                 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
3538                 return -EINVAL;
3539         }
3540
3541         if (len & (PAGE_SIZE - 1)) {
3542                 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
3543                 if (len < PAGE_SIZE)
3544                         return -EINVAL;
3545                 len &= PAGE_MASK;
3546         }
3547
3548         entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3549
3550         for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
3551                 struct page *page;
3552                 int r;
3553
3554                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3555                 if (!ref)
3556                         break;
3557
3558                 ref->ref = 1;
3559                 ref->buffer = info->tr->buffer;
3560                 ref->page = ring_buffer_alloc_read_page(ref->buffer);
3561                 if (!ref->page) {
3562                         kfree(ref);
3563                         break;
3564                 }
3565
3566                 r = ring_buffer_read_page(ref->buffer, &ref->page,
3567                                           len, info->cpu, 1);
3568                 if (r < 0) {
3569                         ring_buffer_free_read_page(ref->buffer,
3570                                                    ref->page);
3571                         kfree(ref);
3572                         break;
3573                 }
3574
3575                 /*
3576                  * Zero out any leftover data; this page is headed
3577                  * to user land.
3578                  */
3579                 size = ring_buffer_page_len(ref->page);
3580                 if (size < PAGE_SIZE)
3581                         memset(ref->page + size, 0, PAGE_SIZE - size);
3582
3583                 page = virt_to_page(ref->page);
3584
3585                 spd.pages[i] = page;
3586                 spd.partial[i].len = PAGE_SIZE;
3587                 spd.partial[i].offset = 0;
3588                 spd.partial[i].private = (unsigned long)ref;
3589                 spd.nr_pages++;
3590                 *ppos += PAGE_SIZE;
3591
3592                 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3593         }
3594
3595         spd.nr_pages = i;
3596
3597         /* did we read anything? */
3598         if (!spd.nr_pages) {
3599                 if (flags & SPLICE_F_NONBLOCK)
3600                         ret = -EAGAIN;
3601                 else
3602                         ret = 0;
3603                 /* TODO: block */
3604                 return ret;
3605         }
3606
3607         ret = splice_to_pipe(pipe, &spd);
3608
3609         return ret;
3610 }
3611
3612 static const struct file_operations tracing_buffers_fops = {
3613         .open           = tracing_buffers_open,
3614         .read           = tracing_buffers_read,
3615         .release        = tracing_buffers_release,
3616         .splice_read    = tracing_buffers_splice_read,
3617         .llseek         = no_llseek,
3618 };
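
/*
 * Example consumer (a hedged userspace sketch, not part of this file):
 * drain whole pages from a per-cpu trace_pipe_raw file into a plain
 * file via an intermediate pipe, which is what splice() requires.
 * Paths assume debugfs is mounted at /sys/kernel/debug.
 *
 *	int raw = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int p[2];
 *	ssize_t n;
 *
 *	pipe(p);
 *	while ((n = splice(raw, NULL, p[1], NULL, 4096,
 *			   SPLICE_F_NONBLOCK)) > 0)
 *		splice(p[0], NULL, out, NULL, n, 0);
 *
 * Note the page-size granularity enforced above: both the file offset
 * and the requested length must be page aligned.
 */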
3619
3620 static ssize_t
3621 tracing_stats_read(struct file *filp, char __user *ubuf,
3622                    size_t count, loff_t *ppos)
3623 {
3624         unsigned long cpu = (unsigned long)filp->private_data;
3625         struct trace_array *tr = &global_trace;
3626         struct trace_seq *s;
3627         unsigned long cnt;
3628
3629         s = kmalloc(sizeof(*s), GFP_KERNEL);
3630         if (!s)
3631                 return -ENOMEM;
3632
3633         trace_seq_init(s);
3634
3635         cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
3636         trace_seq_printf(s, "entries: %ld\n", cnt);
3637
3638         cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
3639         trace_seq_printf(s, "overrun: %ld\n", cnt);
3640
3641         cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
3642         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
3643
3644         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
3645
3646         kfree(s);
3647
3648         return count;
3649 }
3650
3651 static const struct file_operations tracing_stats_fops = {
3652         .open           = tracing_open_generic,
3653         .read           = tracing_stats_read,
3654 };
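
/*
 * Example output (illustrative; the numbers are made up, only the
 * field names come from tracing_stats_read() above):
 *
 *	# cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 */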
3655
3656 #ifdef CONFIG_DYNAMIC_FTRACE
3657
3658 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
3659 {
3660         return 0;
3661 }
3662
3663 static ssize_t
3664 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
3665                   size_t cnt, loff_t *ppos)
3666 {
3667         static char ftrace_dyn_info_buffer[1024];
3668         static DEFINE_MUTEX(dyn_info_mutex);
3669         unsigned long *p = filp->private_data;
3670         char *buf = ftrace_dyn_info_buffer;
3671         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
3672         int r;
3673
3674         mutex_lock(&dyn_info_mutex);
3675         r = sprintf(buf, "%ld ", *p);
3676
3677         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
3678         buf[r++] = '\n';
3679
3680         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3681
3682         mutex_unlock(&dyn_info_mutex);
3683
3684         return r;
3685 }
3686
3687 static const struct file_operations tracing_dyn_info_fops = {
3688         .open           = tracing_open_generic,
3689         .read           = tracing_read_dyn_info,
3690 };
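
/*
 * Example (illustrative): this reads as a single line, the counter
 * passed in via i_private (ftrace_update_tot_cnt below), optionally
 * followed by arch-specific detail from ftrace_arch_read_dyn_info():
 *
 *	# cat /sys/kernel/debug/tracing/dyn_ftrace_total_info
 *	21384
 *
 * (The count shown is made up.)
 */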
3691 #endif
3692
3693 static struct dentry *d_tracer;
3694
3695 struct dentry *tracing_init_dentry(void)
3696 {
3697         static int once;
3698
3699         if (d_tracer)
3700                 return d_tracer;
3701
3702         if (!debugfs_initialized())
3703                 return NULL;
3704
3705         d_tracer = debugfs_create_dir("tracing", NULL);
3706
3707         if (!d_tracer && !once) {
3708                 once = 1;
3709                 pr_warning("Could not create debugfs directory 'tracing'\n");
3710                 return NULL;
3711         }
3712
3713         return d_tracer;
3714 }
3715
3716 static struct dentry *d_percpu;
3717
3718 struct dentry *tracing_dentry_percpu(void)
3719 {
3720         static int once;
3721         struct dentry *d_tracer;
3722
3723         if (d_percpu)
3724                 return d_percpu;
3725
3726         d_tracer = tracing_init_dentry();
3727
3728         if (!d_tracer)
3729                 return NULL;
3730
3731         d_percpu = debugfs_create_dir("per_cpu", d_tracer);
3732
3733         if (!d_percpu && !once) {
3734                 once = 1;
3735                 pr_warning("Could not create debugfs directory 'per_cpu'\n");
3736                 return NULL;
3737         }
3738
3739         return d_percpu;
3740 }
3741
3742 static void tracing_init_debugfs_percpu(long cpu)
3743 {
3744         struct dentry *d_percpu = tracing_dentry_percpu();
3745         struct dentry *d_cpu;
3746         /* strlen("cpu") + up to 3 digits (cpu <= 999) + '\0' */
3747         char cpu_dir[7];
3748
3749         if (cpu > 999 || cpu < 0)
3750                 return;
3751
3752         sprintf(cpu_dir, "cpu%ld", cpu);
3753         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
3754         if (!d_cpu) {
3755                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
3756                 return;
3757         }
3758
3759         /* per cpu trace_pipe */
3760         trace_create_file("trace_pipe", 0444, d_cpu,
3761                         (void *) cpu, &tracing_pipe_fops);
3762
3763         /* per cpu trace */
3764         trace_create_file("trace", 0644, d_cpu,
3765                         (void *) cpu, &tracing_fops);
3766
3767         trace_create_file("trace_pipe_raw", 0444, d_cpu,
3768                         (void *) cpu, &tracing_buffers_fops);
3769
3770         trace_create_file("stats", 0444, d_cpu,
3771                         (void *) cpu, &tracing_stats_fops);
3772 }
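
/*
 * The resulting layout for each tracing cpu N (paths assume debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	/sys/kernel/debug/tracing/per_cpu/cpuN/trace_pipe
 *	/sys/kernel/debug/tracing/per_cpu/cpuN/trace
 *	/sys/kernel/debug/tracing/per_cpu/cpuN/trace_pipe_raw
 *	/sys/kernel/debug/tracing/per_cpu/cpuN/stats
 */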
3773
3774 #ifdef CONFIG_FTRACE_SELFTEST
3775 /* Let selftest have access to static functions in this file */
3776 #include "trace_selftest.c"
3777 #endif
3778
3779 struct trace_option_dentry {
3780         struct tracer_opt               *opt;
3781         struct tracer_flags             *flags;
3782         struct dentry                   *entry;
3783 };
3784
3785 static ssize_t
3786 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
3787                         loff_t *ppos)
3788 {
3789         struct trace_option_dentry *topt = filp->private_data;
3790         char *buf;
3791
3792         if (topt->flags->val & topt->opt->bit)
3793                 buf = "1\n";
3794         else
3795                 buf = "0\n";
3796
3797         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3798 }
3799
3800 static ssize_t
3801 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
3802                          loff_t *ppos)
3803 {
3804         struct trace_option_dentry *topt = filp->private_data;
3805         unsigned long val;
3806         char buf[64];
3807         int ret;
3808
3809         if (cnt >= sizeof(buf))
3810                 return -EINVAL;
3811
3812         if (copy_from_user(buf, ubuf, cnt))
3813                 return -EFAULT;
3814
3815         buf[cnt] = 0;
3816
3817         ret = strict_strtoul(buf, 10, &val);
3818         if (ret < 0)
3819                 return ret;
3820
3821         ret = 0;
3822         switch (val) {
3823         case 0:
3824                 /* do nothing if already cleared */
3825                 if (!(topt->flags->val & topt->opt->bit))
3826                         break;
3827
3828                 mutex_lock(&trace_types_lock);
3829                 if (current_trace->set_flag)
3830                         ret = current_trace->set_flag(topt->flags->val,
3831                                                       topt->opt->bit, 0);
3832                 mutex_unlock(&trace_types_lock);
3833                 if (ret)
3834                         return ret;
3835                 topt->flags->val &= ~topt->opt->bit;
3836                 break;
3837         case 1:
3838                 /* do nothing if already set */
3839                 if (topt->flags->val & topt->opt->bit)
3840                         break;
3841
3842                 mutex_lock(&trace_types_lock);
3843                 if (current_trace->set_flag)
3844                         ret = current_trace->set_flag(topt->flags->val,
3845                                                       topt->opt->bit, 1);
3846                 mutex_unlock(&trace_types_lock);
3847                 if (ret)
3848                         return ret;
3849                 topt->flags->val |= topt->opt->bit;
3850                 break;
3851
3852         default:
3853                 return -EINVAL;
3854         }
3855
3856         *ppos += cnt;
3857
3858         return cnt;
3859 }
3860
3861
3862 static const struct file_operations trace_options_fops = {
3863         .open = tracing_open_generic,
3864         .read = trace_options_read,
3865         .write = trace_options_write,
3866 };
3867
3868 static ssize_t
3869 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
3870                         loff_t *ppos)
3871 {
3872         long index = (long)filp->private_data;
3873         char *buf;
3874
3875         if (trace_flags & (1 << index))
3876                 buf = "1\n";
3877         else
3878                 buf = "0\n";
3879
3880         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3881 }
3882
3883 static ssize_t
3884 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
3885                          loff_t *ppos)
3886 {
3887         long index = (long)filp->private_data;
3888         char buf[64];
3889         unsigned long val;
3890         int ret;
3891
3892         if (cnt >= sizeof(buf))
3893                 return -EINVAL;
3894
3895         if (copy_from_user(buf, ubuf, cnt))
3896                 return -EFAULT;
3897
3898         buf[cnt] = 0;
3899
3900         ret = strict_strtoul(buf, 10, &val);
3901         if (ret < 0)
3902                 return ret;
3903
3904         if (val != 0 && val != 1)
3905                 return -EINVAL;
3906         set_tracer_flags(1 << index, val);
3907
3908         *ppos += cnt;
3909
3910         return cnt;
3911 }
3912
3913 static const struct file_operations trace_options_core_fops = {
3914         .open = tracing_open_generic,
3915         .read = trace_options_core_read,
3916         .write = trace_options_core_write,
3917 };
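
/*
 * Example (illustrative shell session): every core option from
 * trace_options[] becomes a boolean file under tracing/options/, and
 * only "0" or "1" is accepted on write. Using the "print-parent"
 * option as an example name:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/options/print-parent
 *	# cat /sys/kernel/debug/tracing/options/print-parent
 *	0
 */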
3918
3919 struct dentry *trace_create_file(const char *name,
3920                                  mode_t mode,
3921                                  struct dentry *parent,
3922                                  void *data,
3923                                  const struct file_operations *fops)
3924 {
3925         struct dentry *ret;
3926
3927         ret = debugfs_create_file(name, mode, parent, data, fops);
3928         if (!ret)
3929                 pr_warning("Could not create debugfs '%s' entry\n", name);
3930
3931         return ret;
3932 }
3933
3934
3935 static struct dentry *trace_options_init_dentry(void)
3936 {
3937         struct dentry *d_tracer;
3938         static struct dentry *t_options;
3939
3940         if (t_options)
3941                 return t_options;
3942
3943         d_tracer = tracing_init_dentry();
3944         if (!d_tracer)
3945                 return NULL;
3946
3947         t_options = debugfs_create_dir("options", d_tracer);
3948         if (!t_options) {
3949                 pr_warning("Could not create debugfs directory 'options'\n");
3950                 return NULL;
3951         }
3952
3953         return t_options;
3954 }
3955
3956 static void
3957 create_trace_option_file(struct trace_option_dentry *topt,
3958                          struct tracer_flags *flags,
3959                          struct tracer_opt *opt)
3960 {
3961         struct dentry *t_options;
3962
3963         t_options = trace_options_init_dentry();
3964         if (!t_options)
3965                 return;
3966
3967         topt->flags = flags;
3968         topt->opt = opt;
3969
3970         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
3971                                     &trace_options_fops);
3972
3973 }
3974
3975 static struct trace_option_dentry *
3976 create_trace_option_files(struct tracer *tracer)
3977 {
3978         struct trace_option_dentry *topts;
3979         struct tracer_flags *flags;
3980         struct tracer_opt *opts;
3981         int cnt;
3982
3983         if (!tracer)
3984                 return NULL;
3985
3986         flags = tracer->flags;
3987
3988         if (!flags || !flags->opts)
3989                 return NULL;
3990
3991         opts = flags->opts;
3992
3993         for (cnt = 0; opts[cnt].name; cnt++)
3994                 ;
3995
3996         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
3997         if (!topts)
3998                 return NULL;
3999
4000         for (cnt = 0; opts[cnt].name; cnt++)
4001                 create_trace_option_file(&topts[cnt], flags,
4002                                          &opts[cnt]);
4003
4004         return topts;
4005 }
4006
4007 static void
4008 destroy_trace_option_files(struct trace_option_dentry *topts)
4009 {
4010         int cnt;
4011
4012         if (!topts)
4013                 return;
4014
4015         for (cnt = 0; topts[cnt].opt; cnt++) {
4016                 if (topts[cnt].entry)
4017                         debugfs_remove(topts[cnt].entry);
4018         }
4019
4020         kfree(topts);
4021 }
4022
4023 static struct dentry *
4024 create_trace_option_core_file(const char *option, long index)
4025 {
4026         struct dentry *t_options;
4027
4028         t_options = trace_options_init_dentry();
4029         if (!t_options)
4030                 return NULL;
4031
4032         return trace_create_file(option, 0644, t_options, (void *)index,
4033                                     &trace_options_core_fops);
4034 }
4035
4036 static __init void create_trace_options_dir(void)
4037 {
4038         struct dentry *t_options;
4039         int i;
4040
4041         t_options = trace_options_init_dentry();
4042         if (!t_options)
4043                 return;
4044
4045         for (i = 0; trace_options[i]; i++)
4046                 create_trace_option_core_file(trace_options[i], i);
4047 }
4048
4049 static __init int tracer_init_debugfs(void)
4050 {
4051         struct dentry *d_tracer;
4052         int cpu;
4053
4054         d_tracer = tracing_init_dentry();
4055
4056         trace_create_file("tracing_enabled", 0644, d_tracer,
4057                         &global_trace, &tracing_ctrl_fops);
4058
4059         trace_create_file("trace_options", 0644, d_tracer,
4060                         NULL, &tracing_iter_fops);
4061
4062         trace_create_file("tracing_cpumask", 0644, d_tracer,
4063                         NULL, &tracing_cpumask_fops);
4064
4065         trace_create_file("trace", 0644, d_tracer,
4066                         (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4067
4068         trace_create_file("available_tracers", 0444, d_tracer,
4069                         &global_trace, &show_traces_fops);
4070
4071         trace_create_file("current_tracer", 0644, d_tracer,
4072                         &global_trace, &set_tracer_fops);
4073
4074 #ifdef CONFIG_TRACER_MAX_TRACE
4075         trace_create_file("tracing_max_latency", 0644, d_tracer,
4076                         &tracing_max_latency, &tracing_max_lat_fops);
4077
4078         trace_create_file("tracing_thresh", 0644, d_tracer,
4079                         &tracing_thresh, &tracing_max_lat_fops);
4080 #endif
4081
4082         trace_create_file("README", 0444, d_tracer,
4083                         NULL, &tracing_readme_fops);
4084
4085         trace_create_file("trace_pipe", 0444, d_tracer,
4086                         (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4087
4088         trace_create_file("buffer_size_kb", 0644, d_tracer,
4089                         &global_trace, &tracing_entries_fops);
4090
4091         trace_create_file("trace_marker", 0220, d_tracer,
4092                         NULL, &tracing_mark_fops);
4093
4094         trace_create_file("saved_cmdlines", 0444, d_tracer,
4095                         NULL, &tracing_saved_cmdlines_fops);
4096
4097         trace_create_file("trace_clock", 0644, d_tracer, NULL,
4098                           &trace_clock_fops);
4099
4100 #ifdef CONFIG_DYNAMIC_FTRACE
4101         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4102                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4103 #endif
4104 #ifdef CONFIG_SYSPROF_TRACER
4105         init_tracer_sysprof_debugfs(d_tracer);
4106 #endif
4107
4108         create_trace_options_dir();
4109
4110         for_each_tracing_cpu(cpu)
4111                 tracing_init_debugfs_percpu(cpu);
4112
4113         return 0;
4114 }
4115
4116 static int trace_panic_handler(struct notifier_block *this,
4117                                unsigned long event, void *unused)
4118 {
4119         if (ftrace_dump_on_oops)
4120                 ftrace_dump();
4121         return NOTIFY_OK;
4122 }
4123
4124 static struct notifier_block trace_panic_notifier = {
4125         .notifier_call  = trace_panic_handler,
4126         .next           = NULL,
4127         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
4128 };
4129
4130 static int trace_die_handler(struct notifier_block *self,
4131                              unsigned long val,
4132                              void *data)
4133 {
4134         switch (val) {
4135         case DIE_OOPS:
4136                 if (ftrace_dump_on_oops)
4137                         ftrace_dump();
4138                 break;
4139         default:
4140                 break;
4141         }
4142         return NOTIFY_OK;
4143 }
4144
4145 static struct notifier_block trace_die_notifier = {
4146         .notifier_call = trace_die_handler,
4147         .priority = 200
4148 };
4149
4150 /*
4151  * printk tops out at 1024 bytes; we really don't need it that big.
4152  * Nothing should be printing 1000 characters anyway.
4153  */
4154 #define TRACE_MAX_PRINT         1000
4155
4156 /*
4157  * Define here KERN_TRACE so that we have one place to modify
4158  * it if we decide to change what log level the ftrace dump
4159  * should be at.
4160  */
4161 #define KERN_TRACE              KERN_EMERG
4162
4163 static void
4164 trace_printk_seq(struct trace_seq *s)
4165 {
4166         /* Probably should print a warning here. */
4167         if (s->len >= TRACE_MAX_PRINT)
4168                 s->len = TRACE_MAX_PRINT;
4169
4170         /* Should be NUL-terminated already, but we are paranoid. */
4171         s->buffer[s->len] = 0;
4172
4173         printk(KERN_TRACE "%s", s->buffer);
4174
4175         trace_seq_init(s);
4176 }
4177
4178 static void __ftrace_dump(bool disable_tracing)
4179 {
4180         static raw_spinlock_t ftrace_dump_lock =
4181                 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
4182         /* use static because iter can be a bit big for the stack */
4183         static struct trace_iterator iter;
4184         unsigned int old_userobj;
4185         static int dump_ran;
4186         unsigned long flags;
4187         int cnt = 0, cpu;
4188
4189         /* only one dump */
4190         local_irq_save(flags);
4191         __raw_spin_lock(&ftrace_dump_lock);
4192         if (dump_ran)
4193                 goto out;
4194
4195         dump_ran = 1;
4196
4197         tracing_off();
4198
4199         if (disable_tracing)
4200                 ftrace_kill();
4201
4202         for_each_tracing_cpu(cpu) {
4203                 atomic_inc(&global_trace.data[cpu]->disabled);
4204         }
4205
4206         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4207
4208         /* don't look at user memory in panic mode */
4209         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4210
4211         printk(KERN_TRACE "Dumping ftrace buffer:\n");
4212
4213         /* Simulate the iterator */
4214         iter.tr = &global_trace;
4215         iter.trace = current_trace;
4216         iter.cpu_file = TRACE_PIPE_ALL_CPU;
4217
4218         /*
4219          * We need to stop tracing on all CPUs to read
4220          * the next buffer. This is a bit expensive, but it is
4221          * not done often. We print everything we can read,
4222          * and then release the locks again.
4223          */
4224
4225         while (!trace_empty(&iter)) {
4226
4227                 if (!cnt)
4228                         printk(KERN_TRACE "---------------------------------\n");
4229
4230                 cnt++;
4231
4232                 /* reset all but tr, trace, and overruns */
4233                 memset(&iter.seq, 0,
4234                        sizeof(struct trace_iterator) -
4235                        offsetof(struct trace_iterator, seq));
4236                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
4237                 iter.pos = -1;
4238
4239                 if (find_next_entry_inc(&iter) != NULL) {
4240                         int ret;
4241
4242                         ret = print_trace_line(&iter);
4243                         if (ret != TRACE_TYPE_NO_CONSUME)
4244                                 trace_consume(&iter);
4245                 }
4246
4247                 trace_printk_seq(&iter.seq);
4248         }
4249
4250         if (!cnt)
4251                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
4252         else
4253                 printk(KERN_TRACE "---------------------------------\n");
4254
4255         /* Re-enable tracing if requested */
4256         if (!disable_tracing) {
4257                 trace_flags |= old_userobj;
4258
4259                 for_each_tracing_cpu(cpu) {
4260                         atomic_dec(&global_trace.data[cpu]->disabled);
4261                 }
4262                 tracing_on();
4263         }
4264
4265  out:
4266         __raw_spin_unlock(&ftrace_dump_lock);
4267         local_irq_restore(flags);
4268 }
4269
4270 /* By default: disable tracing after the dump */
4271 void ftrace_dump(void)
4272 {
4273         __ftrace_dump(true);
4274 }
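
/*
 * Typical triggers: the panic and die notifiers above call this when
 * ftrace_dump_on_oops is set (e.g. via the ftrace_dump_on_oops boot
 * parameter), and it can also be called directly while debugging.
 * An illustrative, hypothetical call site:
 *
 *	if (WARN_ON(!some_invariant))	// some_invariant is made up
 *		ftrace_dump();
 */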
4275
4276 __init static int tracer_alloc_buffers(void)
4277 {
4278         int ring_buf_size;
4279         int i;
4280         int ret = -ENOMEM;
4281
4282         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
4283                 goto out;
4284
4285         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4286                 goto out_free_buffer_mask;
4287
4288         if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
4289                 goto out_free_tracing_cpumask;
4290
4291         /* To save memory, keep the ring buffer at its minimum size */
4292         if (ring_buffer_expanded)
4293                 ring_buf_size = trace_buf_size;
4294         else
4295                 ring_buf_size = 1;
4296
4297         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
4298         cpumask_copy(tracing_cpumask, cpu_all_mask);
4299         cpumask_clear(tracing_reader_cpumask);
4300
4301         /* TODO: make the number of buffers hot pluggable with CPUS */
4302         global_trace.buffer = ring_buffer_alloc(ring_buf_size,
4303                                                    TRACE_BUFFER_FLAGS);
4304         if (!global_trace.buffer) {
4305                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
4306                 WARN_ON(1);
4307                 goto out_free_cpumask;
4308         }
4309         global_trace.entries = ring_buffer_size(global_trace.buffer);
4310
4311
4312 #ifdef CONFIG_TRACER_MAX_TRACE
4313         max_tr.buffer = ring_buffer_alloc(ring_buf_size,
4314                                              TRACE_BUFFER_FLAGS);
4315         if (!max_tr.buffer) {
4316                 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
4317                 WARN_ON(1);
4318                 ring_buffer_free(global_trace.buffer);
4319                 goto out_free_cpumask;
4320         }
4321         max_tr.entries = ring_buffer_size(max_tr.buffer);
4322         WARN_ON(max_tr.entries != global_trace.entries);
4323 #endif
4324
4325         /* Hook up the per-cpu trace data for all buffers */
4326         for_each_tracing_cpu(i) {
4327                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
4328                 max_tr.data[i] = &per_cpu(max_data, i);
4329         }
4330
4331         trace_init_cmdlines();
4332
4333         register_tracer(&nop_trace);
4334         current_trace = &nop_trace;
4335 #ifdef CONFIG_BOOT_TRACER
4336         register_tracer(&boot_tracer);
4337 #endif
4338         /* All seems OK, enable tracing */
4339         tracing_disabled = 0;
4340
4341         atomic_notifier_chain_register(&panic_notifier_list,
4342                                        &trace_panic_notifier);
4343
4344         register_die_notifier(&trace_die_notifier);
4345
4346         return 0;
4347
4348 out_free_cpumask:
4349         free_cpumask_var(tracing_reader_cpumask);
4350 out_free_tracing_cpumask:
4351         free_cpumask_var(tracing_cpumask);
4352 out_free_buffer_mask:
4353         free_cpumask_var(tracing_buffer_mask);
4354 out:
4355         return ret;
4356 }
4357
4358 __init static int clear_boot_tracer(void)
4359 {
4360         /*
4361          * The default bootup tracer lives in an init section and
4362          * is freed after boot. This function runs as a late
4363          * initcall: if the boot tracer was never registered by
4364          * now, clear the pointer so a later registration cannot
4365          * access the buffer that is about to be freed.
4366          */
4367         if (!default_bootup_tracer)
4368                 return 0;
4369
4370         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
4371                default_bootup_tracer);
4372         default_bootup_tracer = NULL;
4373
4374         return 0;
4375 }
4376
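/*
 * Initcall ordering matters here: tracer_alloc_buffers() must run
 * before any tracer touches the ring buffer, tracer_init_debugfs()
 * needs the filesystem infrastructure up, and clear_boot_tracer()
 * runs last so the boot tracer has had every chance to register.
 */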
4377 early_initcall(tracer_alloc_buffers);
4378 fs_initcall(tracer_init_debugfs);
4379 late_initcall(clear_boot_tracer);