kernel/trace/ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
29 #include <linux/hash.h>
30
31 #include <trace/events/sched.h>
32
33 #include <asm/ftrace.h>
34 #include <asm/setup.h>
35
36 #include "trace_output.h"
37 #include "trace_stat.h"
38
39 #define FTRACE_WARN_ON(cond)                    \
40         do {                                    \
41                 if (WARN_ON(cond))              \
42                         ftrace_kill();          \
43         } while (0)
44
45 #define FTRACE_WARN_ON_ONCE(cond)               \
46         do {                                    \
47                 if (WARN_ON_ONCE(cond))         \
48                         ftrace_kill();          \
49         } while (0)
50
51 /* hash bits for specific function selection */
52 #define FTRACE_HASH_BITS 7
53 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
54
55 /* ftrace_enabled is a method to turn ftrace on or off */
56 int ftrace_enabled __read_mostly;
57 static int last_ftrace_enabled;
58
59 /* Quick disabling of function tracer. */
60 int function_trace_stop;
61
62 /* List for set_ftrace_pid's pids. */
63 LIST_HEAD(ftrace_pids);
64 struct ftrace_pid {
65         struct list_head list;
66         struct pid *pid;
67 };
68
69 /*
70  * ftrace_disabled is set when an anomaly is discovered.
71  * ftrace_disabled is much stronger than ftrace_enabled.
72  */
73 static int ftrace_disabled __read_mostly;
74
75 static DEFINE_MUTEX(ftrace_lock);
76
77 static struct ftrace_ops ftrace_list_end __read_mostly =
78 {
79         .func           = ftrace_stub,
80 };
81
82 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
83 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
84 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
85 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
86
87 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
88 {
89         struct ftrace_ops *op = ftrace_list;
90
91         /* in case someone actually ports this to alpha! */
92         read_barrier_depends();
93
94         while (op != &ftrace_list_end) {
95                 /* silly alpha */
96                 read_barrier_depends();
97                 op->func(ip, parent_ip);
98                 op = op->next;
99         }
100 }
101
102 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
103 {
104         if (!test_tsk_trace_trace(current))
105                 return;
106
107         ftrace_pid_function(ip, parent_ip);
108 }
109
110 static void set_ftrace_pid_function(ftrace_func_t func)
111 {
112         /* do not set ftrace_pid_function to itself! */
113         if (func != ftrace_pid_func)
114                 ftrace_pid_function = func;
115 }
116
117 /**
118  * clear_ftrace_function - reset the ftrace function
119  *
120  * This NULLs the ftrace function and in essence stops
121  * tracing.  There may be some lag before all callers stop being traced.
122  */
123 void clear_ftrace_function(void)
124 {
125         ftrace_trace_function = ftrace_stub;
126         __ftrace_trace_function = ftrace_stub;
127         ftrace_pid_function = ftrace_stub;
128 }
129
130 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
131 /*
132  * For those archs that do not test function_trace_stop in their
133  * mcount call site, we need to do it from C.
134  */
135 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
136 {
137         if (function_trace_stop)
138                 return;
139
140         __ftrace_trace_function(ip, parent_ip);
141 }
142 #endif
143
144 static int __register_ftrace_function(struct ftrace_ops *ops)
145 {
146         ops->next = ftrace_list;
147         /*
148          * We are adding ops to the ftrace_list, but another
149          * CPU might be walking that list. We need to make sure
150          * the ops->next pointer is valid before another CPU sees
151          * the ops pointer inserted into the ftrace_list.
152          */
153         smp_wmb();
154         ftrace_list = ops;
155
156         if (ftrace_enabled) {
157                 ftrace_func_t func;
158
159                 if (ops->next == &ftrace_list_end)
160                         func = ops->func;
161                 else
162                         func = ftrace_list_func;
163
164                 if (!list_empty(&ftrace_pids)) {
165                         set_ftrace_pid_function(func);
166                         func = ftrace_pid_func;
167                 }
168
169                 /*
170                  * For one func, simply call it directly.
171                  * For more than one func, call the chain.
172                  */
173 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
174                 ftrace_trace_function = func;
175 #else
176                 __ftrace_trace_function = func;
177                 ftrace_trace_function = ftrace_test_stop_func;
178 #endif
179         }
180
181         return 0;
182 }
183
184 static int __unregister_ftrace_function(struct ftrace_ops *ops)
185 {
186         struct ftrace_ops **p;
187
188         /*
189          * If we are removing the last function, then simply point
190          * to the ftrace_stub.
191          */
192         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
193                 ftrace_trace_function = ftrace_stub;
194                 ftrace_list = &ftrace_list_end;
195                 return 0;
196         }
197
198         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
199                 if (*p == ops)
200                         break;
201
202         if (*p != ops)
203                 return -1;
204
205         *p = (*p)->next;
206
207         if (ftrace_enabled) {
208                 /* If we only have one func left, then call that directly */
209                 if (ftrace_list->next == &ftrace_list_end) {
210                         ftrace_func_t func = ftrace_list->func;
211
212                         if (!list_empty(&ftrace_pids)) {
213                                 set_ftrace_pid_function(func);
214                                 func = ftrace_pid_func;
215                         }
216 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
217                         ftrace_trace_function = func;
218 #else
219                         __ftrace_trace_function = func;
220 #endif
221                 }
222         }
223
224         return 0;
225 }
226
227 static void ftrace_update_pid_func(void)
228 {
229         ftrace_func_t func;
230
231         if (ftrace_trace_function == ftrace_stub)
232                 return;
233
234 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
235         func = ftrace_trace_function;
236 #else
237         func = __ftrace_trace_function;
238 #endif
239
240         if (!list_empty(&ftrace_pids)) {
241                 set_ftrace_pid_function(func);
242                 func = ftrace_pid_func;
243         } else {
244                 if (func == ftrace_pid_func)
245                         func = ftrace_pid_function;
246         }
247
248 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
249         ftrace_trace_function = func;
250 #else
251         __ftrace_trace_function = func;
252 #endif
253 }
254
255 #ifdef CONFIG_FUNCTION_PROFILER
256 struct ftrace_profile {
257         struct hlist_node               node;
258         unsigned long                   ip;
259         unsigned long                   counter;
260 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
261         unsigned long long              time;
262 #endif
263 };
264
265 struct ftrace_profile_page {
266         struct ftrace_profile_page      *next;
267         unsigned long                   index;
268         struct ftrace_profile           records[];
269 };
270
271 struct ftrace_profile_stat {
272         atomic_t                        disabled;
273         struct hlist_head               *hash;
274         struct ftrace_profile_page      *pages;
275         struct ftrace_profile_page      *start;
276         struct tracer_stat              stat;
277 };
278
279 #define PROFILE_RECORDS_SIZE                                            \
280         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
281
282 #define PROFILES_PER_PAGE                                       \
283         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
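/*
 * Editor's note (illustrative, not in the original file): on a 64-bit
 * build with 4K pages and CONFIG_FUNCTION_GRAPH_TRACER enabled,
 * sizeof(struct ftrace_profile) is 40 bytes and the records[] array
 * starts 16 bytes into the page, so PROFILES_PER_PAGE works out to
 * (4096 - 16) / 40 = 102 records per page.
 */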
284
285 static int ftrace_profile_bits __read_mostly;
286 static int ftrace_profile_enabled __read_mostly;
287
288 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
289 static DEFINE_MUTEX(ftrace_profile_lock);
290
291 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
292
293 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
294
295 static void *
296 function_stat_next(void *v, int idx)
297 {
298         struct ftrace_profile *rec = v;
299         struct ftrace_profile_page *pg;
300
301         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
302
303  again:
304         if (idx != 0)
305                 rec++;
306
307         if ((void *)rec >= (void *)&pg->records[pg->index]) {
308                 pg = pg->next;
309                 if (!pg)
310                         return NULL;
311                 rec = &pg->records[0];
312                 if (!rec->counter)
313                         goto again;
314         }
315
316         return rec;
317 }
318
319 static void *function_stat_start(struct tracer_stat *trace)
320 {
321         struct ftrace_profile_stat *stat =
322                 container_of(trace, struct ftrace_profile_stat, stat);
323
324         if (!stat || !stat->start)
325                 return NULL;
326
327         return function_stat_next(&stat->start->records[0], 0);
328 }
329
330 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
331 /* function graph compares on total time */
332 static int function_stat_cmp(void *p1, void *p2)
333 {
334         struct ftrace_profile *a = p1;
335         struct ftrace_profile *b = p2;
336
337         if (a->time < b->time)
338                 return -1;
339         if (a->time > b->time)
340                 return 1;
341         else
342                 return 0;
343 }
344 #else
345 /* without function graph, compare against hit counts */
346 static int function_stat_cmp(void *p1, void *p2)
347 {
348         struct ftrace_profile *a = p1;
349         struct ftrace_profile *b = p2;
350
351         if (a->counter < b->counter)
352                 return -1;
353         if (a->counter > b->counter)
354                 return 1;
355         else
356                 return 0;
357 }
358 #endif
359
360 static int function_stat_headers(struct seq_file *m)
361 {
362 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
363         seq_printf(m, "  Function                               "
364                    "Hit    Time            Avg\n"
365                       "  --------                               "
366                    "---    ----            ---\n");
367 #else
368         seq_printf(m, "  Function                               Hit\n"
369                       "  --------                               ---\n");
370 #endif
371         return 0;
372 }
373
374 static int function_stat_show(struct seq_file *m, void *v)
375 {
376         struct ftrace_profile *rec = v;
377         char str[KSYM_SYMBOL_LEN];
378 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
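        /*
         * Editor's note: trace_seq embeds a roughly page-sized buffer,
         * which is too large for the stack, hence the static instance
         * below, serialized by its own mutex.
         */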
379         static DEFINE_MUTEX(mutex);
380         static struct trace_seq s;
381         unsigned long long avg;
382 #endif
383
384         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
385         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
386
387 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
388         seq_printf(m, "    ");
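        /*
         * Editor's note: do_div() divides 'avg' in place (the quotient
         * ends up in 'avg', the remainder is returned and ignored here),
         * so 'avg' becomes total time divided by hit count.
         */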
389         avg = rec->time;
390         do_div(avg, rec->counter);
391
392         mutex_lock(&mutex);
393         trace_seq_init(&s);
394         trace_print_graph_duration(rec->time, &s);
395         trace_seq_puts(&s, "    ");
396         trace_print_graph_duration(avg, &s);
397         trace_print_seq(m, &s);
398         mutex_unlock(&mutex);
399 #endif
400         seq_putc(m, '\n');
401
402         return 0;
403 }
404
405 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
406 {
407         struct ftrace_profile_page *pg;
408
409         pg = stat->pages = stat->start;
410
411         while (pg) {
412                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
413                 pg->index = 0;
414                 pg = pg->next;
415         }
416
417         memset(stat->hash, 0,
418                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
419 }
420
421 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
422 {
423         struct ftrace_profile_page *pg;
424         int functions;
425         int pages;
426         int i;
427
428         /* If we already allocated, do nothing */
429         if (stat->pages)
430                 return 0;
431
432         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
433         if (!stat->pages)
434                 return -ENOMEM;
435
436 #ifdef CONFIG_DYNAMIC_FTRACE
437         functions = ftrace_update_tot_cnt;
438 #else
439         /*
440          * We do not know the number of functions that exist because
441          * dynamic tracing is what counts them. From past experience
442          * there are around 20K functions. That should be more than enough.
443          * It is highly unlikely we will execute every function in
444          * the kernel.
445          */
446         functions = 20000;
447 #endif
448
449         pg = stat->start = stat->pages;
450
451         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
452
453         for (i = 0; i < pages; i++) {
454                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
455                 if (!pg->next)
456                         goto out_free;
457                 pg = pg->next;
458         }
459
460         return 0;
461
462  out_free:
463         pg = stat->start;
464         while (pg) {
465                 unsigned long tmp = (unsigned long)pg;
466
467                 pg = pg->next;
468                 free_page(tmp);
469         }
470
471         free_page((unsigned long)stat->pages);
472         stat->pages = NULL;
473         stat->start = NULL;
474
475         return -ENOMEM;
476 }
477
478 static int ftrace_profile_init_cpu(int cpu)
479 {
480         struct ftrace_profile_stat *stat;
481         int size;
482
483         stat = &per_cpu(ftrace_profile_stats, cpu);
484
485         if (stat->hash) {
486                 /* If the profile is already created, simply reset it */
487                 ftrace_profile_reset(stat);
488                 return 0;
489         }
490
491         /*
492          * We are profiling all functions, but usually only a few thousand
493          * functions are hit. We'll make a hash of 1024 items.
494          */
495         size = FTRACE_PROFILE_HASH_SIZE;
496
497         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
498
499         if (!stat->hash)
500                 return -ENOMEM;
501
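        /*
         * Editor's note: the loop below computes log2 of the hash size,
         * so with FTRACE_PROFILE_HASH_SIZE == 1024, ftrace_profile_bits
         * ends up as 10.
         */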
502         if (!ftrace_profile_bits) {
503                 size--;
504
505                 for (; size; size >>= 1)
506                         ftrace_profile_bits++;
507         }
508
509         /* Preallocate the function profiling pages */
510         if (ftrace_profile_pages_init(stat) < 0) {
511                 kfree(stat->hash);
512                 stat->hash = NULL;
513                 return -ENOMEM;
514         }
515
516         return 0;
517 }
518
519 static int ftrace_profile_init(void)
520 {
521         int cpu;
522         int ret = 0;
523
524         for_each_online_cpu(cpu) {
525                 ret = ftrace_profile_init_cpu(cpu);
526                 if (ret)
527                         break;
528         }
529
530         return ret;
531 }
532
533 /* interrupts must be disabled */
534 static struct ftrace_profile *
535 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
536 {
537         struct ftrace_profile *rec;
538         struct hlist_head *hhd;
539         struct hlist_node *n;
540         unsigned long key;
541
542         key = hash_long(ip, ftrace_profile_bits);
543         hhd = &stat->hash[key];
544
545         if (hlist_empty(hhd))
546                 return NULL;
547
548         hlist_for_each_entry_rcu(rec, n, hhd, node) {
549                 if (rec->ip == ip)
550                         return rec;
551         }
552
553         return NULL;
554 }
555
556 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
557                                struct ftrace_profile *rec)
558 {
559         unsigned long key;
560
561         key = hash_long(rec->ip, ftrace_profile_bits);
562         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
563 }
564
565 /*
566  * The memory is already allocated; this simply finds a new record to use.
567  */
568 static struct ftrace_profile *
569 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
570 {
571         struct ftrace_profile *rec = NULL;
572
573         /* prevent recursion (from NMIs) */
574         if (atomic_inc_return(&stat->disabled) != 1)
575                 goto out;
576
577         /*
578          * Try to find the function again since an NMI
579          * could have added it
580          */
581         rec = ftrace_find_profiled_func(stat, ip);
582         if (rec)
583                 goto out;
584
585         if (stat->pages->index == PROFILES_PER_PAGE) {
586                 if (!stat->pages->next)
587                         goto out;
588                 stat->pages = stat->pages->next;
589         }
590
591         rec = &stat->pages->records[stat->pages->index++];
592         rec->ip = ip;
593         ftrace_add_profile(stat, rec);
594
595  out:
596         atomic_dec(&stat->disabled);
597
598         return rec;
599 }
600
601 static void
602 function_profile_call(unsigned long ip, unsigned long parent_ip)
603 {
604         struct ftrace_profile_stat *stat;
605         struct ftrace_profile *rec;
606         unsigned long flags;
607
608         if (!ftrace_profile_enabled)
609                 return;
610
611         local_irq_save(flags);
612
613         stat = &__get_cpu_var(ftrace_profile_stats);
614         if (!stat->hash || !ftrace_profile_enabled)
615                 goto out;
616
617         rec = ftrace_find_profiled_func(stat, ip);
618         if (!rec) {
619                 rec = ftrace_profile_alloc(stat, ip);
620                 if (!rec)
621                         goto out;
622         }
623
624         rec->counter++;
625  out:
626         local_irq_restore(flags);
627 }
628
629 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
630 static int profile_graph_entry(struct ftrace_graph_ent *trace)
631 {
632         function_profile_call(trace->func, 0);
633         return 1;
634 }
635
636 static void profile_graph_return(struct ftrace_graph_ret *trace)
637 {
638         struct ftrace_profile_stat *stat;
639         unsigned long long calltime;
640         struct ftrace_profile *rec;
641         unsigned long flags;
642
643         local_irq_save(flags);
644         stat = &__get_cpu_var(ftrace_profile_stats);
645         if (!stat->hash || !ftrace_profile_enabled)
646                 goto out;
647
648         calltime = trace->rettime - trace->calltime;
649
650         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
651                 int index;
652
653                 index = trace->depth;
654
655                 /* Append this call time to the parent time to subtract */
656                 if (index)
657                         current->ret_stack[index - 1].subtime += calltime;
658
659                 if (current->ret_stack[index].subtime < calltime)
660                         calltime -= current->ret_stack[index].subtime;
661                 else
662                         calltime = 0;
663         }
664
665         rec = ftrace_find_profiled_func(stat, trace->func);
666         if (rec)
667                 rec->time += calltime;
668
669  out:
670         local_irq_restore(flags);
671 }
672
673 static int register_ftrace_profiler(void)
674 {
675         return register_ftrace_graph(&profile_graph_return,
676                                      &profile_graph_entry);
677 }
678
679 static void unregister_ftrace_profiler(void)
680 {
681         unregister_ftrace_graph();
682 }
683 #else
684 static struct ftrace_ops ftrace_profile_ops __read_mostly =
685 {
686         .func           = function_profile_call,
687 };
688
689 static int register_ftrace_profiler(void)
690 {
691         return register_ftrace_function(&ftrace_profile_ops);
692 }
693
694 static void unregister_ftrace_profiler(void)
695 {
696         unregister_ftrace_function(&ftrace_profile_ops);
697 }
698 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
699
700 static ssize_t
701 ftrace_profile_write(struct file *filp, const char __user *ubuf,
702                      size_t cnt, loff_t *ppos)
703 {
704         unsigned long val;
705         char buf[64];           /* big enough to hold a number */
706         int ret;
707
708         if (cnt >= sizeof(buf))
709                 return -EINVAL;
710
711         if (copy_from_user(&buf, ubuf, cnt))
712                 return -EFAULT;
713
714         buf[cnt] = 0;
715
716         ret = strict_strtoul(buf, 10, &val);
717         if (ret < 0)
718                 return ret;
719
720         val = !!val;
721
722         mutex_lock(&ftrace_profile_lock);
723         if (ftrace_profile_enabled ^ val) {
724                 if (val) {
725                         ret = ftrace_profile_init();
726                         if (ret < 0) {
727                                 cnt = ret;
728                                 goto out;
729                         }
730
731                         ret = register_ftrace_profiler();
732                         if (ret < 0) {
733                                 cnt = ret;
734                                 goto out;
735                         }
736                         ftrace_profile_enabled = 1;
737                 } else {
738                         ftrace_profile_enabled = 0;
739                         /*
740                          * unregister_ftrace_profiler calls stop_machine
741                          * so this acts like a synchronize_sched.
742                          */
743                         unregister_ftrace_profiler();
744                 }
745         }
746  out:
747         mutex_unlock(&ftrace_profile_lock);
748
749         *ppos += cnt;
750
751         return cnt;
752 }
753
754 static ssize_t
755 ftrace_profile_read(struct file *filp, char __user *ubuf,
756                      size_t cnt, loff_t *ppos)
757 {
758         char buf[64];           /* big enough to hold a number */
759         int r;
760
761         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
762         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
763 }
764
765 static const struct file_operations ftrace_profile_fops = {
766         .open           = tracing_open_generic,
767         .read           = ftrace_profile_read,
768         .write          = ftrace_profile_write,
769 };
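/*
 * Editor's note (typical usage, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * Writing 0 stops the profiler; the per-cpu "function<N>" stat files
 * are registered below in ftrace_profile_debugfs().
 */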
770
771 /* used to initialize the real stat files */
772 static struct tracer_stat function_stats __initdata = {
773         .name           = "functions",
774         .stat_start     = function_stat_start,
775         .stat_next      = function_stat_next,
776         .stat_cmp       = function_stat_cmp,
777         .stat_headers   = function_stat_headers,
778         .stat_show      = function_stat_show
779 };
780
781 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
782 {
783         struct ftrace_profile_stat *stat;
784         struct dentry *entry;
785         char *name;
786         int ret;
787         int cpu;
788
789         for_each_possible_cpu(cpu) {
790                 stat = &per_cpu(ftrace_profile_stats, cpu);
791
792                 /* allocate enough for function name + cpu number */
793                 name = kmalloc(32, GFP_KERNEL);
794                 if (!name) {
795                         /*
796                          * The files created are permanent; even if something
797                          * goes wrong here we still do not free the memory.
798                          */
799                         WARN(1,
800                              "Could not allocate stat file for cpu %d\n",
801                              cpu);
802                         return;
803                 }
804                 stat->stat = function_stats;
805                 snprintf(name, 32, "function%d", cpu);
806                 stat->stat.name = name;
807                 ret = register_stat_tracer(&stat->stat);
808                 if (ret) {
809                         WARN(1,
810                              "Could not register function stat for cpu %d\n",
811                              cpu);
812                         kfree(name);
813                         return;
814                 }
815         }
816
817         entry = debugfs_create_file("function_profile_enabled", 0644,
818                                     d_tracer, NULL, &ftrace_profile_fops);
819         if (!entry)
820                 pr_warning("Could not create debugfs "
821                            "'function_profile_enabled' entry\n");
822 }
823
824 #else /* CONFIG_FUNCTION_PROFILER */
825 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
826 {
827 }
828 #endif /* CONFIG_FUNCTION_PROFILER */
829
830 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
831
832 #ifdef CONFIG_DYNAMIC_FTRACE
833
834 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
835 # error Dynamic ftrace depends on MCOUNT_RECORD
836 #endif
837
838 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
839
840 struct ftrace_func_probe {
841         struct hlist_node       node;
842         struct ftrace_probe_ops *ops;
843         unsigned long           flags;
844         unsigned long           ip;
845         void                    *data;
846         struct rcu_head         rcu;
847 };
848
849 enum {
850         FTRACE_ENABLE_CALLS             = (1 << 0),
851         FTRACE_DISABLE_CALLS            = (1 << 1),
852         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
853         FTRACE_ENABLE_MCOUNT            = (1 << 3),
854         FTRACE_DISABLE_MCOUNT           = (1 << 4),
855         FTRACE_START_FUNC_RET           = (1 << 5),
856         FTRACE_STOP_FUNC_RET            = (1 << 6),
857 };
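/*
 * Editor's note: these bits are OR'd together into a single 'command'
 * word (see ftrace_startup()/ftrace_shutdown() below) and passed to
 * ftrace_run_update_code(), which applies them via stop_machine().
 */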
858
859 static int ftrace_filtered;
860
861 static struct dyn_ftrace *ftrace_new_addrs;
862
863 static DEFINE_MUTEX(ftrace_regex_lock);
864
865 struct ftrace_page {
866         struct ftrace_page      *next;
867         int                     index;
868         struct dyn_ftrace       records[];
869 };
870
871 #define ENTRIES_PER_PAGE \
872   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
873
874 /* estimate from running different kernels */
875 #define NR_TO_INIT              10000
876
877 static struct ftrace_page       *ftrace_pages_start;
878 static struct ftrace_page       *ftrace_pages;
879
880 static struct dyn_ftrace *ftrace_free_records;
881
882 /*
883  * This is a double for loop. Do not use 'break' to break out of it;
884  * you must use a goto (see the illustrative sketch below the macros).
885  */
886 #define do_for_each_ftrace_rec(pg, rec)                                 \
887         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
888                 int _____i;                                             \
889                 for (_____i = 0; _____i < pg->index; _____i++) {        \
890                         rec = &pg->records[_____i];
891
892 #define while_for_each_ftrace_rec()             \
893                 }                               \
894         }
895
896 static void ftrace_free_rec(struct dyn_ftrace *rec)
897 {
898         rec->freelist = ftrace_free_records;
899         ftrace_free_records = rec;
900         rec->flags |= FTRACE_FL_FREE;
901 }
902
903 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
904 {
905         struct dyn_ftrace *rec;
906
907         /* First check for freed records */
908         if (ftrace_free_records) {
909                 rec = ftrace_free_records;
910
911                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
912                         FTRACE_WARN_ON_ONCE(1);
913                         ftrace_free_records = NULL;
914                         return NULL;
915                 }
916
917                 ftrace_free_records = rec->freelist;
918                 memset(rec, 0, sizeof(*rec));
919                 return rec;
920         }
921
922         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
923                 if (!ftrace_pages->next) {
924                         /* allocate another page */
925                         ftrace_pages->next =
926                                 (void *)get_zeroed_page(GFP_KERNEL);
927                         if (!ftrace_pages->next)
928                                 return NULL;
929                 }
930                 ftrace_pages = ftrace_pages->next;
931         }
932
933         return &ftrace_pages->records[ftrace_pages->index++];
934 }
935
936 static struct dyn_ftrace *
937 ftrace_record_ip(unsigned long ip)
938 {
939         struct dyn_ftrace *rec;
940
941         if (ftrace_disabled)
942                 return NULL;
943
944         rec = ftrace_alloc_dyn_node(ip);
945         if (!rec)
946                 return NULL;
947
948         rec->ip = ip;
949         rec->newlist = ftrace_new_addrs;
950         ftrace_new_addrs = rec;
951
952         return rec;
953 }
954
955 static void print_ip_ins(const char *fmt, unsigned char *p)
956 {
957         int i;
958
959         printk(KERN_CONT "%s", fmt);
960
961         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
962                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
963 }
964
965 static void ftrace_bug(int failed, unsigned long ip)
966 {
967         switch (failed) {
968         case -EFAULT:
969                 FTRACE_WARN_ON_ONCE(1);
970                 pr_info("ftrace faulted on modifying ");
971                 print_ip_sym(ip);
972                 break;
973         case -EINVAL:
974                 FTRACE_WARN_ON_ONCE(1);
975                 pr_info("ftrace failed to modify ");
976                 print_ip_sym(ip);
977                 print_ip_ins(" actual: ", (unsigned char *)ip);
978                 printk(KERN_CONT "\n");
979                 break;
980         case -EPERM:
981                 FTRACE_WARN_ON_ONCE(1);
982                 pr_info("ftrace faulted on writing ");
983                 print_ip_sym(ip);
984                 break;
985         default:
986                 FTRACE_WARN_ON_ONCE(1);
987                 pr_info("ftrace faulted on unknown error ");
988                 print_ip_sym(ip);
989         }
990 }
991
992
993 /* Return 1 if the address range is reserved for ftrace */
994 int ftrace_text_reserved(void *start, void *end)
995 {
996         struct dyn_ftrace *rec;
997         struct ftrace_page *pg;
998
999         do_for_each_ftrace_rec(pg, rec) {
1000                 if (rec->ip <= (unsigned long)end &&
1001                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1002                         return 1;
1003         } while_for_each_ftrace_rec();
1004         return 0;
1005 }
1006
1007
1008 static int
1009 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1010 {
1011         unsigned long ftrace_addr;
1012         unsigned long flag = 0UL;
1013
1014         ftrace_addr = (unsigned long)FTRACE_ADDR;
1015
1016         /*
1017          * If this record is not to be traced or we want to disable it,
1018          * then disable it.
1019          *
1020          * If we want to enable it and filtering is off, then enable it.
1021          *
1022          * If we want to enable it and filtering is on, enable it only if
1023          * it's filtered
1024          */
1025         if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
1026                 if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
1027                         flag = FTRACE_FL_ENABLED;
1028         }
1029
1030         /* If the state of this record hasn't changed, then do nothing */
1031         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1032                 return 0;
1033
1034         if (flag) {
1035                 rec->flags |= FTRACE_FL_ENABLED;
1036                 return ftrace_make_call(rec, ftrace_addr);
1037         }
1038
1039         rec->flags &= ~FTRACE_FL_ENABLED;
1040         return ftrace_make_nop(NULL, rec, ftrace_addr);
1041 }
1042
1043 static void ftrace_replace_code(int enable)
1044 {
1045         struct dyn_ftrace *rec;
1046         struct ftrace_page *pg;
1047         int failed;
1048
1049         do_for_each_ftrace_rec(pg, rec) {
1050                 /*
1051                  * Skip over free records, records that have failed,
1052                  * and records that have not been converted.
1053                  */
1054                 if (rec->flags & FTRACE_FL_FREE ||
1055                     rec->flags & FTRACE_FL_FAILED ||
1056                     !(rec->flags & FTRACE_FL_CONVERTED))
1057                         continue;
1058
1059                 failed = __ftrace_replace_code(rec, enable);
1060                 if (failed) {
1061                         rec->flags |= FTRACE_FL_FAILED;
1062                         ftrace_bug(failed, rec->ip);
1063                         /* Stop processing */
1064                         return;
1065                 }
1066         } while_for_each_ftrace_rec();
1067 }
1068
1069 static int
1070 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1071 {
1072         unsigned long ip;
1073         int ret;
1074
1075         ip = rec->ip;
1076
1077         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1078         if (ret) {
1079                 ftrace_bug(ret, ip);
1080                 rec->flags |= FTRACE_FL_FAILED;
1081                 return 0;
1082         }
1083         return 1;
1084 }
1085
1086 /*
1087  * archs can override this function if they must do something
1088  * before the modifying code is performed.
1089  */
1090 int __weak ftrace_arch_code_modify_prepare(void)
1091 {
1092         return 0;
1093 }
1094
1095 /*
1096  * archs can override this function if they must do something
1097  * after the modifying code is performed.
1098  */
1099 int __weak ftrace_arch_code_modify_post_process(void)
1100 {
1101         return 0;
1102 }
1103
1104 static int __ftrace_modify_code(void *data)
1105 {
1106         int *command = data;
1107
1108         if (*command & FTRACE_ENABLE_CALLS)
1109                 ftrace_replace_code(1);
1110         else if (*command & FTRACE_DISABLE_CALLS)
1111                 ftrace_replace_code(0);
1112
1113         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1114                 ftrace_update_ftrace_func(ftrace_trace_function);
1115
1116         if (*command & FTRACE_START_FUNC_RET)
1117                 ftrace_enable_ftrace_graph_caller();
1118         else if (*command & FTRACE_STOP_FUNC_RET)
1119                 ftrace_disable_ftrace_graph_caller();
1120
1121         return 0;
1122 }
1123
1124 static void ftrace_run_update_code(int command)
1125 {
1126         int ret;
1127
1128         ret = ftrace_arch_code_modify_prepare();
1129         FTRACE_WARN_ON(ret);
1130         if (ret)
1131                 return;
1132
1133         stop_machine(__ftrace_modify_code, &command, NULL);
1134
1135         ret = ftrace_arch_code_modify_post_process();
1136         FTRACE_WARN_ON(ret);
1137 }
1138
1139 static ftrace_func_t saved_ftrace_func;
1140 static int ftrace_start_up;
1141
1142 static void ftrace_startup_enable(int command)
1143 {
1144         if (saved_ftrace_func != ftrace_trace_function) {
1145                 saved_ftrace_func = ftrace_trace_function;
1146                 command |= FTRACE_UPDATE_TRACE_FUNC;
1147         }
1148
1149         if (!command || !ftrace_enabled)
1150                 return;
1151
1152         ftrace_run_update_code(command);
1153 }
1154
1155 static void ftrace_startup(int command)
1156 {
1157         if (unlikely(ftrace_disabled))
1158                 return;
1159
1160         ftrace_start_up++;
1161         command |= FTRACE_ENABLE_CALLS;
1162
1163         ftrace_startup_enable(command);
1164 }
1165
1166 static void ftrace_shutdown(int command)
1167 {
1168         if (unlikely(ftrace_disabled))
1169                 return;
1170
1171         ftrace_start_up--;
1172         /*
1173          * Just warn in case of imbalance; no need to kill ftrace. It is not
1174          * critical, but the ftrace_call callers may never be nopped again after
1175          * further ftrace uses.
1176          */
1177         WARN_ON_ONCE(ftrace_start_up < 0);
1178
1179         if (!ftrace_start_up)
1180                 command |= FTRACE_DISABLE_CALLS;
1181
1182         if (saved_ftrace_func != ftrace_trace_function) {
1183                 saved_ftrace_func = ftrace_trace_function;
1184                 command |= FTRACE_UPDATE_TRACE_FUNC;
1185         }
1186
1187         if (!command || !ftrace_enabled)
1188                 return;
1189
1190         ftrace_run_update_code(command);
1191 }
1192
1193 static void ftrace_startup_sysctl(void)
1194 {
1195         int command = FTRACE_ENABLE_MCOUNT;
1196
1197         if (unlikely(ftrace_disabled))
1198                 return;
1199
1200         /* Force update next time */
1201         saved_ftrace_func = NULL;
1202         /* ftrace_start_up is true if we want ftrace running */
1203         if (ftrace_start_up)
1204                 command |= FTRACE_ENABLE_CALLS;
1205
1206         ftrace_run_update_code(command);
1207 }
1208
1209 static void ftrace_shutdown_sysctl(void)
1210 {
1211         int command = FTRACE_DISABLE_MCOUNT;
1212
1213         if (unlikely(ftrace_disabled))
1214                 return;
1215
1216         /* ftrace_start_up is true if ftrace is running */
1217         if (ftrace_start_up)
1218                 command |= FTRACE_DISABLE_CALLS;
1219
1220         ftrace_run_update_code(command);
1221 }
1222
1223 static cycle_t          ftrace_update_time;
1224 static unsigned long    ftrace_update_cnt;
1225 unsigned long           ftrace_update_tot_cnt;
1226
1227 static int ftrace_update_code(struct module *mod)
1228 {
1229         struct dyn_ftrace *p;
1230         cycle_t start, stop;
1231
1232         start = ftrace_now(raw_smp_processor_id());
1233         ftrace_update_cnt = 0;
1234
1235         while (ftrace_new_addrs) {
1236
1237                 /* If something went wrong, bail without enabling anything */
1238                 if (unlikely(ftrace_disabled))
1239                         return -1;
1240
1241                 p = ftrace_new_addrs;
1242                 ftrace_new_addrs = p->newlist;
1243                 p->flags = 0L;
1244
1245                 /*
1246                  * Do the initial record conversion from mcount jump
1247                  * to the NOP instructions.
1248                  */
1249                 if (!ftrace_code_disable(mod, p)) {
1250                         ftrace_free_rec(p);
1251                         continue;
1252                 }
1253
1254                 p->flags |= FTRACE_FL_CONVERTED;
1255                 ftrace_update_cnt++;
1256
1257                 /*
1258                  * If the tracing is enabled, go ahead and enable the record.
1259                  *
1260                  * The reason not to enable the record immediately is the
1261                  * inherent check of ftrace_make_nop/ftrace_make_call for
1262                  * correct previous instructions.  Doing the NOP conversion
1263                  * first puts the module into the correct state, thus
1264                  * passing the ftrace_make_call check.
1265                  */
1266                 if (ftrace_start_up) {
1267                         int failed = __ftrace_replace_code(p, 1);
1268                         if (failed) {
1269                                 ftrace_bug(failed, p->ip);
1270                                 ftrace_free_rec(p);
1271                         }
1272                 }
1273         }
1274
1275         stop = ftrace_now(raw_smp_processor_id());
1276         ftrace_update_time = stop - start;
1277         ftrace_update_tot_cnt += ftrace_update_cnt;
1278
1279         return 0;
1280 }
1281
1282 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1283 {
1284         struct ftrace_page *pg;
1285         int cnt;
1286         int i;
1287
1288         /* allocate a few pages */
1289         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1290         if (!ftrace_pages_start)
1291                 return -1;
1292
1293         /*
1294          * Allocate a few more pages.
1295          *
1296          * TODO: have some parser search vmlinux before
1297          *   final linking to find all calls to ftrace.
1298          *   Then we can:
1299          *    a) know how many pages to allocate.
1300          *     and/or
1301          *    b) set up the table then.
1302          *
1303          *  The dynamic code is still necessary for
1304          *  modules.
1305          */
1306
1307         pg = ftrace_pages = ftrace_pages_start;
1308
1309         cnt = num_to_init / ENTRIES_PER_PAGE;
1310         pr_info("ftrace: allocating %ld entries in %d pages\n",
1311                 num_to_init, cnt + 1);
1312
1313         for (i = 0; i < cnt; i++) {
1314                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1315
1316                 /* If we fail, we'll try later anyway */
1317                 if (!pg->next)
1318                         break;
1319
1320                 pg = pg->next;
1321         }
1322
1323         return 0;
1324 }
1325
1326 enum {
1327         FTRACE_ITER_FILTER      = (1 << 0),
1328         FTRACE_ITER_NOTRACE     = (1 << 1),
1329         FTRACE_ITER_FAILURES    = (1 << 2),
1330         FTRACE_ITER_PRINTALL    = (1 << 3),
1331         FTRACE_ITER_HASH        = (1 << 4),
1332 };
1333
1334 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1335
1336 struct ftrace_iterator {
1337         struct ftrace_page      *pg;
1338         int                     hidx;
1339         int                     idx;
1340         unsigned                flags;
1341         struct trace_parser     parser;
1342 };
1343
1344 static void *
1345 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1346 {
1347         struct ftrace_iterator *iter = m->private;
1348         struct hlist_node *hnd = v;
1349         struct hlist_head *hhd;
1350
1351         WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1352
1353         (*pos)++;
1354
1355  retry:
1356         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1357                 return NULL;
1358
1359         hhd = &ftrace_func_hash[iter->hidx];
1360
1361         if (hlist_empty(hhd)) {
1362                 iter->hidx++;
1363                 hnd = NULL;
1364                 goto retry;
1365         }
1366
1367         if (!hnd)
1368                 hnd = hhd->first;
1369         else {
1370                 hnd = hnd->next;
1371                 if (!hnd) {
1372                         iter->hidx++;
1373                         goto retry;
1374                 }
1375         }
1376
1377         return hnd;
1378 }
1379
1380 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1381 {
1382         struct ftrace_iterator *iter = m->private;
1383         void *p = NULL;
1384         loff_t l;
1385
1386         if (!(iter->flags & FTRACE_ITER_HASH))
1387                 *pos = 0;
1388
1389         iter->flags |= FTRACE_ITER_HASH;
1390
1391         iter->hidx = 0;
1392         for (l = 0; l <= *pos; ) {
1393                 p = t_hash_next(m, p, &l);
1394                 if (!p)
1395                         break;
1396         }
1397         return p;
1398 }
1399
1400 static int t_hash_show(struct seq_file *m, void *v)
1401 {
1402         struct ftrace_func_probe *rec;
1403         struct hlist_node *hnd = v;
1404
1405         rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1406
1407         if (rec->ops->print)
1408                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1409
1410         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1411
1412         if (rec->data)
1413                 seq_printf(m, ":%p", rec->data);
1414         seq_putc(m, '\n');
1415
1416         return 0;
1417 }
1418
1419 static void *
1420 t_next(struct seq_file *m, void *v, loff_t *pos)
1421 {
1422         struct ftrace_iterator *iter = m->private;
1423         struct dyn_ftrace *rec = NULL;
1424
1425         if (iter->flags & FTRACE_ITER_HASH)
1426                 return t_hash_next(m, v, pos);
1427
1428         (*pos)++;
1429
1430         if (iter->flags & FTRACE_ITER_PRINTALL)
1431                 return NULL;
1432
1433  retry:
1434         if (iter->idx >= iter->pg->index) {
1435                 if (iter->pg->next) {
1436                         iter->pg = iter->pg->next;
1437                         iter->idx = 0;
1438                         goto retry;
1439                 }
1440         } else {
1441                 rec = &iter->pg->records[iter->idx++];
1442                 if ((rec->flags & FTRACE_FL_FREE) ||
1443
1444                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
1445                      (rec->flags & FTRACE_FL_FAILED)) ||
1446
1447                     ((iter->flags & FTRACE_ITER_FAILURES) &&
1448                      !(rec->flags & FTRACE_FL_FAILED)) ||
1449
1450                     ((iter->flags & FTRACE_ITER_FILTER) &&
1451                      !(rec->flags & FTRACE_FL_FILTER)) ||
1452
1453                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1454                      !(rec->flags & FTRACE_FL_NOTRACE))) {
1455                         rec = NULL;
1456                         goto retry;
1457                 }
1458         }
1459
1460         return rec;
1461 }
1462
1463 static void *t_start(struct seq_file *m, loff_t *pos)
1464 {
1465         struct ftrace_iterator *iter = m->private;
1466         void *p = NULL;
1467         loff_t l;
1468
1469         mutex_lock(&ftrace_lock);
1470         /*
1471          * For set_ftrace_filter reading, if we have the filter
1472          * off, we can short cut and just print out that all
1473          * functions are enabled.
1474          */
1475         if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1476                 if (*pos > 0)
1477                         return t_hash_start(m, pos);
1478                 iter->flags |= FTRACE_ITER_PRINTALL;
1479                 return iter;
1480         }
1481
1482         if (iter->flags & FTRACE_ITER_HASH)
1483                 return t_hash_start(m, pos);
1484
1485         iter->pg = ftrace_pages_start;
1486         iter->idx = 0;
1487         for (l = 0; l <= *pos; ) {
1488                 p = t_next(m, p, &l);
1489                 if (!p)
1490                         break;
1491         }
1492
1493         if (!p && iter->flags & FTRACE_ITER_FILTER)
1494                 return t_hash_start(m, pos);
1495
1496         return p;
1497 }
1498
1499 static void t_stop(struct seq_file *m, void *p)
1500 {
1501         mutex_unlock(&ftrace_lock);
1502 }
1503
1504 static int t_show(struct seq_file *m, void *v)
1505 {
1506         struct ftrace_iterator *iter = m->private;
1507         struct dyn_ftrace *rec = v;
1508
1509         if (iter->flags & FTRACE_ITER_HASH)
1510                 return t_hash_show(m, v);
1511
1512         if (iter->flags & FTRACE_ITER_PRINTALL) {
1513                 seq_printf(m, "#### all functions enabled ####\n");
1514                 return 0;
1515         }
1516
1517         if (!rec)
1518                 return 0;
1519
1520         seq_printf(m, "%ps\n", (void *)rec->ip);
1521
1522         return 0;
1523 }
1524
1525 static const struct seq_operations show_ftrace_seq_ops = {
1526         .start = t_start,
1527         .next = t_next,
1528         .stop = t_stop,
1529         .show = t_show,
1530 };
1531
1532 static int
1533 ftrace_avail_open(struct inode *inode, struct file *file)
1534 {
1535         struct ftrace_iterator *iter;
1536         int ret;
1537
1538         if (unlikely(ftrace_disabled))
1539                 return -ENODEV;
1540
1541         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1542         if (!iter)
1543                 return -ENOMEM;
1544
1545         iter->pg = ftrace_pages_start;
1546
1547         ret = seq_open(file, &show_ftrace_seq_ops);
1548         if (!ret) {
1549                 struct seq_file *m = file->private_data;
1550
1551                 m->private = iter;
1552         } else {
1553                 kfree(iter);
1554         }
1555
1556         return ret;
1557 }
1558
1559 static int
1560 ftrace_failures_open(struct inode *inode, struct file *file)
1561 {
1562         int ret;
1563         struct seq_file *m;
1564         struct ftrace_iterator *iter;
1565
1566         ret = ftrace_avail_open(inode, file);
1567         if (!ret) {
1568                 m = (struct seq_file *)file->private_data;
1569                 iter = (struct ftrace_iterator *)m->private;
1570                 iter->flags = FTRACE_ITER_FAILURES;
1571         }
1572
1573         return ret;
1574 }
1575
1576
1577 static void ftrace_filter_reset(int enable)
1578 {
1579         struct ftrace_page *pg;
1580         struct dyn_ftrace *rec;
1581         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1582
1583         mutex_lock(&ftrace_lock);
1584         if (enable)
1585                 ftrace_filtered = 0;
1586         do_for_each_ftrace_rec(pg, rec) {
1587                 if (rec->flags & FTRACE_FL_FAILED)
1588                         continue;
1589                 rec->flags &= ~type;
1590         } while_for_each_ftrace_rec();
1591         mutex_unlock(&ftrace_lock);
1592 }
1593
1594 static int
1595 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1596 {
1597         struct ftrace_iterator *iter;
1598         int ret = 0;
1599
1600         if (unlikely(ftrace_disabled))
1601                 return -ENODEV;
1602
1603         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1604         if (!iter)
1605                 return -ENOMEM;
1606
1607         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
1608                 kfree(iter);
1609                 return -ENOMEM;
1610         }
1611
1612         mutex_lock(&ftrace_regex_lock);
1613         if ((file->f_mode & FMODE_WRITE) &&
1614             (file->f_flags & O_TRUNC))
1615                 ftrace_filter_reset(enable);
1616
1617         if (file->f_mode & FMODE_READ) {
1618                 iter->pg = ftrace_pages_start;
1619                 iter->flags = enable ? FTRACE_ITER_FILTER :
1620                         FTRACE_ITER_NOTRACE;
1621
1622                 ret = seq_open(file, &show_ftrace_seq_ops);
1623                 if (!ret) {
1624                         struct seq_file *m = file->private_data;
1625                         m->private = iter;
1626                 } else {
1627                         trace_parser_put(&iter->parser);
1628                         kfree(iter);
1629                 }
1630         } else
1631                 file->private_data = iter;
1632         mutex_unlock(&ftrace_regex_lock);
1633
1634         return ret;
1635 }
1636
1637 static int
1638 ftrace_filter_open(struct inode *inode, struct file *file)
1639 {
1640         return ftrace_regex_open(inode, file, 1);
1641 }
1642
1643 static int
1644 ftrace_notrace_open(struct inode *inode, struct file *file)
1645 {
1646         return ftrace_regex_open(inode, file, 0);
1647 }
1648
1649 static loff_t
1650 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1651 {
1652         loff_t ret;
1653
1654         if (file->f_mode & FMODE_READ)
1655                 ret = seq_lseek(file, offset, origin);
1656         else
1657                 file->f_pos = ret = 1;
1658
1659         return ret;
1660 }
1661
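/*
 * Editor's note: 'type' comes from filter_parse_regex(), which maps the
 * glob forms accepted by set_ftrace_filter to these cases, e.g.
 * "sched_switch" -> MATCH_FULL, "sched_*" -> MATCH_FRONT_ONLY,
 * "*switch" -> MATCH_END_ONLY and "*sched*" -> MATCH_MIDDLE_ONLY.
 */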
1662 static int ftrace_match(char *str, char *regex, int len, int type)
1663 {
1664         int matched = 0;
1665         int slen;
1666
1667         switch (type) {
1668         case MATCH_FULL:
1669                 if (strcmp(str, regex) == 0)
1670                         matched = 1;
1671                 break;
1672         case MATCH_FRONT_ONLY:
1673                 if (strncmp(str, regex, len) == 0)
1674                         matched = 1;
1675                 break;
1676         case MATCH_MIDDLE_ONLY:
1677                 if (strstr(str, regex))
1678                         matched = 1;
1679                 break;
1680         case MATCH_END_ONLY:
1681                 slen = strlen(str);
1682                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
1683                         matched = 1;
1684                 break;
1685         }
1686
1687         return matched;
1688 }
1689
1690 static int
1691 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1692 {
1693         char str[KSYM_SYMBOL_LEN];
1694
1695         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1696         return ftrace_match(str, regex, len, type);
1697 }
1698
1699 static int ftrace_match_records(char *buff, int len, int enable)
1700 {
1701         unsigned int search_len;
1702         struct ftrace_page *pg;
1703         struct dyn_ftrace *rec;
1704         unsigned long flag;
1705         char *search;
1706         int type;
1707         int not;
1708         int found = 0;
1709
1710         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1711         type = filter_parse_regex(buff, len, &search, &not);
1712
1713         search_len = strlen(search);
1714
1715         mutex_lock(&ftrace_lock);
1716         do_for_each_ftrace_rec(pg, rec) {
1717
1718                 if (rec->flags & FTRACE_FL_FAILED)
1719                         continue;
1720
1721                 if (ftrace_match_record(rec, search, search_len, type)) {
1722                         if (not)
1723                                 rec->flags &= ~flag;
1724                         else
1725                                 rec->flags |= flag;
1726                         found = 1;
1727                 }
1728                 /*
1729                  * Only enable filtering if we have a function that
1730                  * is filtered on.
1731                  */
1732                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1733                         ftrace_filtered = 1;
1734         } while_for_each_ftrace_rec();
1735         mutex_unlock(&ftrace_lock);
1736
1737         return found;
1738 }
1739
1740 static int
1741 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1742                            char *regex, int len, int type)
1743 {
1744         char str[KSYM_SYMBOL_LEN];
1745         char *modname;
1746
1747         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1748
1749         if (!modname || strcmp(modname, mod))
1750                 return 0;
1751
1752         /* blank search means to match all funcs in the mod */
1753         if (len)
1754                 return ftrace_match(str, regex, len, type);
1755         else
1756                 return 1;
1757 }
1758
1759 static int ftrace_match_module_records(char *buff, char *mod, int enable)
1760 {
1761         unsigned search_len = 0;
1762         struct ftrace_page *pg;
1763         struct dyn_ftrace *rec;
1764         int type = MATCH_FULL;
1765         char *search = buff;
1766         unsigned long flag;
1767         int not = 0;
1768         int found = 0;
1769
1770         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1771
1772         /* blank or '*' mean the same */
1773         if (strcmp(buff, "*") == 0)
1774                 buff[0] = 0;
1775
1776         /* handle the case of 'don't filter this module' */
1777         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1778                 buff[0] = 0;
1779                 not = 1;
1780         }
1781
1782         if (strlen(buff)) {
1783                 type = filter_parse_regex(buff, strlen(buff), &search, &not);
1784                 search_len = strlen(search);
1785         }
1786
1787         mutex_lock(&ftrace_lock);
1788         do_for_each_ftrace_rec(pg, rec) {
1789
1790                 if (rec->flags & FTRACE_FL_FAILED)
1791                         continue;
1792
1793                 if (ftrace_match_module_record(rec, mod,
1794                                                search, search_len, type)) {
1795                         if (not)
1796                                 rec->flags &= ~flag;
1797                         else
1798                                 rec->flags |= flag;
1799                         found = 1;
1800                 }
1801                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1802                         ftrace_filtered = 1;
1803
1804         } while_for_each_ftrace_rec();
1805         mutex_unlock(&ftrace_lock);
1806
1807         return found;
1808 }
1809
1810 /*
1811  * We register the module command as a template to show others how
1812  * to register a command as well.
1813  */
1814
1815 static int
1816 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1817 {
1818         char *mod;
1819
1820         /*
1821          * cmd == 'mod' because we only registered this func
1822          * for the 'mod' ftrace_func_command.
1823          * But if you register one func with multiple commands,
1824          * you can tell which command was used by the cmd
1825          * parameter.
1826          */
1827
1828         /* we must have a module name */
1829         if (!param)
1830                 return -EINVAL;
1831
1832         mod = strsep(&param, ":");
1833         if (!strlen(mod))
1834                 return -EINVAL;
1835
1836         if (ftrace_match_module_records(func, mod, enable))
1837                 return 0;
1838         return -EINVAL;
1839 }
1840
1841 static struct ftrace_func_command ftrace_mod_cmd = {
1842         .name                   = "mod",
1843         .func                   = ftrace_mod_callback,
1844 };
1845
1846 static int __init ftrace_mod_cmd_init(void)
1847 {
1848         return register_ftrace_command(&ftrace_mod_cmd);
1849 }
1850 device_initcall(ftrace_mod_cmd_init);
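
/*
 * A minimal sketch of how another command could be wired up from the
 * template above ("foo" and its callback are purely hypothetical, not
 * part of this file):
 *
 *   static int ftrace_foo_callback(char *func, char *cmd, char *param,
 *                                  int enable)
 *   {
 *           ... act on every function matched by 'func' ...
 *           return 0;
 *   }
 *
 *   static struct ftrace_func_command ftrace_foo_cmd = {
 *           .name   = "foo",
 *           .func   = ftrace_foo_callback,
 *   };
 *
 *   register_ftrace_command(&ftrace_foo_cmd);
 *
 * The command is then reachable as "<match>:foo:<param>" through the
 * set_ftrace_filter interface (see ftrace_process_regex() below).
 */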
1851
1852 static void
1853 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1854 {
1855         struct ftrace_func_probe *entry;
1856         struct hlist_head *hhd;
1857         struct hlist_node *n;
1858         unsigned long key;
1859         int resched;
1860
1861         key = hash_long(ip, FTRACE_HASH_BITS);
1862
1863         hhd = &ftrace_func_hash[key];
1864
1865         if (hlist_empty(hhd))
1866                 return;
1867
1868         /*
1869                  * Disable preemption for these calls to prevent an RCU grace
1870                  * period. This synchronizes the hash iteration with the freeing
1871                  * of items on the hash. rcu_read_lock is too dangerous here.
1872          */
1873         resched = ftrace_preempt_disable();
1874         hlist_for_each_entry_rcu(entry, n, hhd, node) {
1875                 if (entry->ip == ip)
1876                         entry->ops->func(ip, parent_ip, &entry->data);
1877         }
1878         ftrace_preempt_enable(resched);
1879 }
1880
1881 static struct ftrace_ops trace_probe_ops __read_mostly =
1882 {
1883         .func           = function_trace_probe_call,
1884 };
1885
1886 static int ftrace_probe_registered;
1887
1888 static void __enable_ftrace_function_probe(void)
1889 {
1890         int i;
1891
1892         if (ftrace_probe_registered)
1893                 return;
1894
1895         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1896                 struct hlist_head *hhd = &ftrace_func_hash[i];
1897                 if (hhd->first)
1898                         break;
1899         }
1900         /* Nothing registered? */
1901         if (i == FTRACE_FUNC_HASHSIZE)
1902                 return;
1903
1904         __register_ftrace_function(&trace_probe_ops);
1905         ftrace_startup(0);
1906         ftrace_probe_registered = 1;
1907 }
1908
1909 static void __disable_ftrace_function_probe(void)
1910 {
1911         int i;
1912
1913         if (!ftrace_probe_registered)
1914                 return;
1915
1916         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1917                 struct hlist_head *hhd = &ftrace_func_hash[i];
1918                 if (hhd->first)
1919                         return;
1920         }
1921
1922         /* no more funcs left */
1923         __unregister_ftrace_function(&trace_probe_ops);
1924         ftrace_shutdown(0);
1925         ftrace_probe_registered = 0;
1926 }
1927
1928
1929 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1930 {
1931         struct ftrace_func_probe *entry =
1932                 container_of(rhp, struct ftrace_func_probe, rcu);
1933
1934         if (entry->ops->free)
1935                 entry->ops->free(&entry->data);
1936         kfree(entry);
1937 }
1938
1939
1940 int
1941 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1942                               void *data)
1943 {
1944         struct ftrace_func_probe *entry;
1945         struct ftrace_page *pg;
1946         struct dyn_ftrace *rec;
1947         int type, len, not;
1948         unsigned long key;
1949         int count = 0;
1950         char *search;
1951
1952         type = filter_parse_regex(glob, strlen(glob), &search, &not);
1953         len = strlen(search);
1954
1955         /* we do not support '!' for function probes */
1956         if (WARN_ON(not))
1957                 return -EINVAL;
1958
1959         mutex_lock(&ftrace_lock);
1960         do_for_each_ftrace_rec(pg, rec) {
1961
1962                 if (rec->flags & FTRACE_FL_FAILED)
1963                         continue;
1964
1965                 if (!ftrace_match_record(rec, search, len, type))
1966                         continue;
1967
1968                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1969                 if (!entry) {
1970                         /* If we did not process any, then return error */
1971                         if (!count)
1972                                 count = -ENOMEM;
1973                         goto out_unlock;
1974                 }
1975
1976                 count++;
1977
1978                 entry->data = data;
1979
1980                 /*
1981                  * The caller might want to do something special
1982                  * for each function we find. We call the callback
1983                  * to give the caller an opportunity to do so.
1984                  */
1985                 if (ops->callback) {
1986                         if (ops->callback(rec->ip, &entry->data) < 0) {
1987                                 /* caller does not like this func */
1988                                 kfree(entry);
1989                                 continue;
1990                         }
1991                 }
1992
1993                 entry->ops = ops;
1994                 entry->ip = rec->ip;
1995
1996                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1997                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1998
1999         } while_for_each_ftrace_rec();
2000         __enable_ftrace_function_probe();
2001
2002  out_unlock:
2003         mutex_unlock(&ftrace_lock);
2004
2005         return count;
2006 }
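
/*
 * A minimal sketch of a caller of register_ftrace_function_probe()
 * (the names below are hypothetical):
 *
 *   static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *                             void **data)
 *   {
 *           ... per-hit work ...
 *   }
 *
 *   static struct ftrace_probe_ops my_probe_ops = {
 *           .func = my_probe_func,
 *   };
 *
 *   register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * my_probe_func() is then invoked from function_trace_probe_call()
 * each time one of the matched functions is hit, until the probe is
 * removed with one of the unregister_ftrace_function_probe*() calls.
 */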
2007
2008 enum {
2009         PROBE_TEST_FUNC         = 1,
2010         PROBE_TEST_DATA         = 2
2011 };
2012
2013 static void
2014 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2015                                   void *data, int flags)
2016 {
2017         struct ftrace_func_probe *entry;
2018         struct hlist_node *n, *tmp;
2019         char str[KSYM_SYMBOL_LEN];
2020         int type = MATCH_FULL;
2021         int i, len = 0;
2022         char *search;
2023
2024         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2025                 glob = NULL;
2026         else if (glob) {
2027                 int not;
2028
2029                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2030                 len = strlen(search);
2031
2032                 /* we do not support '!' for function probes */
2033                 if (WARN_ON(not))
2034                         return;
2035         }
2036
2037         mutex_lock(&ftrace_lock);
2038         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2039                 struct hlist_head *hhd = &ftrace_func_hash[i];
2040
2041                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2042
2043                         /* break up if statements for readability */
2044                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2045                                 continue;
2046
2047                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2048                                 continue;
2049
2050                         /* do this last, since it is the most expensive */
2051                         if (glob) {
2052                                 kallsyms_lookup(entry->ip, NULL, NULL,
2053                                                 NULL, str);
2054                                 if (!ftrace_match(str, glob, len, type))
2055                                         continue;
2056                         }
2057
2058                         hlist_del(&entry->node);
2059                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2060                 }
2061         }
2062         __disable_ftrace_function_probe();
2063         mutex_unlock(&ftrace_lock);
2064 }
2065
2066 void
2067 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2068                                 void *data)
2069 {
2070         __unregister_ftrace_function_probe(glob, ops, data,
2071                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2072 }
2073
2074 void
2075 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2076 {
2077         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2078 }
2079
2080 void unregister_ftrace_function_probe_all(char *glob)
2081 {
2082         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2083 }
2084
2085 static LIST_HEAD(ftrace_commands);
2086 static DEFINE_MUTEX(ftrace_cmd_mutex);
2087
2088 int register_ftrace_command(struct ftrace_func_command *cmd)
2089 {
2090         struct ftrace_func_command *p;
2091         int ret = 0;
2092
2093         mutex_lock(&ftrace_cmd_mutex);
2094         list_for_each_entry(p, &ftrace_commands, list) {
2095                 if (strcmp(cmd->name, p->name) == 0) {
2096                         ret = -EBUSY;
2097                         goto out_unlock;
2098                 }
2099         }
2100         list_add(&cmd->list, &ftrace_commands);
2101  out_unlock:
2102         mutex_unlock(&ftrace_cmd_mutex);
2103
2104         return ret;
2105 }
2106
2107 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2108 {
2109         struct ftrace_func_command *p, *n;
2110         int ret = -ENODEV;
2111
2112         mutex_lock(&ftrace_cmd_mutex);
2113         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2114                 if (strcmp(cmd->name, p->name) == 0) {
2115                         ret = 0;
2116                         list_del_init(&p->list);
2117                         goto out_unlock;
2118                 }
2119         }
2120  out_unlock:
2121         mutex_unlock(&ftrace_cmd_mutex);
2122
2123         return ret;
2124 }
2125
2126 static int ftrace_process_regex(char *buff, int len, int enable)
2127 {
2128         char *func, *command, *next = buff;
2129         struct ftrace_func_command *p;
2130         int ret = -EINVAL;
2131
2132         func = strsep(&next, ":");
2133
2134         if (!next) {
2135                 if (ftrace_match_records(func, len, enable))
2136                         return 0;
2137                 return ret;
2138         }
2139
2140         /* command found */
2141
2142         command = strsep(&next, ":");
2143
2144         mutex_lock(&ftrace_cmd_mutex);
2145         list_for_each_entry(p, &ftrace_commands, list) {
2146                 if (strcmp(p->name, command) == 0) {
2147                         ret = p->func(func, command, next, enable);
2148                         goto out_unlock;
2149                 }
2150         }
2151  out_unlock:
2152         mutex_unlock(&ftrace_cmd_mutex);
2153
2154         return ret;
2155 }
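
/*
 * So the syntax accepted through the filter files is
 * "<match>[:<command>[:<parameter>]]", for example (the module name is
 * purely illustrative):
 *
 *   echo 'sys_open'   > set_ftrace_filter      plain function match
 *   echo '*:mod:ext3' > set_ftrace_filter      run the "mod" command
 *                                              registered above
 */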
2156
2157 static ssize_t
2158 ftrace_regex_write(struct file *file, const char __user *ubuf,
2159                    size_t cnt, loff_t *ppos, int enable)
2160 {
2161         struct ftrace_iterator *iter;
2162         struct trace_parser *parser;
2163         ssize_t ret, read;
2164
2165         if (!cnt)
2166                 return 0;
2167
2168         mutex_lock(&ftrace_regex_lock);
2169
2170         if (file->f_mode & FMODE_READ) {
2171                 struct seq_file *m = file->private_data;
2172                 iter = m->private;
2173         } else
2174                 iter = file->private_data;
2175
2176         parser = &iter->parser;
2177         read = trace_get_user(parser, ubuf, cnt, ppos);
2178
2179         if (read >= 0 && trace_parser_loaded(parser) &&
2180             !trace_parser_cont(parser)) {
2181                 ret = ftrace_process_regex(parser->buffer,
2182                                            parser->idx, enable);
2183                 trace_parser_clear(parser);
2184                 if (ret)
2185                         goto out_unlock;
2186         }
2187
2188         ret = read;
2189 out_unlock:
2190         mutex_unlock(&ftrace_regex_lock);
2191
2192         return ret;
2193 }
2194
2195 static ssize_t
2196 ftrace_filter_write(struct file *file, const char __user *ubuf,
2197                     size_t cnt, loff_t *ppos)
2198 {
2199         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2200 }
2201
2202 static ssize_t
2203 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2204                      size_t cnt, loff_t *ppos)
2205 {
2206         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2207 }
2208
2209 static void
2210 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2211 {
2212         if (unlikely(ftrace_disabled))
2213                 return;
2214
2215         mutex_lock(&ftrace_regex_lock);
2216         if (reset)
2217                 ftrace_filter_reset(enable);
2218         if (buf)
2219                 ftrace_match_records(buf, len, enable);
2220         mutex_unlock(&ftrace_regex_lock);
2221 }
2222
2223 /**
2224  * ftrace_set_filter - set a function to filter on in ftrace
2225  * @buf - the string that holds the function filter text.
2226  * @len - the length of the string.
2227  * @reset - non zero to reset all filters before applying this filter.
2228  *
2229  * Filters denote which functions should be enabled when tracing is enabled.
2230  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2231  */
2232 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2233 {
2234         ftrace_set_regex(buf, len, reset, 1);
2235 }
2236
2237 /**
2238  * ftrace_set_notrace - set a function to not trace in ftrace
2239  * @buf - the string that holds the function notrace text.
2240  * @len - the length of the string.
2241  * @reset - non zero to reset all filters before applying this filter.
2242  *
2243  * Notrace Filters denote which functions should not be enabled when tracing
2244  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2245  * for tracing.
2246  */
2247 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2248 {
2249         ftrace_set_regex(buf, len, reset, 0);
2250 }
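
/*
 * A minimal in-kernel usage sketch of the two helpers above (the
 * function name is only an example):
 *
 *   char *name = "schedule";
 *
 *   ftrace_set_filter(name, strlen(name), 1);
 *
 * With reset == 1 this first clears any existing filter, then traces
 * only schedule(); ftrace_set_notrace() is used the same way to
 * exclude functions instead.
 */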
2251
2252 /*
2253  * command line interface to allow users to set filters on boot up.
2254  */
2255 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2256 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2257 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2258
2259 static int __init set_ftrace_notrace(char *str)
2260 {
2261         strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2262         return 1;
2263 }
2264 __setup("ftrace_notrace=", set_ftrace_notrace);
2265
2266 static int __init set_ftrace_filter(char *str)
2267 {
2268         strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2269         return 1;
2270 }
2271 __setup("ftrace_filter=", set_ftrace_filter);
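
/*
 * Example boot parameters handled by the two __setup() hooks above
 * (the function names are only illustrative):
 *
 *   ftrace_filter=sched_*,do_fork
 *   ftrace_notrace=*spin_lock*
 *
 * Each comma-separated entry is applied by set_ftrace_early_filter()
 * from set_ftrace_early_filters() during ftrace_init().
 */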
2272
2273 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2274 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2275 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2276
2277 static int __init set_graph_function(char *str)
2278 {
2279         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2280         return 1;
2281 }
2282 __setup("ftrace_graph_filter=", set_graph_function);
2283
2284 static void __init set_ftrace_early_graph(char *buf)
2285 {
2286         int ret;
2287         char *func;
2288
2289         while (buf) {
2290                 func = strsep(&buf, ",");
2291                 /* we allow only one expression at a time */
2292                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2293                                       func);
2294                 if (ret)
2295                         printk(KERN_DEBUG "ftrace: function %s not "
2296                                           "traceable\n", func);
2297         }
2298 }
2299 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2300
2301 static void __init set_ftrace_early_filter(char *buf, int enable)
2302 {
2303         char *func;
2304
2305         while (buf) {
2306                 func = strsep(&buf, ",");
2307                 ftrace_set_regex(func, strlen(func), 0, enable);
2308         }
2309 }
2310
2311 static void __init set_ftrace_early_filters(void)
2312 {
2313         if (ftrace_filter_buf[0])
2314                 set_ftrace_early_filter(ftrace_filter_buf, 1);
2315         if (ftrace_notrace_buf[0])
2316                 set_ftrace_early_filter(ftrace_notrace_buf, 0);
2317 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2318         if (ftrace_graph_buf[0])
2319                 set_ftrace_early_graph(ftrace_graph_buf);
2320 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2321 }
2322
2323 static int
2324 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2325 {
2326         struct seq_file *m = (struct seq_file *)file->private_data;
2327         struct ftrace_iterator *iter;
2328         struct trace_parser *parser;
2329
2330         mutex_lock(&ftrace_regex_lock);
2331         if (file->f_mode & FMODE_READ) {
2332                 iter = m->private;
2333
2334                 seq_release(inode, file);
2335         } else
2336                 iter = file->private_data;
2337
2338         parser = &iter->parser;
2339         if (trace_parser_loaded(parser)) {
2340                 parser->buffer[parser->idx] = 0;
2341                 ftrace_match_records(parser->buffer, parser->idx, enable);
2342         }
2343
2344         mutex_lock(&ftrace_lock);
2345         if (ftrace_start_up && ftrace_enabled)
2346                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2347         mutex_unlock(&ftrace_lock);
2348
2349         trace_parser_put(parser);
2350         kfree(iter);
2351
2352         mutex_unlock(&ftrace_regex_lock);
2353         return 0;
2354 }
2355
2356 static int
2357 ftrace_filter_release(struct inode *inode, struct file *file)
2358 {
2359         return ftrace_regex_release(inode, file, 1);
2360 }
2361
2362 static int
2363 ftrace_notrace_release(struct inode *inode, struct file *file)
2364 {
2365         return ftrace_regex_release(inode, file, 0);
2366 }
2367
2368 static const struct file_operations ftrace_avail_fops = {
2369         .open = ftrace_avail_open,
2370         .read = seq_read,
2371         .llseek = seq_lseek,
2372         .release = seq_release_private,
2373 };
2374
2375 static const struct file_operations ftrace_failures_fops = {
2376         .open = ftrace_failures_open,
2377         .read = seq_read,
2378         .llseek = seq_lseek,
2379         .release = seq_release_private,
2380 };
2381
2382 static const struct file_operations ftrace_filter_fops = {
2383         .open = ftrace_filter_open,
2384         .read = seq_read,
2385         .write = ftrace_filter_write,
2386         .llseek = ftrace_regex_lseek,
2387         .release = ftrace_filter_release,
2388 };
2389
2390 static const struct file_operations ftrace_notrace_fops = {
2391         .open = ftrace_notrace_open,
2392         .read = seq_read,
2393         .write = ftrace_notrace_write,
2394         .llseek = ftrace_regex_lseek,
2395         .release = ftrace_notrace_release,
2396 };
2397
2398 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2399
2400 static DEFINE_MUTEX(graph_lock);
2401
2402 int ftrace_graph_count;
2403 int ftrace_graph_filter_enabled;
2404 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2405
2406 static void *
2407 __g_next(struct seq_file *m, loff_t *pos)
2408 {
2409         if (*pos >= ftrace_graph_count)
2410                 return NULL;
2411         return &ftrace_graph_funcs[*pos];
2412 }
2413
2414 static void *
2415 g_next(struct seq_file *m, void *v, loff_t *pos)
2416 {
2417         (*pos)++;
2418         return __g_next(m, pos);
2419 }
2420
2421 static void *g_start(struct seq_file *m, loff_t *pos)
2422 {
2423         mutex_lock(&graph_lock);
2424
2425         /* Nothing filtered; tell g_show to print that all functions are enabled */
2426         if (!ftrace_graph_filter_enabled && !*pos)
2427                 return (void *)1;
2428
2429         return __g_next(m, pos);
2430 }
2431
2432 static void g_stop(struct seq_file *m, void *p)
2433 {
2434         mutex_unlock(&graph_lock);
2435 }
2436
2437 static int g_show(struct seq_file *m, void *v)
2438 {
2439         unsigned long *ptr = v;
2440
2441         if (!ptr)
2442                 return 0;
2443
2444         if (ptr == (unsigned long *)1) {
2445                 seq_printf(m, "#### all functions enabled ####\n");
2446                 return 0;
2447         }
2448
2449         seq_printf(m, "%ps\n", (void *)*ptr);
2450
2451         return 0;
2452 }
2453
2454 static const struct seq_operations ftrace_graph_seq_ops = {
2455         .start = g_start,
2456         .next = g_next,
2457         .stop = g_stop,
2458         .show = g_show,
2459 };
2460
2461 static int
2462 ftrace_graph_open(struct inode *inode, struct file *file)
2463 {
2464         int ret = 0;
2465
2466         if (unlikely(ftrace_disabled))
2467                 return -ENODEV;
2468
2469         mutex_lock(&graph_lock);
2470         if ((file->f_mode & FMODE_WRITE) &&
2471             (file->f_flags & O_TRUNC)) {
2472                 ftrace_graph_filter_enabled = 0;
2473                 ftrace_graph_count = 0;
2474                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2475         }
2476         mutex_unlock(&graph_lock);
2477
2478         if (file->f_mode & FMODE_READ)
2479                 ret = seq_open(file, &ftrace_graph_seq_ops);
2480
2481         return ret;
2482 }
2483
2484 static int
2485 ftrace_graph_release(struct inode *inode, struct file *file)
2486 {
2487         if (file->f_mode & FMODE_READ)
2488                 seq_release(inode, file);
2489         return 0;
2490 }
2491
2492 static int
2493 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2494 {
2495         struct dyn_ftrace *rec;
2496         struct ftrace_page *pg;
2497         int search_len;
2498         int fail = 1;
2499         int type, not;
2500         char *search;
2501         bool exists;
2502         int i;
2503
2504         if (ftrace_disabled)
2505                 return -ENODEV;
2506
2507         /* decode regex */
2508         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2509         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
2510                 return -EBUSY;
2511
2512         search_len = strlen(search);
2513
2514         mutex_lock(&ftrace_lock);
2515         do_for_each_ftrace_rec(pg, rec) {
2516
2517                 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2518                         continue;
2519
2520                 if (ftrace_match_record(rec, search, search_len, type)) {
2521                         /* check whether it is already in the array */
2522                         exists = false;
2523                         for (i = 0; i < *idx; i++) {
2524                                 if (array[i] == rec->ip) {
2525                                         exists = true;
2526                                         break;
2527                                 }
2528                         }
2529
2530                         if (!not) {
2531                                 fail = 0;
2532                                 if (!exists) {
2533                                         array[(*idx)++] = rec->ip;
2534                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2535                                                 goto out;
2536                                 }
2537                         } else {
2538                                 if (exists) {
2539                                         array[i] = array[--(*idx)];
2540                                         array[*idx] = 0;
2541                                         fail = 0;
2542                                 }
2543                         }
2544                 }
2545         } while_for_each_ftrace_rec();
2546 out:
2547         mutex_unlock(&ftrace_lock);
2548
2549         if (fail)
2550                 return -EINVAL;
2551
2552         ftrace_graph_filter_enabled = 1;
2553         return 0;
2554 }
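
/*
 * From user space this is driven through the set_graph_function file,
 * one expression per write, e.g. (symbol names only as examples):
 *
 *   echo do_fork       > /sys/kernel/debug/tracing/set_graph_function
 *   echo 'sys_read*'  >> /sys/kernel/debug/tracing/set_graph_function
 *   echo '!sys_read*' >> /sys/kernel/debug/tracing/set_graph_function
 *
 * Truncating the file (the first form) clears the list in
 * ftrace_graph_open(); the '!' form removes a matching entry via the
 * "not" branch above.
 */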
2555
2556 static ssize_t
2557 ftrace_graph_write(struct file *file, const char __user *ubuf,
2558                    size_t cnt, loff_t *ppos)
2559 {
2560         struct trace_parser parser;
2561         ssize_t read, ret;
2562
2563         if (!cnt)
2564                 return 0;
2565
2566         mutex_lock(&graph_lock);
2567
2568         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2569                 ret = -ENOMEM;
2570                 goto out_unlock;
2571         }
2572
2573         read = trace_get_user(&parser, ubuf, cnt, ppos);
2574
2575         if (read >= 0 && trace_parser_loaded(&parser)) {
2576                 parser.buffer[parser.idx] = 0;
2577
2578                 /* we allow only one expression at a time */
2579                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2580                                         parser.buffer);
2581                 if (ret)
2582                         goto out_free;
2583         }
2584
2585         ret = read;
2586
2587 out_free:
2588         trace_parser_put(&parser);
2589 out_unlock:
2590         mutex_unlock(&graph_lock);
2591
2592         return ret;
2593 }
2594
2595 static const struct file_operations ftrace_graph_fops = {
2596         .open           = ftrace_graph_open,
2597         .read           = seq_read,
2598         .write          = ftrace_graph_write,
2599         .release        = ftrace_graph_release,
2600 };
2601 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2602
2603 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2604 {
2605
2606         trace_create_file("available_filter_functions", 0444,
2607                         d_tracer, NULL, &ftrace_avail_fops);
2608
2609         trace_create_file("failures", 0444,
2610                         d_tracer, NULL, &ftrace_failures_fops);
2611
2612         trace_create_file("set_ftrace_filter", 0644, d_tracer,
2613                         NULL, &ftrace_filter_fops);
2614
2615         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2616                                     NULL, &ftrace_notrace_fops);
2617
2618 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2619         trace_create_file("set_graph_function", 0444, d_tracer,
2620                                     NULL,
2621                                     &ftrace_graph_fops);
2622 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2623
2624         return 0;
2625 }
2626
2627 static int ftrace_process_locs(struct module *mod,
2628                                unsigned long *start,
2629                                unsigned long *end)
2630 {
2631         unsigned long *p;
2632         unsigned long addr;
2633         unsigned long flags;
2634
2635         mutex_lock(&ftrace_lock);
2636         p = start;
2637         while (p < end) {
2638                 addr = ftrace_call_adjust(*p++);
2639                 /*
2640                  * Some architecture linkers will pad between
2641                  * the different mcount_loc sections of different
2642                  * object files to satisfy alignments.
2643                  * Skip any NULL pointers.
2644                  */
2645                 if (!addr)
2646                         continue;
2647                 ftrace_record_ip(addr);
2648         }
2649
2650         /* disable interrupts to prevent kstop machine */
2651         local_irq_save(flags);
2652         ftrace_update_code(mod);
2653         local_irq_restore(flags);
2654         mutex_unlock(&ftrace_lock);
2655
2656         return 0;
2657 }
2658
2659 #ifdef CONFIG_MODULES
2660 void ftrace_release_mod(struct module *mod)
2661 {
2662         struct dyn_ftrace *rec;
2663         struct ftrace_page *pg;
2664
2665         if (ftrace_disabled)
2666                 return;
2667
2668         mutex_lock(&ftrace_lock);
2669         do_for_each_ftrace_rec(pg, rec) {
2670                 if (within_module_core(rec->ip, mod)) {
2671                         /*
2672                          * rec->ip is changed in ftrace_free_rec(), so it should
2673                          * no longer fall inside the module once the record is freed.
2674                          */
2675                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2676                         ftrace_free_rec(rec);
2677                 }
2678         } while_for_each_ftrace_rec();
2679         mutex_unlock(&ftrace_lock);
2680 }
2681
2682 static void ftrace_init_module(struct module *mod,
2683                                unsigned long *start, unsigned long *end)
2684 {
2685         if (ftrace_disabled || start == end)
2686                 return;
2687         ftrace_process_locs(mod, start, end);
2688 }
2689
2690 static int ftrace_module_notify(struct notifier_block *self,
2691                                 unsigned long val, void *data)
2692 {
2693         struct module *mod = data;
2694
2695         switch (val) {
2696         case MODULE_STATE_COMING:
2697                 ftrace_init_module(mod, mod->ftrace_callsites,
2698                                    mod->ftrace_callsites +
2699                                    mod->num_ftrace_callsites);
2700                 break;
2701         case MODULE_STATE_GOING:
2702                 ftrace_release_mod(mod);
2703                 break;
2704         }
2705
2706         return 0;
2707 }
2708 #else
2709 static int ftrace_module_notify(struct notifier_block *self,
2710                                 unsigned long val, void *data)
2711 {
2712         return 0;
2713 }
2714 #endif /* CONFIG_MODULES */
2715
2716 struct notifier_block ftrace_module_nb = {
2717         .notifier_call = ftrace_module_notify,
2718         .priority = 0,
2719 };
2720
2721 extern unsigned long __start_mcount_loc[];
2722 extern unsigned long __stop_mcount_loc[];
2723
2724 void __init ftrace_init(void)
2725 {
2726         unsigned long count, addr, flags;
2727         int ret;
2728
2729         /* Keep the ftrace pointer to the stub */
2730         addr = (unsigned long)ftrace_stub;
2731
2732         local_irq_save(flags);
2733         ftrace_dyn_arch_init(&addr);
2734         local_irq_restore(flags);
2735
2736         /* ftrace_dyn_arch_init places the return code in addr */
2737         if (addr)
2738                 goto failed;
2739
2740         count = __stop_mcount_loc - __start_mcount_loc;
2741
2742         ret = ftrace_dyn_table_alloc(count);
2743         if (ret)
2744                 goto failed;
2745
2746         last_ftrace_enabled = ftrace_enabled = 1;
2747
2748         ret = ftrace_process_locs(NULL,
2749                                   __start_mcount_loc,
2750                                   __stop_mcount_loc);
2751
2752         ret = register_module_notifier(&ftrace_module_nb);
2753         if (ret)
2754                 pr_warning("Failed to register trace ftrace module notifier\n");
2755
2756         set_ftrace_early_filters();
2757
2758         return;
2759  failed:
2760         ftrace_disabled = 1;
2761 }
2762
2763 #else
2764
2765 static int __init ftrace_nodyn_init(void)
2766 {
2767         ftrace_enabled = 1;
2768         return 0;
2769 }
2770 device_initcall(ftrace_nodyn_init);
2771
2772 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2773 static inline void ftrace_startup_enable(int command) { }
2774 /* Keep as macros so we do not need to define the commands */
2775 # define ftrace_startup(command)        do { } while (0)
2776 # define ftrace_shutdown(command)       do { } while (0)
2777 # define ftrace_startup_sysctl()        do { } while (0)
2778 # define ftrace_shutdown_sysctl()       do { } while (0)
2779 #endif /* CONFIG_DYNAMIC_FTRACE */
2780
2781 static void clear_ftrace_swapper(void)
2782 {
2783         struct task_struct *p;
2784         int cpu;
2785
2786         get_online_cpus();
2787         for_each_online_cpu(cpu) {
2788                 p = idle_task(cpu);
2789                 clear_tsk_trace_trace(p);
2790         }
2791         put_online_cpus();
2792 }
2793
2794 static void set_ftrace_swapper(void)
2795 {
2796         struct task_struct *p;
2797         int cpu;
2798
2799         get_online_cpus();
2800         for_each_online_cpu(cpu) {
2801                 p = idle_task(cpu);
2802                 set_tsk_trace_trace(p);
2803         }
2804         put_online_cpus();
2805 }
2806
2807 static void clear_ftrace_pid(struct pid *pid)
2808 {
2809         struct task_struct *p;
2810
2811         rcu_read_lock();
2812         do_each_pid_task(pid, PIDTYPE_PID, p) {
2813                 clear_tsk_trace_trace(p);
2814         } while_each_pid_task(pid, PIDTYPE_PID, p);
2815         rcu_read_unlock();
2816
2817         put_pid(pid);
2818 }
2819
2820 static void set_ftrace_pid(struct pid *pid)
2821 {
2822         struct task_struct *p;
2823
2824         rcu_read_lock();
2825         do_each_pid_task(pid, PIDTYPE_PID, p) {
2826                 set_tsk_trace_trace(p);
2827         } while_each_pid_task(pid, PIDTYPE_PID, p);
2828         rcu_read_unlock();
2829 }
2830
2831 static void clear_ftrace_pid_task(struct pid *pid)
2832 {
2833         if (pid == ftrace_swapper_pid)
2834                 clear_ftrace_swapper();
2835         else
2836                 clear_ftrace_pid(pid);
2837 }
2838
2839 static void set_ftrace_pid_task(struct pid *pid)
2840 {
2841         if (pid == ftrace_swapper_pid)
2842                 set_ftrace_swapper();
2843         else
2844                 set_ftrace_pid(pid);
2845 }
2846
2847 static int ftrace_pid_add(int p)
2848 {
2849         struct pid *pid;
2850         struct ftrace_pid *fpid;
2851         int ret = -EINVAL;
2852
2853         mutex_lock(&ftrace_lock);
2854
2855         if (!p)
2856                 pid = ftrace_swapper_pid;
2857         else
2858                 pid = find_get_pid(p);
2859
2860         if (!pid)
2861                 goto out;
2862
2863         ret = 0;
2864
2865         list_for_each_entry(fpid, &ftrace_pids, list)
2866                 if (fpid->pid == pid)
2867                         goto out_put;
2868
2869         ret = -ENOMEM;
2870
2871         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2872         if (!fpid)
2873                 goto out_put;
2874
2875         list_add(&fpid->list, &ftrace_pids);
2876         fpid->pid = pid;
2877
2878         set_ftrace_pid_task(pid);
2879
2880         ftrace_update_pid_func();
2881         ftrace_startup_enable(0);
2882
2883         mutex_unlock(&ftrace_lock);
2884         return 0;
2885
2886 out_put:
2887         if (pid != ftrace_swapper_pid)
2888                 put_pid(pid);
2889
2890 out:
2891         mutex_unlock(&ftrace_lock);
2892         return ret;
2893 }
2894
2895 static void ftrace_pid_reset(void)
2896 {
2897         struct ftrace_pid *fpid, *safe;
2898
2899         mutex_lock(&ftrace_lock);
2900         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2901                 struct pid *pid = fpid->pid;
2902
2903                 clear_ftrace_pid_task(pid);
2904
2905                 list_del(&fpid->list);
2906                 kfree(fpid);
2907         }
2908
2909         ftrace_update_pid_func();
2910         ftrace_startup_enable(0);
2911
2912         mutex_unlock(&ftrace_lock);
2913 }
2914
2915 static void *fpid_start(struct seq_file *m, loff_t *pos)
2916 {
2917         mutex_lock(&ftrace_lock);
2918
2919         if (list_empty(&ftrace_pids) && (!*pos))
2920                 return (void *) 1;
2921
2922         return seq_list_start(&ftrace_pids, *pos);
2923 }
2924
2925 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2926 {
2927         if (v == (void *)1)
2928                 return NULL;
2929
2930         return seq_list_next(v, &ftrace_pids, pos);
2931 }
2932
2933 static void fpid_stop(struct seq_file *m, void *p)
2934 {
2935         mutex_unlock(&ftrace_lock);
2936 }
2937
2938 static int fpid_show(struct seq_file *m, void *v)
2939 {
2940         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2941
2942         if (v == (void *)1) {
2943                 seq_printf(m, "no pid\n");
2944                 return 0;
2945         }
2946
2947         if (fpid->pid == ftrace_swapper_pid)
2948                 seq_printf(m, "swapper tasks\n");
2949         else
2950                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2951
2952         return 0;
2953 }
2954
2955 static const struct seq_operations ftrace_pid_sops = {
2956         .start = fpid_start,
2957         .next = fpid_next,
2958         .stop = fpid_stop,
2959         .show = fpid_show,
2960 };
2961
2962 static int
2963 ftrace_pid_open(struct inode *inode, struct file *file)
2964 {
2965         int ret = 0;
2966
2967         if ((file->f_mode & FMODE_WRITE) &&
2968             (file->f_flags & O_TRUNC))
2969                 ftrace_pid_reset();
2970
2971         if (file->f_mode & FMODE_READ)
2972                 ret = seq_open(file, &ftrace_pid_sops);
2973
2974         return ret;
2975 }
2976
2977 static ssize_t
2978 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2979                    size_t cnt, loff_t *ppos)
2980 {
2981         char buf[64], *tmp;
2982         long val;
2983         int ret;
2984
2985         if (cnt >= sizeof(buf))
2986                 return -EINVAL;
2987
2988         if (copy_from_user(&buf, ubuf, cnt))
2989                 return -EFAULT;
2990
2991         buf[cnt] = 0;
2992
2993         /*
2994          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
2995          * to clear the filter quietly.
2996          */
2997         tmp = strstrip(buf);
2998         if (strlen(tmp) == 0)
2999                 return 1;
3000
3001         ret = strict_strtol(tmp, 10, &val);
3002         if (ret < 0)
3003                 return ret;
3004
3005         ret = ftrace_pid_add(val);
3006
3007         return ret ? ret : cnt;
3008 }
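
/*
 * Typical use from user space (the pid value is purely illustrative):
 *
 *   echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid    trace pid 1234
 *   echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid    trace the idle
 *                                                           (swapper) tasks
 *   echo      > /sys/kernel/debug/tracing/set_ftrace_pid    clear the list
 *
 * Appending (">>") adds further pids to the list instead of resetting it.
 */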
3009
3010 static int
3011 ftrace_pid_release(struct inode *inode, struct file *file)
3012 {
3013         if (file->f_mode & FMODE_READ)
3014                 seq_release(inode, file);
3015
3016         return 0;
3017 }
3018
3019 static const struct file_operations ftrace_pid_fops = {
3020         .open           = ftrace_pid_open,
3021         .write          = ftrace_pid_write,
3022         .read           = seq_read,
3023         .llseek         = seq_lseek,
3024         .release        = ftrace_pid_release,
3025 };
3026
3027 static __init int ftrace_init_debugfs(void)
3028 {
3029         struct dentry *d_tracer;
3030
3031         d_tracer = tracing_init_dentry();
3032         if (!d_tracer)
3033                 return 0;
3034
3035         ftrace_init_dyn_debugfs(d_tracer);
3036
3037         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3038                             NULL, &ftrace_pid_fops);
3039
3040         ftrace_profile_debugfs(d_tracer);
3041
3042         return 0;
3043 }
3044 fs_initcall(ftrace_init_debugfs);
3045
3046 /**
3047  * ftrace_kill - kill ftrace
3048  *
3049  * This function should be used by panic code. It stops ftrace
3050  * but in a not so nice way. If you need to simply kill ftrace
3051  * from a non-atomic section, use ftrace_kill.
3052  */
3053 void ftrace_kill(void)
3054 {
3055         ftrace_disabled = 1;
3056         ftrace_enabled = 0;
3057         clear_ftrace_function();
3058 }
3059
3060 /**
3061  * register_ftrace_function - register a function for profiling
3062  * @ops - ops structure that holds the function for profiling.
3063  *
3064  * Register a function to be called by all functions in the
3065  * kernel.
3066  *
3067  * Note: @ops->func and all the functions it calls must be labeled
3068  *       with "notrace", otherwise it will go into a
3069  *       recursive loop.
3070  */
3071 int register_ftrace_function(struct ftrace_ops *ops)
3072 {
3073         int ret;
3074
3075         if (unlikely(ftrace_disabled))
3076                 return -1;
3077
3078         mutex_lock(&ftrace_lock);
3079
3080         ret = __register_ftrace_function(ops);
3081         ftrace_startup(0);
3082
3083         mutex_unlock(&ftrace_lock);
3084         return ret;
3085 }
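
/*
 * A minimal sketch of a user of this interface (the names are
 * hypothetical):
 *
 *   static void notrace my_trace_func(unsigned long ip,
 *                                     unsigned long parent_ip)
 *   {
 *           ... must stay notrace, as the comment above warns ...
 *   }
 *
 *   static struct ftrace_ops my_ops = {
 *           .func = my_trace_func,
 *   };
 *
 *   register_ftrace_function(&my_ops);
 *   ...
 *   unregister_ftrace_function(&my_ops);
 */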
3086
3087 /**
3088  * unregister_ftrace_function - unregister a function for profiling.
3089  * @ops - ops structure that holds the function to unregister
3090  *
3091  * Unregister a function that was added to be called by ftrace profiling.
3092  */
3093 int unregister_ftrace_function(struct ftrace_ops *ops)
3094 {
3095         int ret;
3096
3097         mutex_lock(&ftrace_lock);
3098         ret = __unregister_ftrace_function(ops);
3099         ftrace_shutdown(0);
3100         mutex_unlock(&ftrace_lock);
3101
3102         return ret;
3103 }
3104
3105 int
3106 ftrace_enable_sysctl(struct ctl_table *table, int write,
3107                      void __user *buffer, size_t *lenp,
3108                      loff_t *ppos)
3109 {
3110         int ret;
3111
3112         if (unlikely(ftrace_disabled))
3113                 return -ENODEV;
3114
3115         mutex_lock(&ftrace_lock);
3116
3117         ret  = proc_dointvec(table, write, buffer, lenp, ppos);
3118
3119         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3120                 goto out;
3121
3122         last_ftrace_enabled = !!ftrace_enabled;
3123
3124         if (ftrace_enabled) {
3125
3126                 ftrace_startup_sysctl();
3127
3128                 /* we are starting ftrace again */
3129                 if (ftrace_list != &ftrace_list_end) {
3130                         if (ftrace_list->next == &ftrace_list_end)
3131                                 ftrace_trace_function = ftrace_list->func;
3132                         else
3133                                 ftrace_trace_function = ftrace_list_func;
3134                 }
3135
3136         } else {
3137                 /* stopping ftrace calls (just send to ftrace_stub) */
3138                 ftrace_trace_function = ftrace_stub;
3139
3140                 ftrace_shutdown_sysctl();
3141         }
3142
3143  out:
3144         mutex_unlock(&ftrace_lock);
3145         return ret;
3146 }
3147
3148 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3149
3150 static int ftrace_graph_active;
3151 static struct notifier_block ftrace_suspend_notifier;
3152
3153 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3154 {
3155         return 0;
3156 }
3157
3158 /* The callbacks that hook a function */
3159 trace_func_graph_ret_t ftrace_graph_return =
3160                         (trace_func_graph_ret_t)ftrace_stub;
3161 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3162
3163 /* Try to assign a return stack array to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3164 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3165 {
3166         int i;
3167         int ret = 0;
3168         unsigned long flags;
3169         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3170         struct task_struct *g, *t;
3171
3172         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3173                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3174                                         * sizeof(struct ftrace_ret_stack),
3175                                         GFP_KERNEL);
3176                 if (!ret_stack_list[i]) {
3177                         start = 0;
3178                         end = i;
3179                         ret = -ENOMEM;
3180                         goto free;
3181                 }
3182         }
3183
3184         read_lock_irqsave(&tasklist_lock, flags);
3185         do_each_thread(g, t) {
3186                 if (start == end) {
3187                         ret = -EAGAIN;
3188                         goto unlock;
3189                 }
3190
3191                 if (t->ret_stack == NULL) {
3192                         atomic_set(&t->tracing_graph_pause, 0);
3193                         atomic_set(&t->trace_overrun, 0);
3194                         t->curr_ret_stack = -1;
3195                         /* Make sure the tasks see the -1 first: */
3196                         smp_wmb();
3197                         t->ret_stack = ret_stack_list[start++];
3198                 }
3199         } while_each_thread(g, t);
3200
3201 unlock:
3202         read_unlock_irqrestore(&tasklist_lock, flags);
3203 free:
3204         for (i = start; i < end; i++)
3205                 kfree(ret_stack_list[i]);
3206         return ret;
3207 }
3208
3209 static void
3210 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3211                                 struct task_struct *next)
3212 {
3213         unsigned long long timestamp;
3214         int index;
3215
3216         /*
3217          * Does the user want to count the time a function was asleep?
3218          * If so, do not update the timestamps.
3219          */
3220         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3221                 return;
3222
3223         timestamp = trace_clock_local();
3224
3225         prev->ftrace_timestamp = timestamp;
3226
3227         /* only process tasks that we timestamped */
3228         if (!next->ftrace_timestamp)
3229                 return;
3230
3231         /*
3232          * Update all the counters in next to make up for the
3233          * time next was sleeping.
3234          */
3235         timestamp -= next->ftrace_timestamp;
3236
3237         for (index = next->curr_ret_stack; index >= 0; index--)
3238                 next->ret_stack[index].calltime += timestamp;
3239 }
3240
3241 /* Allocate a return stack for each task */
3242 static int start_graph_tracing(void)
3243 {
3244         struct ftrace_ret_stack **ret_stack_list;
3245         int ret, cpu;
3246
3247         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3248                                 sizeof(struct ftrace_ret_stack *),
3249                                 GFP_KERNEL);
3250
3251         if (!ret_stack_list)
3252                 return -ENOMEM;
3253
3254         /* The cpu_boot init_task->ret_stack will never be freed */
3255         for_each_online_cpu(cpu) {
3256                 if (!idle_task(cpu)->ret_stack)
3257                         ftrace_graph_init_task(idle_task(cpu));
3258         }
3259
3260         do {
3261                 ret = alloc_retstack_tasklist(ret_stack_list);
3262         } while (ret == -EAGAIN);
3263
3264         if (!ret) {
3265                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3266                 if (ret)
3267                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3268                                 " probe to kernel_sched_switch\n");
3269         }
3270
3271         kfree(ret_stack_list);
3272         return ret;
3273 }
3274
3275 /*
3276  * Hibernation protection.
3277  * The state of the current task is too unstable during
3278  * suspend/restore to disk. We want to protect against that.
3279  */
3280 static int
3281 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3282                                                         void *unused)
3283 {
3284         switch (state) {
3285         case PM_HIBERNATION_PREPARE:
3286                 pause_graph_tracing();
3287                 break;
3288
3289         case PM_POST_HIBERNATION:
3290                 unpause_graph_tracing();
3291                 break;
3292         }
3293         return NOTIFY_DONE;
3294 }
3295
3296 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3297                         trace_func_graph_ent_t entryfunc)
3298 {
3299         int ret = 0;
3300
3301         mutex_lock(&ftrace_lock);
3302
3303         /* we currently allow only one tracer registered at a time */
3304         if (ftrace_graph_active) {
3305                 ret = -EBUSY;
3306                 goto out;
3307         }
3308
3309         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3310         register_pm_notifier(&ftrace_suspend_notifier);
3311
3312         ftrace_graph_active++;
3313         ret = start_graph_tracing();
3314         if (ret) {
3315                 ftrace_graph_active--;
3316                 goto out;
3317         }
3318
3319         ftrace_graph_return = retfunc;
3320         ftrace_graph_entry = entryfunc;
3321
3322         ftrace_startup(FTRACE_START_FUNC_RET);
3323
3324 out:
3325         mutex_unlock(&ftrace_lock);
3326         return ret;
3327 }
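
/*
 * A sketch of a graph tracer client (callback names are hypothetical;
 * see struct ftrace_graph_ent/ftrace_graph_ret for the available fields):
 *
 *   static int my_graph_entry(struct ftrace_graph_ent *trace)
 *   {
 *           ... return nonzero to record this call ...
 *           return 1;
 *   }
 *
 *   static void my_graph_return(struct ftrace_graph_ret *trace)
 *   {
 *           ... e.g. look at trace->rettime - trace->calltime ...
 *   }
 *
 *   register_ftrace_graph(my_graph_return, my_graph_entry);
 *   ...
 *   unregister_ftrace_graph();
 */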
3328
3329 void unregister_ftrace_graph(void)
3330 {
3331         mutex_lock(&ftrace_lock);
3332
3333         if (unlikely(!ftrace_graph_active))
3334                 goto out;
3335
3336         ftrace_graph_active--;
3337         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3338         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3339         ftrace_graph_entry = ftrace_graph_entry_stub;
3340         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3341         unregister_pm_notifier(&ftrace_suspend_notifier);
3342
3343  out:
3344         mutex_unlock(&ftrace_lock);
3345 }
3346
3347 /* Allocate a return stack for a newly created task */
3348 void ftrace_graph_init_task(struct task_struct *t)
3349 {
3350         /* Make sure we do not use the parent's ret_stack */
3351         t->ret_stack = NULL;
3352         t->curr_ret_stack = -1;
3353
3354         if (ftrace_graph_active) {
3355                 struct ftrace_ret_stack *ret_stack;
3356
3357                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3358                                 * sizeof(struct ftrace_ret_stack),
3359                                 GFP_KERNEL);
3360                 if (!ret_stack)
3361                         return;
3362                 atomic_set(&t->tracing_graph_pause, 0);
3363                 atomic_set(&t->trace_overrun, 0);
3364                 t->ftrace_timestamp = 0;
3365                 /* make curr_ret_stack visible before we add the ret_stack */
3366                 smp_wmb();
3367                 t->ret_stack = ret_stack;
3368         }
3369 }
3370
3371 void ftrace_graph_exit_task(struct task_struct *t)
3372 {
3373         struct ftrace_ret_stack *ret_stack = t->ret_stack;
3374
3375         t->ret_stack = NULL;
3376         /* NULL must become visible to IRQs before we free it: */
3377         barrier();
3378
3379         kfree(ret_stack);
3380 }
3381
3382 void ftrace_graph_stop(void)
3383 {
3384         ftrace_stop();
3385 }
3386 #endif