/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between disabling and the
 * actual disabling of the function tracer.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
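         * (The smp_wmb() below pairs with the read_barrier_depends()
         * calls in ftrace_list_func().)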
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

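        /*
         * Walk with a pointer-to-pointer so the matching entry can be
         * unlinked without tracking a separate 'prev' node.
         */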
        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
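
/*
 * Worked example (sizes are illustrative, not guaranteed): with
 * PAGE_SIZE == 4096, sizeof(struct ftrace_page) == 16 and
 * sizeof(struct dyn_ftrace) == 32, ENTRIES_PER_PAGE comes to
 * (4096 - 16) / 32 = 127 records per page.
 */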

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

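        /*
         * While on the free list, rec->ip doubles as the 'next' link;
         * ftrace_alloc_dyn_node() reads it back the same way.
         */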
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUs are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        /* Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run. */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                freeze_record(p);
                                continue;
                        } else {
                                unfreeze_record(p);
                        }

                        /* convert record (i.e., patch mcount-call with NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }

                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine(__ftrace_update_code, NULL, NULL);

        return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld hash entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     (!(rec->flags & FTRACE_FL_FAILED) ||
                      (rec->flags & FTRACE_FL_FREE))) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        preempt_enable();
}
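
/*
 * Pattern semantics by example (symbol names are illustrative):
 * "sched*" yields MATCH_FRONT_ONLY and matches schedule, sched_clock,
 * etc.; "*lock" yields MATCH_END_ONLY; "*lock*" yields
 * MATCH_MIDDLE_ONLY; a bare "schedule" requires a MATCH_FULL match.
 */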

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
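
/*
 * Usage sketch (the pattern is arbitrary): trace only functions whose
 * names begin with "sched", dropping any previously set filter:
 *
 *      ftrace_set_filter("sched*", strlen("sched*"), 1);
 */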

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);
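
/*
 * Userspace usage sketch (paths assume debugfs is mounted at
 * /sys/kernel/debug and the tracing directory has been created):
 *
 *      cat /sys/kernel/debug/tracing/available_filter_functions
 *      echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *      echo 'schedule' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *      echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 */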

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                ftrace_record_ip(addr);
                ftrace_shutdown_replenish();
        }

        /* p is ignored */
        local_irq_save(flags);
        __ftrace_update_code(p);
        local_irq_restore(flags);

        return 0;
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

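/*
 * __start_mcount_loc and __stop_mcount_loc delimit the __mcount_loc
 * section: a table, assembled at build time, of every mcount call
 * site in the kernel image.  ftrace_init() records each site and
 * patches it to a nop during boot.
 */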
void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(__start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine(ftrace_dyn_arch_init, &addr, NULL);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc(NR_TO_INIT);
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
        ftraced_suspend = -1;
#endif
        clear_ftrace_function();
}

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates. Use this when something
 * has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
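
/*
 * Registration sketch (illustrative only; my_func and my_ops are not
 * part of this file).  The callback and everything it calls must be
 * notrace, or the mcount hook will recurse:
 *
 *      static void notrace my_func(unsigned long ip,
 *                                  unsigned long parent_ip)
 *      {
 *              // nothing called from here may itself be traced
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */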

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}