/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all CPUs stop calling
 * the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

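/*
 * The smp_wmb() in __register_ftrace_function() pairs with the
 * read_barrier_depends() calls in ftrace_list_func(): a CPU that
 * observes the new ftrace_list pointer is then guaranteed to also
 * observe the ops->next value it was initialized with, so a
 * concurrent list walk never chases an uninitialized pointer.
 */
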
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic. That is, the call sites are recorded at run
 * time by the callers themselves and not collected at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
static void ftrace_release_hash(unsigned long start, unsigned long end);
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
static inline void ftrace_release_hash(unsigned long start, unsigned long end)
{
}
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};

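/*
 * The FTRACE_* commands above are OR'd into a bitmask and handed to
 * ftrace_run_update_code(), which applies them under stop_machine().
 * For example, ftrace_startup() may end up issuing:
 *
 *	command |= FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *	ftrace_run_update_code(command);
 */
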
static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page *next;
	unsigned long index;
	struct dyn_ftrace records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

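/*
 * Rough sizing, assuming a 64-bit kernel with 4 KB pages, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace:
 *
 *	ENTRIES_PER_PAGE = (4096 - 16) / 32 = 127 records per page
 */
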
/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

	ftrace_release_hash(s, e);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

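/*
 * Flow of a newly seen call site (sketch): mcount calls
 * ftrace_record_ip(), which hashes the ip and queues a dyn_ftrace
 * record; later __ftrace_update_code(), running under stop_machine(),
 * patches the recorded site from a call into a nop and marks the
 * record FTRACE_FL_CONVERTED.
 */
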
#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

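/*
 * The filtered-enable branch above reduces to this decision table
 * (derived from the flag checks; F = FTRACE_FL_FILTER,
 * N = FTRACE_FL_NOTRACE, E = FTRACE_FL_ENABLED):
 *
 *	F N E	action
 *	0 0 0	nothing
 *	0 1 0	nothing
 *	1 0 1	nothing (already enabled)
 *	1 1 0	nothing (notrace wins)
 *	1 0 0	patch in the call to ftrace_caller
 *	0 0 1	patch the call site back to a nop
 *	0 1 1	patch the call site back to a nop
 *	1 1 1	patch the call site back to a nop
 */
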
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			WARN_ON_ONCE(1);
			pr_info("ftrace faulted on unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *  final linking to find all calls to ftrace.
	 *  Then we can:
	 *   a) know how many pages to allocate.
	 *   and/or
	 *   b) set up the table then.
	 *
	 * The dynamic code is still necessary for
	 * modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER = (1 << 0),
	FTRACE_ITER_CONT = (1 << 1),
	FTRACE_ITER_NOTRACE = (1 << 2),
	FTRACE_ITER_FAILURES = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t pos;
	struct ftrace_page *pg;
	unsigned idx;
	unsigned flags;
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned buffer_idx;
	unsigned filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

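/*
 * Pattern forms accepted by ftrace_match() (one '*' wildcard):
 *
 *	"func"		MATCH_FULL		exact match
 *	"func*"		MATCH_FRONT_ONLY	prefix match
 *	"*func"		MATCH_END_ONLY		suffix match
 *	"*func*"	MATCH_MIDDLE_ONLY	substring match
 */
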
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

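/*
 * Example (sketch): trace only scheduler-related functions but never
 * schedule() itself; the flags are applied the next time the call
 * sites are patched:
 *
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 *	ftrace_set_notrace("schedule", strlen("schedule"), 0);
 */
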
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
				    NULL, &ftraced_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'ftraced_enabled' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

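/*
 * Typical use of the files above from userspace, assuming debugfs is
 * mounted at /sys/kernel/debug (the files live in the tracing
 * directory):
 *
 *	cat available_filter_functions
 *	echo 'sys_nanosleep' > set_ftrace_filter
 *	echo 'schedule' > set_ftrace_notrace
 *	echo disable > ftraced_enabled
 */
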
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/* should not be called from interrupt context */
		spin_lock(&ftrace_lock);
		ftrace_record_ip(addr);
		spin_unlock(&ftrace_lock);
		ftrace_shutdown_replenish();
	}

	/* p is ignored */
	local_irq_save(flags);
	__ftrace_update_code(p);
	local_irq_restore(flags);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */

static void ftrace_release_hash(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;
	unsigned long flags;
	int i, cpu;

	preempt_disable_notrace();

	/* disable in case we call something that calls mcount */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;

	ftrace_hash_lock(flags);

	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(rec, t, n, head, node) {
			if (rec->flags & FTRACE_FL_FREE)
				continue;

			if ((rec->ip >= start) && (rec->ip < end))
				ftrace_free_rec(rec);
		}
	}

	ftrace_hash_unlock(flags);

	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
	preempt_enable_notrace();
}

static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (!ftraced_suspend && !ftraced_stop &&
		    ftrace_update_code()) {
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

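/*
 * The ftraced daemon above wakes up once a second and, when new call
 * sites have been recorded (ftraced_trigger), runs ftrace_update_code()
 * to patch them under stop_machine().  More than 100000 accumulated
 * updates is treated as a runaway condition and permanently disables
 * ftrace.
 */
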
static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine(ftrace_dyn_arch_init, &addr, NULL);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it is safe to call from atomic
 * context, so it takes no locks and does not wait for tracers
 * to finish.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

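/*
 * Example (sketch, names are hypothetical): a minimal callback that
 * runs on every traced function.  The handler and everything it calls
 * must be notrace or it will recurse into itself:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		(inspect ip and parent_ip here)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
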
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}