/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);
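
/*
 * Illustration (editor's note, not part of the original source): with the
 * __common_field() macro above, the line "__common_field(int, pid);"
 * expands to roughly
 *
 *	ret = trace_define_field(call, "int", "common_pid",
 *				 offsetof(typeof(ent), pid),
 *				 sizeof(ent.pid), is_signed_type(int),
 *				 FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */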

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call->data);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call->data);
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
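
/*
 * Example (editor's illustration): a string such as "sched:sched_switch"
 * passed to ftrace_set_clr_event() enables that single event, while
 * "sched:" or "sched:*" covers every event in the sched subsystem.  A
 * leading '!' on a line written to the set_event file (stripped in
 * ftrace_event_write() below) turns the request into a disable.
 */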

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
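
/*
 * Example (editor's illustration): from user space, reading the per-event
 * events/<subsystem>/<event>/enable file returns "0\n" or "1\n" via
 * event_enable_read(), and writing "1" or "0" to it goes through
 * event_enable_write() and ftrace_event_enable_disable() above for that
 * single event.
 */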

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events or cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid));
}
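
/*
 * Illustration (editor's note, assuming the usual struct trace_entry
 * layout of type, flags, preempt_count, pid): the common header written
 * above comes out roughly as
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 */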

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
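
/*
 * Example (editor's illustration): a filter expression using the common
 * fields defined above, such as "common_pid == 1" or
 * "common_preempt_count > 0", written to a per-event 'filter' file is
 * handed to apply_event_filter(); writing "0" typically clears the
 * filter again.
 */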

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
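
/*
 * Illustration (editor's note): for an event declared with TRACE_SYSTEM
 * "sched" and name "sched_switch", the code above ends up creating
 * debugfs entries roughly like
 *
 *	tracing/events/sched/enable
 *	tracing/events/sched/filter
 *	tracing/events/sched/sched_switch/enable
 *	tracing/events/sched/sched_switch/id
 *	tracing/events/sched/sched_switch/filter
 *	tracing/events/sched/sched_switch/format
 */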

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head	list;
	struct module		*mod;
	struct file_operations	id;
	struct file_operations	enable;
	struct file_operations	format;
	struct file_operations	filter;
};

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
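
/*
 * Example (editor's illustration): booting with
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 * stores the comma separated list in bootup_event_buf; each token is
 * later handed to ftrace_set_clr_event() from event_trace_init() below.
 */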

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif