/*
 * Trace event definitions shared by the ftrace ring-buffer output path
 * and the perf tracepoint integration.
 */
1 | #ifndef _LINUX_FTRACE_EVENT_H | |
2 | #define _LINUX_FTRACE_EVENT_H | |
3 | ||
4 | #include <linux/ring_buffer.h> | |
5 | #include <linux/trace_seq.h> | |
6 | #include <linux/percpu.h> | |
7 | #include <linux/hardirq.h> | |
8 | #include <linux/perf_event.h> | |
9 | ||
/* Opaque types; full definitions live elsewhere in the tracing core. */
struct trace_array;
struct tracer;
struct dentry;

/* Per-CPU scratch trace_seq shared by the event output helpers. */
DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
15 | ||
/*
 * One value->name mapping entry for the print_flags/print_symbols
 * output helpers declared below.
 */
struct trace_print_flags {
	unsigned long	mask;	/* flag bit(s) (flags) or exact value (symbols) to match */
	const char	*name;	/* string emitted for a match */
};
20 | ||
/*
 * Render @flags as a @delim-separated list of names taken from the
 * NULL-name-terminated @flag_array, using @p as scratch space.
 * Returns the rendered string (backed by @p).
 */
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

/* As above, but @val must match one entry exactly (symbolic lookup). */
const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);
27 | ||
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 *
 * Common fields are filled in by tracing_generic_entry_update().
 */
struct trace_entry {
	unsigned short		type;		/* event type id; bounded by FTRACE_MAX_EVENT */
	unsigned char		flags;		/* irq/preempt state flags at record time */
	unsigned char		preempt_count;	/* preempt_count snapshot */
	int			pid;		/* recording task's pid */
	int			lock_depth;	/* NOTE(review): likely BKL depth — confirm */
};
41 | ||
/*
 * Largest event type id representable in trace_entry::type,
 * computed from the field's width (sizeof * 8 bits).
 */
#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
44 | ||
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;		/* trace array being iterated */
	struct tracer		*trace;		/* active tracer plugin */
	void			*private;	/* tracer-private state */
	int			cpu_file;	/* cpu selector for per-cpu trace files */
	struct mutex		mutex;		/* serializes concurrent iterator users */
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];	/* one ring-buffer cursor per cpu */
	unsigned long		iter_flags;	/* iteration mode/state flags */

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;		/* formatted-output accumulator */
	struct trace_entry	*ent;		/* entry currently being printed */
	unsigned long		lost_events;	/* events dropped before @ent was read */
	int			leftover;	/* partial output carried over between reads */
	int			cpu;		/* cpu @ent was pulled from */
	u64			ts;		/* timestamp of @ent */

	loff_t			pos;		/* file position for seq_file interface */
	long			idx;		/* current entry index */

	cpumask_var_t		started;	/* cpus already announced in the output */
};
71 | ||
72 | ||
/* Output callback: formats the iterator's current entry into iter->seq. */
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags);

/*
 * Per-event-type output registration: one callback per output format
 * (human-readable, raw, hex, binary), keyed by @type.
 */
struct trace_event {
	struct hlist_node	node;		/* linkage in the type-hash */
	struct list_head	list;		/* linkage in the global event list */
	int			type;		/* event type id this handler serves */
	trace_print_func	trace;		/* default human-readable output */
	trace_print_func	raw;		/* raw field dump */
	trace_print_func	hex;		/* hexadecimal dump */
	trace_print_func	binary;		/* binary dump */
};

/* Register/unregister an output handler; see kernel/trace/trace_output.c. */
extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);
87 | ||
/*
 * Return values for print_line callback
 *
 * NOTE(review): trace_print_func above names this enum before its
 * definition; relies on GCC accepting forward enum references.
 */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,	/* Entry fully rendered */
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
95 | ||
/* Fill @entry's common header fields from @flags and preempt count @pc. */
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

/*
 * Reserve @len bytes for an event of @type in the current trace buffer;
 * the buffer used is returned through *@current_buffer.
 */
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
/* Commit a reserved event (and wake up readers). */
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
/* Commit without waking readers — safe from contexts that must not wake. */
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc);
/* Abandon a reserved event instead of committing it. */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);

/* Record @tsk's comm so pid->name resolution works in the output. */
void tracing_record_cmdline(struct task_struct *tsk);
113 | ||
struct event_filter;

/*
 * One registered trace event (typically generated by the TRACE_EVENT()
 * machinery): identity, debugfs plumbing, output callbacks, filter
 * state, and perf integration hooks.
 */
struct ftrace_event_call {
	struct list_head	list;		/* global event-list linkage */
	char			*name;		/* event name */
	char			*system;	/* subsystem this event belongs to */
	struct dentry		*dir;		/* debugfs directory for the event */
	struct trace_event	*event;		/* output callbacks (trace/raw/hex/binary) */
	int			enabled;	/* nonzero while the event is being traced */
	int			(*regfunc)(struct ftrace_event_call *);	/* attach the probe */
	void			(*unregfunc)(struct ftrace_event_call *); /* detach the probe */
	int			id;		/* event type id */
	const char		*print_fmt;	/* format string exported to user space */
	int			(*raw_init)(struct ftrace_event_call *);  /* one-time init */
	int			(*define_fields)(struct ftrace_event_call *); /* populate @fields */
	struct list_head	fields;		/* field descriptors (see trace_define_field) */
	int			filter_active;	/* nonzero when @filter should be applied */
	struct event_filter	*filter;	/* compiled filter predicates */
	void			*mod;		/* owning module, if any */
	void			*data;		/* event-private data */

	/* perf (CONFIG_PERF_EVENTS) integration */
	int			perf_refcount;	/* number of perf users */
	struct hlist_head	*perf_events;	/* per-event perf consumer list */
	int			(*perf_event_enable)(struct ftrace_event_call *);
	void			(*perf_event_disable)(struct ftrace_event_call *);
};
140 | ||
/* Upper bound, in bytes, of a single perf trace record. */
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_PRED		32	/* max predicates per event filter */
#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
145 | ||
/* Tear down @call's compiled filter predicates. */
extern void destroy_preds(struct ftrace_event_call *call);
/* Returns nonzero when record @rec passes @filter. */
extern int filter_match_preds(struct event_filter *filter, void *rec);
/*
 * Discard the reserved @event when @call has an active filter that
 * rejects record @rec; returns nonzero if the event was discarded.
 */
extern int filter_current_check_discard(struct ring_buffer *buffer,
					struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);

/* Field classes understood by the filter engine (filter_type argument). */
enum {
	FILTER_OTHER = 0,	/* plain scalar comparison */
	FILTER_STATIC_STRING,	/* fixed-size char array embedded in the record */
	FILTER_DYN_STRING,	/* dynamically-sized string within the record */
	FILTER_PTR_STRING,	/* string referenced through a pointer field */
};
159 | ||
/* Default raw_init implementation for generated events. */
extern int trace_event_raw_init(struct ftrace_event_call *call);
/* Describe one record field of @call (for filtering and format export). */
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
/* Add/remove a dynamically created event (e.g. kprobe events). */
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);
166 | ||
/* Nonzero when @type is signed: casting -1 into it must yield a negative value. */
#define is_signed_type(type)	(((type)(-1)) < (type)0)
168 | ||
169 | int trace_set_clr_event(const char *system, const char *event, int set); | |
170 | ||
/*
 * trace_printk for use inside event code: records the caller's comm,
 * then emits either a binary printk (constant fmt, pointer stored in
 * the __trace_printk_fmt section) or a full formatted printk.
 *
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
189 | ||
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

/* Per-CPU pt_regs scratch area used when sampling trace events via perf. */
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

/* Lifecycle hooks called by the perf core for tracepoint-backed events. */
extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_enable(struct perf_event *event);
extern void perf_trace_disable(struct perf_event *event);
/* Attach/detach a textual event filter to a perf tracepoint event. */
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
/*
 * Reserve a @size-byte raw-data buffer for an event of @type; the
 * recursion context is returned through *@rctxp and must be released
 * by perf_trace_buf_submit().
 */
extern void *perf_trace_buf_prepare(int size, unsigned short type,
				    struct pt_regs *regs, int *rctxp);
204 | ||
/*
 * Hand a filled raw-data buffer (from perf_trace_buf_prepare()) to the
 * perf core, then release the swevent recursion context @rctx.  The
 * release must follow the event submission.
 */
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		      u64 count, struct pt_regs *regs, void *head)
{
	perf_tp_event(addr, count, raw_data, size, regs, head);
	perf_swevent_put_recursion_context(rctx);
}
212 | #endif | |
213 | ||
214 | #endif /* _LINUX_FTRACE_EVENT_H */ |