/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>		<item>;
 *	<type2>		<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

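/*
 * For illustration only (not part of the original macros): a hypothetical
 * event declared as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *name),
 *		TP_ARGS(bar, name),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *			__assign_str(name, name);
 *		),
 *		TP_printk("bar=%d name=%s", __entry->bar, __get_str(name)))
 *
 * would be expanded by this stage into roughly
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *
 * The event name and its fields are made up for this sketch.
 */
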
/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between a tracepoint and a template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

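/*
 * A usage sketch (hypothetical event names) of the class/event split:
 *
 *	DECLARE_EVENT_CLASS(foo_template,
 *		TP_PROTO(int bar), TP_ARGS(bar),
 *		TP_STRUCT__entry(__field(int, bar)),
 *		TP_fast_assign(__entry->bar = bar;),
 *		TP_printk("bar=%d", __entry->bar));
 *
 *	DEFINE_EVENT(foo_template, foo_begin, TP_PROTO(int bar), TP_ARGS(bar));
 *	DEFINE_EVENT(foo_template, foo_end, TP_PROTO(int bar), TP_ARGS(bar));
 *
 * foo_begin and foo_end share all of the code generated for foo_template;
 * TRACE_EVENT(name, ...) is just the degenerate case of a class with a
 * single event of the same name.
 */
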
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>, which holds the
 * offset of the corresponding array from the beginning of the event.
 * The size of the array is encoded in the higher 16 bits of <item>.
 */

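/*
 * Illustration (not in the original): the packing and unpacking of one
 * such u32, matching the __dynamic_array() helpers defined later on:
 *
 *	u32 data_loc = offset | (size << 16);	// pack
 *	u16 offset   = data_loc & 0xffff;	// low 16 bits: offset
 *	u16 size     = data_loc >> 16;		// high 16 bits: size
 *
 * The offset is relative to the beginning of the event record, so
 * __get_dynamic_array() can compute the address with a single add.
 */
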
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

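/*
 * A usage sketch (hypothetical field and flag names): within TP_printk()
 * these helpers turn numeric fields into symbolic strings, e.g.
 *
 *	TP_printk("state=%s flags=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" }, { 1, "BUSY" }),
 *		  __print_flags(__entry->flags, "|",
 *				{ 0x1, "RD" }, { 0x2, "WR" }))
 *
 * Each { value, name } pair becomes an entry in the static
 * trace_print_flags table built by the macros above.
 */
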
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	event = container_of(trace_event, struct ftrace_event_call,	\
			     event);					\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event->event.type) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

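/*
 * Illustration (hypothetical field): in this stage __field(int, bar)
 * expands to roughly
 *
 *	ret = trace_define_field(event_call, "int", "bar",
 *				 offsetof(typeof(field), bar),
 *				 sizeof(field.bar),
 *				 is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * so each field is registered with its type, name, offset and size for
 * use by the event filtering code.
 */
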
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer,
 *						   event, irq_flags, pc);
 * }
 *
 * static struct trace_event_functions ftrace_event_type_funcs_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_<call>.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_<call>,
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.class			= &event_class_<template>,
 *	.event.funcs		= &ftrace_event_type_funcs_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

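/*
 * A usage sketch (hypothetical fields): inside TP_fast_assign() the
 * static and dynamic fields are filled like this:
 *
 *	TP_fast_assign(
 *		__entry->bar = bar;
 *		__assign_str(name, name);
 *	)
 *
 * The strcpy() in __assign_str() is bounded in practice because
 * ftrace_get_offsets_<call>() reserved strlen(src) + 1 bytes for the
 * string when the ring buffer event was sized.
 */
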
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}
/*
 * The ftrace_test_probe_##call() functions are compiled out; they are
 * only here as a build-time check to make sure that if the tracepoint
 * handling changes, the ftrace probe will fail to compile unless it too
 * is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

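/*
 * Illustration (hypothetical printk): with __entry redefined to REC,
 * TP_printk("bar=%d", __entry->bar) now stringifies to
 *
 *	"\"bar=%d\", REC->bar"
 *
 * which is the print_fmt_<call> string stored below and shown in the
 * event's format file for user-space parsers.
 */
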
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback for perf events.
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf buffer.
 *
 * static void perf_trace_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make the recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid leaking stack to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct>	<- compute the offsets of the dynamic arrays
 *
 *	<assign>	<- assign the field values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);	<- submit them to the perf counter
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

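/*
 * A usage sketch (assumed convention): an event can weight its perf
 * sample by routing a field through these hooks in its assign block,
 * e.g. __perf_count(__entry->runtime) sets __count so that
 * perf_trace_buf_submit() below reports that value instead of 1.
 */
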
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	perf_fetch_caller_regs(&__regs, 1);				\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head);				\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT