include/linux/perf_event.h  (net-next-2.6.git blame view; commit: "perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track...")
0793a61d 1/*
57c0c15b 2 * Performance events:
0793a61d 3 *
a308444c
IM
4 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
0793a61d 7 *
57c0c15b 8 * Data type definitions, declarations, prototypes.
0793a61d 9 *
a308444c 10 * Started by: Thomas Gleixner and Ingo Molnar
0793a61d 11 *
57c0c15b 12 * For licencing details see kernel-base/COPYING
0793a61d 13 */
cdd6c482
IM
14#ifndef _LINUX_PERF_EVENT_H
15#define _LINUX_PERF_EVENT_H
0793a61d 16
f3dfd265
PM
17#include <linux/types.h>
18#include <linux/ioctl.h>
9aaa131a 19#include <asm/byteorder.h>
0793a61d
TG
20
21/*
9f66a381
IM
22 * User-space ABI bits:
23 */
24
25/*
0d48696f 26 * attr.type
0793a61d 27 */
1c432d89 28enum perf_type_id {
a308444c
IM
29 PERF_TYPE_HARDWARE = 0,
30 PERF_TYPE_SOFTWARE = 1,
31 PERF_TYPE_TRACEPOINT = 2,
32 PERF_TYPE_HW_CACHE = 3,
33 PERF_TYPE_RAW = 4,
24f1e32c 34 PERF_TYPE_BREAKPOINT = 5,
b8e83514 35
a308444c 36 PERF_TYPE_MAX, /* non-ABI */
b8e83514 37};
6c594c21 38
b8e83514 39/*
cdd6c482
IM
40 * Generalized performance event event_id types, used by the
41 * attr.config parameter of the sys_perf_event_open()
a308444c 42 * syscall:
b8e83514 43 */
1c432d89 44enum perf_hw_id {
9f66a381 45 /*
b8e83514 46 * Common hardware events, generalized by the kernel:
9f66a381 47 */
f4dbfa8f
PZ
48 PERF_COUNT_HW_CPU_CYCLES = 0,
49 PERF_COUNT_HW_INSTRUCTIONS = 1,
50 PERF_COUNT_HW_CACHE_REFERENCES = 2,
51 PERF_COUNT_HW_CACHE_MISSES = 3,
52 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
53 PERF_COUNT_HW_BRANCH_MISSES = 5,
54 PERF_COUNT_HW_BUS_CYCLES = 6,
55
a308444c 56 PERF_COUNT_HW_MAX, /* non-ABI */
b8e83514 57};
e077df4f 58
8326f44d 59/*
cdd6c482 60 * Generalized hardware cache events:
8326f44d 61 *
8be6e8f3 62 * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
8326f44d
IM
63 * { read, write, prefetch } x
64 * { accesses, misses }
65 */
1c432d89 66enum perf_hw_cache_id {
a308444c
IM
67 PERF_COUNT_HW_CACHE_L1D = 0,
68 PERF_COUNT_HW_CACHE_L1I = 1,
69 PERF_COUNT_HW_CACHE_LL = 2,
70 PERF_COUNT_HW_CACHE_DTLB = 3,
71 PERF_COUNT_HW_CACHE_ITLB = 4,
72 PERF_COUNT_HW_CACHE_BPU = 5,
73
74 PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
8326f44d
IM
75};
76
1c432d89 77enum perf_hw_cache_op_id {
a308444c
IM
78 PERF_COUNT_HW_CACHE_OP_READ = 0,
79 PERF_COUNT_HW_CACHE_OP_WRITE = 1,
80 PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
8326f44d 81
a308444c 82 PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
8326f44d
IM
83};
84
1c432d89
PZ
85enum perf_hw_cache_op_result_id {
86 PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
87 PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
8326f44d 88
a308444c 89 PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
8326f44d
IM
90};
91
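/*
 * Illustrative sketch, not part of the ABI above: for PERF_TYPE_HW_CACHE
 * events the three cache ids are packed into attr.config as
 * (id) | (op_id << 8) | (op_result_id << 16), so L1 data-cache read
 * misses, for example, would be requested as:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */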
b8e83514 92/*
cdd6c482
IM
93 * Special "software" events provided by the kernel, even if the hardware
94 * does not support performance events. These events measure various
b8e83514
PZ
95 * physical and software events of the kernel (and allow profiling
96 * of them as well):
97 */
1c432d89 98enum perf_sw_ids {
a308444c
IM
99 PERF_COUNT_SW_CPU_CLOCK = 0,
100 PERF_COUNT_SW_TASK_CLOCK = 1,
101 PERF_COUNT_SW_PAGE_FAULTS = 2,
102 PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
103 PERF_COUNT_SW_CPU_MIGRATIONS = 4,
104 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
105 PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
f7d79860
AB
106 PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
107 PERF_COUNT_SW_EMULATION_FAULTS = 8,
a308444c
IM
108
109 PERF_COUNT_SW_MAX, /* non-ABI */
0793a61d
TG
110};
111
8a057d84 112/*
0d48696f 113 * Bits that can be set in attr.sample_type to request information
8a057d84
PZ
114 * in the overflow packets.
115 */
cdd6c482 116enum perf_event_sample_format {
a308444c
IM
117 PERF_SAMPLE_IP = 1U << 0,
118 PERF_SAMPLE_TID = 1U << 1,
119 PERF_SAMPLE_TIME = 1U << 2,
120 PERF_SAMPLE_ADDR = 1U << 3,
3dab77fb 121 PERF_SAMPLE_READ = 1U << 4,
a308444c
IM
122 PERF_SAMPLE_CALLCHAIN = 1U << 5,
123 PERF_SAMPLE_ID = 1U << 6,
124 PERF_SAMPLE_CPU = 1U << 7,
125 PERF_SAMPLE_PERIOD = 1U << 8,
7f453c24 126 PERF_SAMPLE_STREAM_ID = 1U << 9,
3a43ce68 127 PERF_SAMPLE_RAW = 1U << 10,
974802ea 128
f413cdb8 129 PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
8a057d84
PZ
130};
131
53cfbf59 132/*
cdd6c482 133 * The format of the data returned by read() on a perf event fd,
3dab77fb
PZ
134 * as specified by attr.read_format:
135 *
136 * struct read_format {
57c0c15b
IM
137 * { u64 value;
138 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
139 * { u64 time_running; } && PERF_FORMAT_RUNNING
140 * { u64 id; } && PERF_FORMAT_ID
141 * } && !PERF_FORMAT_GROUP
3dab77fb 142 *
57c0c15b
IM
143 * { u64 nr;
144 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
145 * { u64 time_running; } && PERF_FORMAT_RUNNING
146 * { u64 value;
147 * { u64 id; } && PERF_FORMAT_ID
148 * } cntr[nr];
149 * } && PERF_FORMAT_GROUP
3dab77fb 150 * };
53cfbf59 151 */
cdd6c482 152enum perf_event_read_format {
a308444c
IM
153 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
154 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
155 PERF_FORMAT_ID = 1U << 2,
3dab77fb 156 PERF_FORMAT_GROUP = 1U << 3,
974802ea 157
57c0c15b 158 PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
53cfbf59
PM
159};
160
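/*
 * Illustrative sketch of consuming the read() layout above from user space,
 * assuming the event was opened without PERF_FORMAT_GROUP but with both
 * time fields requested; the two times allow scaling a multiplexed count:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running) {
 *		__u64 scaled = rf.value * rf.time_enabled / rf.time_running;
 *	}
 */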
974802ea
PZ
161#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
162
9f66a381 163/*
cdd6c482 164 * Hardware event_id to monitor via a performance monitoring event:
9f66a381 165 */
cdd6c482 166struct perf_event_attr {
974802ea 167
f4a2deb4 168 /*
a21ca2ca
IM
169 * Major type: hardware/software/tracepoint/etc.
170 */
171 __u32 type;
974802ea
PZ
172
173 /*
174 * Size of the attr structure, for fwd/bwd compat.
175 */
176 __u32 size;
a21ca2ca
IM
177
178 /*
179 * Type specific configuration information.
f4a2deb4
PZ
180 */
181 __u64 config;
9f66a381 182
60db5e09 183 union {
b23f3325
PZ
184 __u64 sample_period;
185 __u64 sample_freq;
60db5e09
PZ
186 };
187
b23f3325
PZ
188 __u64 sample_type;
189 __u64 read_format;
9f66a381 190
2743a5b0 191 __u64 disabled : 1, /* off by default */
0475f9ea
PM
192 inherit : 1, /* children inherit it */
193 pinned : 1, /* must always be on PMU */
194 exclusive : 1, /* only group on PMU */
195 exclude_user : 1, /* don't count user */
196 exclude_kernel : 1, /* ditto kernel */
197 exclude_hv : 1, /* ditto hypervisor */
2743a5b0 198 exclude_idle : 1, /* don't count when idle */
0a4a9391 199 mmap : 1, /* include mmap data */
8d1b2d93 200 comm : 1, /* include comm data */
60db5e09 201 freq : 1, /* use freq, not period */
bfbd3381 202 inherit_stat : 1, /* per task counts */
57e7986e 203 enable_on_exec : 1, /* next exec enables */
9f498cc5 204 task : 1, /* trace fork/exit */
2667de81 205 watermark : 1, /* wakeup_watermark */
ab608344
PZ
206 /*
207 * precise_ip:
208 *
209 * 0 - SAMPLE_IP can have arbitrary skid
210 * 1 - SAMPLE_IP must have constant skid
211 * 2 - SAMPLE_IP requested to have 0 skid
212 * 3 - SAMPLE_IP must have 0 skid
213 *
214 * See also PERF_RECORD_MISC_EXACT_IP
215 */
216 precise_ip : 2, /* skid constraint */
217
218 __reserved_1 : 47;
2743a5b0 219
2667de81
PZ
220 union {
221 __u32 wakeup_events; /* wakeup every n events */
222 __u32 wakeup_watermark; /* bytes before wakeup */
223 };
24f1e32c 224
f13c12c6 225 __u32 bp_type;
cd757645
MS
226 __u64 bp_addr;
227 __u64 bp_len;
eab656ae
TG
228};
229
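/*
 * Illustrative user-space sketch, not part of this header: a minimal
 * perf_event_attr counting instructions for the calling task on any CPU.
 * There is no glibc wrapper for the syscall, so it is invoked directly;
 * __NR_perf_event_open is assumed to be defined for the architecture.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * The arguments after &attr are pid (0 = current task), cpu (-1 = any CPU),
 * group_fd (-1 = no group) and flags (0).
 */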
d859e29f 230/*
cdd6c482 231 * Ioctls that can be done on a perf event fd:
d859e29f 232 */
cdd6c482 233#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
57c0c15b
IM
234#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
235#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
cdd6c482 236#define PERF_EVENT_IOC_RESET _IO ('$', 3)
4c49b128 237#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
cdd6c482 238#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
6fb2915d 239#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
cdd6c482
IM
240
241enum perf_event_ioc_flags {
3df5edad
PZ
242 PERF_IOC_FLAG_GROUP = 1U << 0,
243};
d859e29f 244
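/*
 * Illustrative sketch: a counter opened with attr.disabled = 1 is typically
 * reset and enabled around the region of interest and then read back
 * (run_workload() is a stand-in for the measured code):
 *
 *	__u64 count;
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	run_workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * Passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the operation
 * to the whole event group instead of just this event.
 */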
37d81828
PM
245/*
246 * Structure of the page that can be mapped via mmap
247 */
cdd6c482 248struct perf_event_mmap_page {
37d81828
PM
249 __u32 version; /* version number of this structure */
250 __u32 compat_version; /* lowest version this is compat with */
38ff667b
PZ
251
252 /*
cdd6c482 253 * Bits needed to read the hw events in user-space.
38ff667b 254 *
92f22a38
PZ
255 * u32 seq;
256 * s64 count;
38ff667b 257 *
a2e87d06
PZ
258 * do {
259 * seq = pc->lock;
38ff667b 260 *
a2e87d06
PZ
261 * barrier()
262 * if (pc->index) {
263 * count = pmc_read(pc->index - 1);
264 * count += pc->offset;
265 * } else
266 * goto regular_read;
38ff667b 267 *
a2e87d06
PZ
268 * barrier();
269 * } while (pc->lock != seq);
38ff667b 270 *
92f22a38
PZ
271 * NOTE: for obvious reasons this only works on self-monitoring
272 * processes.
38ff667b 273 */
37d81828 274 __u32 lock; /* seqlock for synchronization */
cdd6c482
IM
275 __u32 index; /* hardware event identifier */
276 __s64 offset; /* add to hardware event value */
277 __u64 time_enabled; /* time event active */
278 __u64 time_running; /* time event on cpu */
7b732a75 279
41f95331
PZ
280 /*
281 * Hole for extension of the self monitor capabilities
282 */
283
7f8b4e4e 284 __u64 __reserved[123]; /* align to 1k */
41f95331 285
38ff667b
PZ
286 /*
287 * Control data for the mmap() data buffer.
288 *
43a21ea8
PZ
289 * User-space reading the @data_head value should issue an rmb(), on
290 * SMP capable platforms, after reading this value -- see
cdd6c482 291 * perf_event_wakeup().
43a21ea8
PZ
292 *
293 * When the mapping is PROT_WRITE the @data_tail value should be
294 * written by userspace to reflect the last read data. In this case
295 * the kernel will not overwrite unread data.
38ff667b 296 */
8e3747c1 297 __u64 data_head; /* head in the data section */
43a21ea8 298 __u64 data_tail; /* user-space written tail */
37d81828
PM
299};
300
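/*
 * Illustrative reader-side sketch of the rules above, assuming "base" points
 * at a mapping of 1 + 2^n pages whose first page is this structure:
 *
 *	struct perf_event_mmap_page *pc = base;
 *	__u64 head = pc->data_head;
 *
 *	rmb();
 *	... consume records between pc->data_tail and head ...
 *	pc->data_tail = head;	(only meaningful for PROT_WRITE mappings)
 */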
39447b38 301#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
184f412c 302#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
cdd6c482
IM
303#define PERF_RECORD_MISC_KERNEL (1 << 0)
304#define PERF_RECORD_MISC_USER (2 << 0)
305#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
39447b38
ZY
306#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
307#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
6fab0192 308
ab608344
PZ
309/*
310 * Indicates that the content of PERF_SAMPLE_IP points to
311 * the actual instruction that triggered the event. See also
312 * perf_event_attr::precise_ip.
313 */
314#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
ef21f683
PZ
315/*
316 * Reserve the last bit to indicate some extended misc field
317 */
318#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
319
5c148194
PZ
320struct perf_event_header {
321 __u32 type;
6fab0192
PZ
322 __u16 misc;
323 __u16 size;
5c148194
PZ
324};
325
326enum perf_event_type {
5ed00415 327
0c593b34
PZ
328 /*
329 * The MMAP events record the PROT_EXEC mappings so that we can
330 * correlate userspace IPs to code. They have the following structure:
331 *
332 * struct {
0127c3ea 333 * struct perf_event_header header;
0c593b34 334 *
0127c3ea
IM
335 * u32 pid, tid;
336 * u64 addr;
337 * u64 len;
338 * u64 pgoff;
339 * char filename[];
0c593b34
PZ
340 * };
341 */
cdd6c482 342 PERF_RECORD_MMAP = 1,
0a4a9391 343
43a21ea8
PZ
344 /*
345 * struct {
57c0c15b
IM
346 * struct perf_event_header header;
347 * u64 id;
348 * u64 lost;
43a21ea8
PZ
349 * };
350 */
cdd6c482 351 PERF_RECORD_LOST = 2,
43a21ea8 352
8d1b2d93
PZ
353 /*
354 * struct {
0127c3ea 355 * struct perf_event_header header;
8d1b2d93 356 *
0127c3ea
IM
357 * u32 pid, tid;
358 * char comm[];
8d1b2d93
PZ
359 * };
360 */
cdd6c482 361 PERF_RECORD_COMM = 3,
8d1b2d93 362
9f498cc5
PZ
363 /*
364 * struct {
365 * struct perf_event_header header;
366 * u32 pid, ppid;
367 * u32 tid, ptid;
393b2ad8 368 * u64 time;
9f498cc5
PZ
369 * };
370 */
cdd6c482 371 PERF_RECORD_EXIT = 4,
9f498cc5 372
26b119bc
PZ
373 /*
374 * struct {
0127c3ea
IM
375 * struct perf_event_header header;
376 * u64 time;
689802b2 377 * u64 id;
7f453c24 378 * u64 stream_id;
a78ac325
PZ
379 * };
380 */
184f412c
IM
381 PERF_RECORD_THROTTLE = 5,
382 PERF_RECORD_UNTHROTTLE = 6,
a78ac325 383
60313ebe
PZ
384 /*
385 * struct {
a21ca2ca
IM
386 * struct perf_event_header header;
387 * u32 pid, ppid;
9f498cc5 388 * u32 tid, ptid;
a6f10a2f 389 * u64 time;
60313ebe
PZ
390 * };
391 */
cdd6c482 392 PERF_RECORD_FORK = 7,
60313ebe 393
38b200d6
PZ
394 /*
395 * struct {
184f412c
IM
396 * struct perf_event_header header;
397 * u32 pid, tid;
3dab77fb 398 *
184f412c 399 * struct read_format values;
38b200d6
PZ
400 * };
401 */
cdd6c482 402 PERF_RECORD_READ = 8,
38b200d6 403
8a057d84 404 /*
0c593b34 405 * struct {
0127c3ea 406 * struct perf_event_header header;
0c593b34 407 *
43a21ea8
PZ
408 * { u64 ip; } && PERF_SAMPLE_IP
409 * { u32 pid, tid; } && PERF_SAMPLE_TID
410 * { u64 time; } && PERF_SAMPLE_TIME
411 * { u64 addr; } && PERF_SAMPLE_ADDR
e6e18ec7 412 * { u64 id; } && PERF_SAMPLE_ID
7f453c24 413 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
43a21ea8 414 * { u32 cpu, res; } && PERF_SAMPLE_CPU
57c0c15b 415 * { u64 period; } && PERF_SAMPLE_PERIOD
0c593b34 416 *
3dab77fb 417 * { struct read_format values; } && PERF_SAMPLE_READ
0c593b34 418 *
f9188e02 419 * { u64 nr,
43a21ea8 420 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
3dab77fb 421 *
57c0c15b
IM
422 * #
423 * # The RAW record below is opaque data wrt the ABI
424 * #
425 * # That is, the ABI doesn't make any promises wrt
426 * # the stability of its content, it may vary depending
427 * # on event, hardware, kernel version and phase of
428 * # the moon.
429 * #
430 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
431 * #
3dab77fb 432 *
a044560c
PZ
433 * { u32 size;
434 * char data[size]; } && PERF_SAMPLE_RAW
0c593b34 435 * };
8a057d84 436 */
184f412c 437 PERF_RECORD_SAMPLE = 9,
e6e18ec7 438
cdd6c482 439 PERF_RECORD_MAX, /* non-ABI */
5c148194
PZ
440};
441
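/*
 * Illustrative sketch (user space): records in the mmap data area each start
 * with a perf_event_header and are normally demultiplexed on header->type;
 * "rec" below is a hypothetical pointer to one complete record:
 *
 *	struct perf_event_header *hdr = rec;
 *
 *	switch (hdr->type) {
 *	case PERF_RECORD_SAMPLE:
 *		(fields follow in the order selected by attr.sample_type)
 *		break;
 *	case PERF_RECORD_MMAP:
 *	case PERF_RECORD_COMM:
 *	case PERF_RECORD_LOST:
 *	default:
 *		break;
 *	}
 *
 * Records may wrap at the end of the data area, in which case they need to
 * be copied into a linear buffer before parsing.
 */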
f9188e02
PZ
442enum perf_callchain_context {
443 PERF_CONTEXT_HV = (__u64)-32,
444 PERF_CONTEXT_KERNEL = (__u64)-128,
445 PERF_CONTEXT_USER = (__u64)-512,
7522060c 446
f9188e02
PZ
447 PERF_CONTEXT_GUEST = (__u64)-2048,
448 PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
449 PERF_CONTEXT_GUEST_USER = (__u64)-2560,
450
451 PERF_CONTEXT_MAX = (__u64)-4095,
7522060c
IM
452};
453
a4be7c27
PZ
454#define PERF_FLAG_FD_NO_GROUP (1U << 0)
455#define PERF_FLAG_FD_OUTPUT (1U << 1)
456
f3dfd265 457#ifdef __KERNEL__
9f66a381 458/*
f3dfd265 459 * Kernel-internal data types and definitions:
9f66a381
IM
460 */
461
cdd6c482
IM
462#ifdef CONFIG_PERF_EVENTS
463# include <asm/perf_event.h>
f3dfd265
PM
464#endif
465
39447b38
ZY
466struct perf_guest_info_callbacks {
467 int (*is_in_guest) (void);
468 int (*is_user_mode) (void);
469 unsigned long (*get_guest_ip) (void);
470};
471
2ff6cfd7
AB
472#ifdef CONFIG_HAVE_HW_BREAKPOINT
473#include <asm/hw_breakpoint.h>
474#endif
475
f3dfd265
PM
476#include <linux/list.h>
477#include <linux/mutex.h>
478#include <linux/rculist.h>
479#include <linux/rcupdate.h>
480#include <linux/spinlock.h>
d6d020e9 481#include <linux/hrtimer.h>
3c446b3d 482#include <linux/fs.h>
709e50cf 483#include <linux/pid_namespace.h>
906010b2 484#include <linux/workqueue.h>
5331d7b8 485#include <linux/ftrace.h>
85cfabbc 486#include <linux/cpu.h>
f3dfd265 487#include <asm/atomic.h>
fa588151 488#include <asm/local.h>
f3dfd265 489
f9188e02
PZ
490#define PERF_MAX_STACK_DEPTH 255
491
492struct perf_callchain_entry {
493 __u64 nr;
494 __u64 ip[PERF_MAX_STACK_DEPTH];
495};
496
3a43ce68
FW
497struct perf_raw_record {
498 u32 size;
499 void *data;
f413cdb8
FW
500};
501
caff2bef
PZ
502struct perf_branch_entry {
503 __u64 from;
504 __u64 to;
505 __u64 flags;
506};
507
508struct perf_branch_stack {
509 __u64 nr;
510 struct perf_branch_entry entries[0];
511};
512
f3dfd265
PM
513struct task_struct;
514
0793a61d 515/**
cdd6c482 516 * struct hw_perf_event - performance event hardware details:
0793a61d 517 */
cdd6c482
IM
518struct hw_perf_event {
519#ifdef CONFIG_PERF_EVENTS
d6d020e9
PZ
520 union {
521 struct { /* hardware */
a308444c 522 u64 config;
447a194b 523 u64 last_tag;
a308444c 524 unsigned long config_base;
cdd6c482 525 unsigned long event_base;
a308444c 526 int idx;
447a194b 527 int last_cpu;
d6d020e9 528 };
721a669b
SS
529 struct { /* software */
530 s64 remaining;
a308444c 531 struct hrtimer hrtimer;
d6d020e9 532 };
24f1e32c 533#ifdef CONFIG_HAVE_HW_BREAKPOINT
dd8b1cf6
FW
534 /* breakpoint */
535 struct arch_hw_breakpoint info;
24f1e32c 536#endif
d6d020e9 537 };
ee06094f 538 atomic64_t prev_count;
b23f3325 539 u64 sample_period;
9e350de3 540 u64 last_period;
ee06094f 541 atomic64_t period_left;
60db5e09 542 u64 interrupts;
6a24ed6c 543
abd50713
PZ
544 u64 freq_time_stamp;
545 u64 freq_count_stamp;
ee06094f 546#endif
0793a61d
TG
547};
548
cdd6c482 549struct perf_event;
621a01ea 550
6bde9b6c
LM
551#define PERF_EVENT_TXN_STARTED 1
552
621a01ea 553/**
4aeb0b42 554 * struct pmu - generic performance monitoring unit
621a01ea 555 */
4aeb0b42 556struct pmu {
cdd6c482
IM
557 int (*enable) (struct perf_event *event);
558 void (*disable) (struct perf_event *event);
d76a0812
SE
559 int (*start) (struct perf_event *event);
560 void (*stop) (struct perf_event *event);
cdd6c482
IM
561 void (*read) (struct perf_event *event);
562 void (*unthrottle) (struct perf_event *event);
6bde9b6c
LM
563
564 /*
565 * Group event scheduling is treated as a transaction:
566 * add the group's events as a whole and perform one schedulability test.
567 * If the test fails, roll back the whole group.
568 */
569
570 void (*start_txn) (const struct pmu *pmu);
571 void (*cancel_txn) (const struct pmu *pmu);
572 int (*commit_txn) (const struct pmu *pmu);
621a01ea
IM
573};
574
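/*
 * Illustrative sketch of how the transaction hooks above are meant to be
 * driven when scheduling an event group (roughly what the core does):
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group:
 *		if (pmu->enable(event))
 *			goto rollback;
 *	if (!pmu->commit_txn(pmu))
 *		return 0;	(whole group is now scheduled)
 * rollback:
 *	pmu->cancel_txn(pmu);
 */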
6a930700 575/**
cdd6c482 576 * enum perf_event_active_state - the states of an event
6a930700 577 */
cdd6c482 578enum perf_event_active_state {
57c0c15b 579 PERF_EVENT_STATE_ERROR = -2,
cdd6c482
IM
580 PERF_EVENT_STATE_OFF = -1,
581 PERF_EVENT_STATE_INACTIVE = 0,
57c0c15b 582 PERF_EVENT_STATE_ACTIVE = 1,
6a930700
IM
583};
584
9b51f66d
IM
585struct file;
586
7b732a75
PZ
587struct perf_mmap_data {
588 struct rcu_head rcu_head;
906010b2
PZ
589#ifdef CONFIG_PERF_USE_VMALLOC
590 struct work_struct work;
591#endif
fa588151 592 int data_order; /* allocation order */
8740f941 593 int nr_pages; /* nr of data pages */
43a21ea8 594 int writable; /* are we writable */
c5078f78 595 int nr_locked; /* nr pages mlocked */
8740f941 596
c33a0bc4 597 atomic_t poll; /* POLL_ for wakeups */
8740f941 598
fa588151
PZ
599 local_t head; /* write position */
600 local_t nest; /* nested writers */
601 local_t events; /* event limit */
602 local_t wakeup; /* needs a wakeup */
603 local_t lost; /* nr records lost */
ef60777c 604
2667de81
PZ
605 long watermark; /* wakeup watermark */
606
57c0c15b 607 struct perf_event_mmap_page *user_page;
0127c3ea 608 void *data_pages[0];
7b732a75
PZ
609};
610
671dec5d
PZ
611struct perf_pending_entry {
612 struct perf_pending_entry *next;
613 void (*func)(struct perf_pending_entry *);
925d519a
PZ
614};
615
453f19ee
PZ
616struct perf_sample_data;
617
b326e956
FW
618typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
619 struct perf_sample_data *,
620 struct pt_regs *regs);
621
d6f962b5
FW
622enum perf_group_flag {
623 PERF_GROUP_SOFTWARE = 0x1,
624};
625
76e1d904
FW
626#define SWEVENT_HLIST_BITS 8
627#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
628
629struct swevent_hlist {
630 struct hlist_head heads[SWEVENT_HLIST_SIZE];
631 struct rcu_head rcu_head;
632};
633
0793a61d 634/**
cdd6c482 635 * struct perf_event - performance event kernel representation:
0793a61d 636 */
cdd6c482
IM
637struct perf_event {
638#ifdef CONFIG_PERF_EVENTS
65abc865 639 struct list_head group_entry;
592903cd 640 struct list_head event_entry;
04289bb9 641 struct list_head sibling_list;
76e1d904 642 struct hlist_node hlist_entry;
0127c3ea 643 int nr_siblings;
d6f962b5 644 int group_flags;
cdd6c482
IM
645 struct perf_event *group_leader;
646 struct perf_event *output;
4aeb0b42 647 const struct pmu *pmu;
04289bb9 648
cdd6c482 649 enum perf_event_active_state state;
0793a61d 650 atomic64_t count;
ee06094f 651
53cfbf59 652 /*
cdd6c482 653 * These are the total time in nanoseconds that the event
53cfbf59 654 * has been enabled (i.e. eligible to run, and the task has
cdd6c482 655 * been scheduled in, if this is a per-task event)
53cfbf59
PM
656 * and running (scheduled onto the CPU), respectively.
657 *
658 * They are computed from tstamp_enabled, tstamp_running and
cdd6c482 659 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
53cfbf59
PM
660 */
661 u64 total_time_enabled;
662 u64 total_time_running;
663
664 /*
665 * These are timestamps used for computing total_time_enabled
cdd6c482 666 * and total_time_running when the event is in INACTIVE or
53cfbf59
PM
667 * ACTIVE state, measured in nanoseconds from an arbitrary point
668 * in time.
cdd6c482
IM
669 * tstamp_enabled: the notional time when the event was enabled
670 * tstamp_running: the notional time when the event was scheduled on
53cfbf59 671 * tstamp_stopped: in INACTIVE state, the notional time when the
cdd6c482 672 * event was scheduled off.
53cfbf59
PM
673 */
674 u64 tstamp_enabled;
675 u64 tstamp_running;
676 u64 tstamp_stopped;
677
24f1e32c 678 struct perf_event_attr attr;
cdd6c482 679 struct hw_perf_event hw;
0793a61d 680
cdd6c482 681 struct perf_event_context *ctx;
9b51f66d 682 struct file *filp;
0793a61d 683
53cfbf59
PM
684 /*
685 * These accumulate total time (in nanoseconds) that children
cdd6c482 686 * events have been enabled and running, respectively.
53cfbf59
PM
687 */
688 atomic64_t child_total_time_enabled;
689 atomic64_t child_total_time_running;
690
0793a61d 691 /*
d859e29f 692 * Protect attach/detach and child_list:
0793a61d 693 */
fccc714b
PZ
694 struct mutex child_mutex;
695 struct list_head child_list;
cdd6c482 696 struct perf_event *parent;
0793a61d
TG
697
698 int oncpu;
699 int cpu;
700
082ff5a2
PZ
701 struct list_head owner_entry;
702 struct task_struct *owner;
703
7b732a75
PZ
704 /* mmap bits */
705 struct mutex mmap_mutex;
706 atomic_t mmap_count;
707 struct perf_mmap_data *data;
37d81828 708
7b732a75 709 /* poll related */
0793a61d 710 wait_queue_head_t waitq;
3c446b3d 711 struct fasync_struct *fasync;
79f14641
PZ
712
713 /* delayed work for NMIs and such */
714 int pending_wakeup;
4c9e2542 715 int pending_kill;
79f14641 716 int pending_disable;
671dec5d 717 struct perf_pending_entry pending;
592903cd 718
79f14641
PZ
719 atomic_t event_limit;
720
cdd6c482 721 void (*destroy)(struct perf_event *);
592903cd 722 struct rcu_head rcu_head;
709e50cf
PZ
723
724 struct pid_namespace *ns;
8e5799b1 725 u64 id;
6fb2915d 726
b326e956 727 perf_overflow_handler_t overflow_handler;
453f19ee 728
07b139c8 729#ifdef CONFIG_EVENT_TRACING
1c024eca 730 struct ftrace_event_call *tp_event;
6fb2915d 731 struct event_filter *filter;
ee06094f 732#endif
6fb2915d
LZ
733
734#endif /* CONFIG_PERF_EVENTS */
0793a61d
TG
735};
736
737/**
cdd6c482 738 * struct perf_event_context - event context structure
0793a61d 739 *
cdd6c482 740 * Used as a container for task events and CPU events as well:
0793a61d 741 */
cdd6c482 742struct perf_event_context {
0793a61d 743 /*
cdd6c482 744 * Protect the states of the events in the list,
d859e29f 745 * nr_active, and the list:
0793a61d 746 */
e625cce1 747 raw_spinlock_t lock;
d859e29f 748 /*
cdd6c482 749 * Protect the list of events. Locking either mutex or lock
d859e29f
PM
750 * is sufficient to ensure the list doesn't change; to change
751 * the list you need to lock both the mutex and the spinlock.
752 */
a308444c 753 struct mutex mutex;
04289bb9 754
889ff015
FW
755 struct list_head pinned_groups;
756 struct list_head flexible_groups;
a308444c 757 struct list_head event_list;
cdd6c482 758 int nr_events;
a308444c
IM
759 int nr_active;
760 int is_active;
bfbd3381 761 int nr_stat;
a308444c
IM
762 atomic_t refcount;
763 struct task_struct *task;
53cfbf59
PM
764
765 /*
4af4998b 766 * Context clock, runs when context enabled.
53cfbf59 767 */
a308444c
IM
768 u64 time;
769 u64 timestamp;
564c2b21
PM
770
771 /*
772 * These fields let us detect when two contexts have both
773 * been cloned (inherited) from a common ancestor.
774 */
cdd6c482 775 struct perf_event_context *parent_ctx;
a308444c
IM
776 u64 parent_gen;
777 u64 generation;
778 int pin_count;
779 struct rcu_head rcu_head;
0793a61d
TG
780};
781
782/**
cdd6c482 783 * struct perf_event_cpu_context - per cpu event context structure
0793a61d
TG
784 */
785struct perf_cpu_context {
cdd6c482
IM
786 struct perf_event_context ctx;
787 struct perf_event_context *task_ctx;
0793a61d
TG
788 int active_oncpu;
789 int max_pertask;
3b6f9e5c 790 int exclusive;
76e1d904
FW
791 struct swevent_hlist *swevent_hlist;
792 struct mutex hlist_mutex;
793 int hlist_refcount;
96f6d444
PZ
794
795 /*
796 * Recursion avoidance:
797 *
798 * task, softirq, irq, nmi context
799 */
22a4f650 800 int recursion[4];
0793a61d
TG
801};
802
5622f295 803struct perf_output_handle {
57c0c15b
IM
804 struct perf_event *event;
805 struct perf_mmap_data *data;
806 unsigned long head;
807 unsigned long offset;
6d1acfd5 808 unsigned long wakeup;
57c0c15b
IM
809 int nmi;
810 int sample;
5622f295
MM
811};
812
cdd6c482 813#ifdef CONFIG_PERF_EVENTS
829b42dd 814
0793a61d
TG
815/*
816 * Set by architecture code:
817 */
cdd6c482 818extern int perf_max_events;
0793a61d 819
cdd6c482 820extern const struct pmu *hw_perf_event_init(struct perf_event *event);
621a01ea 821
49f47433 822extern void perf_event_task_sched_in(struct task_struct *task);
184f412c 823extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
49f47433 824extern void perf_event_task_tick(struct task_struct *task);
cdd6c482
IM
825extern int perf_event_init_task(struct task_struct *child);
826extern void perf_event_exit_task(struct task_struct *child);
827extern void perf_event_free_task(struct task_struct *task);
828extern void set_perf_event_pending(void);
829extern void perf_event_do_pending(void);
830extern void perf_event_print_debug(void);
9e35ad38
PZ
831extern void __perf_disable(void);
832extern bool __perf_enable(void);
833extern void perf_disable(void);
834extern void perf_enable(void);
cdd6c482
IM
835extern int perf_event_task_disable(void);
836extern int perf_event_task_enable(void);
cdd6c482 837extern void perf_event_update_userpage(struct perf_event *event);
fb0459d7
AV
838extern int perf_event_release_kernel(struct perf_event *event);
839extern struct perf_event *
840perf_event_create_kernel_counter(struct perf_event_attr *attr,
841 int cpu,
97eaf530 842 pid_t pid,
b326e956 843 perf_overflow_handler_t callback);
59ed446f
PZ
844extern u64 perf_event_read_value(struct perf_event *event,
845 u64 *enabled, u64 *running);
5c92d124 846
df1a132b 847struct perf_sample_data {
5622f295
MM
848 u64 type;
849
850 u64 ip;
851 struct {
852 u32 pid;
853 u32 tid;
854 } tid_entry;
855 u64 time;
a308444c 856 u64 addr;
5622f295
MM
857 u64 id;
858 u64 stream_id;
859 struct {
860 u32 cpu;
861 u32 reserved;
862 } cpu_entry;
a308444c 863 u64 period;
5622f295 864 struct perf_callchain_entry *callchain;
3a43ce68 865 struct perf_raw_record *raw;
df1a132b
PZ
866};
867
dc1d628a
PZ
868static inline
869void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
870{
871 data->addr = addr;
872 data->raw = NULL;
873}
874
5622f295
MM
875extern void perf_output_sample(struct perf_output_handle *handle,
876 struct perf_event_header *header,
877 struct perf_sample_data *data,
cdd6c482 878 struct perf_event *event);
5622f295
MM
879extern void perf_prepare_sample(struct perf_event_header *header,
880 struct perf_sample_data *data,
cdd6c482 881 struct perf_event *event,
5622f295
MM
882 struct pt_regs *regs);
883
cdd6c482 884extern int perf_event_overflow(struct perf_event *event, int nmi,
5622f295
MM
885 struct perf_sample_data *data,
886 struct pt_regs *regs);
df1a132b 887
3b6f9e5c 888/*
cdd6c482 889 * Return 1 for a software event, 0 for a hardware event
3b6f9e5c 890 */
cdd6c482 891static inline int is_software_event(struct perf_event *event)
3b6f9e5c 892{
92b67598
PZ
893 switch (event->attr.type) {
894 case PERF_TYPE_SOFTWARE:
895 case PERF_TYPE_TRACEPOINT:
896 /* for now the breakpoint stuff also works as software event */
897 case PERF_TYPE_BREAKPOINT:
898 return 1;
899 }
900 return 0;
3b6f9e5c
PM
901}
902
cdd6c482 903extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
f29ac756 904
cdd6c482 905extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
f29ac756 906
5331d7b8
FW
907extern void
908perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
909
910/*
911 * Take a snapshot of the regs. Skip ip and frame pointer to
912 * the nth caller. We only need a few of the regs:
913 * - ip for PERF_SAMPLE_IP
914 * - cs for user_mode() tests
915 * - bp for callchains
916 * - eflags, for future purposes, just in case
917 */
918static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
919{
920 unsigned long ip;
921
922 memset(regs, 0, sizeof(*regs));
923
924 switch (skip) {
925 case 1 :
926 ip = CALLER_ADDR0;
927 break;
928 case 2 :
929 ip = CALLER_ADDR1;
930 break;
931 case 3 :
932 ip = CALLER_ADDR2;
933 break;
934 case 4:
935 ip = CALLER_ADDR3;
936 break;
937 /* No need to support further for now */
938 default:
939 ip = 0;
940 }
941
942 return perf_arch_fetch_caller_regs(regs, ip, skip);
943}
944
e49a5bd3
FW
945static inline void
946perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
947{
948 if (atomic_read(&perf_swevent_enabled[event_id])) {
949 struct pt_regs hot_regs;
950
951 if (!regs) {
952 perf_fetch_caller_regs(&hot_regs, 1);
953 regs = &hot_regs;
954 }
955 __perf_sw_event(event_id, nr, nmi, regs, addr);
956 }
957}
958
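/*
 * Illustrative sketch: kernel-side callers fire software events with the
 * helper above, e.g. from the page fault path (regs may be NULL, in which
 * case the caller's registers are snapshotted by perf_fetch_caller_regs()):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */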
cdd6c482 959extern void __perf_event_mmap(struct vm_area_struct *vma);
089dd79d 960
cdd6c482 961static inline void perf_event_mmap(struct vm_area_struct *vma)
089dd79d
PZ
962{
963 if (vma->vm_flags & VM_EXEC)
cdd6c482 964 __perf_event_mmap(vma);
089dd79d 965}
0a4a9391 966
39447b38 967extern struct perf_guest_info_callbacks *perf_guest_cbs;
dcf46b94
ZY
968extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
969extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
39447b38 970
cdd6c482
IM
971extern void perf_event_comm(struct task_struct *tsk);
972extern void perf_event_fork(struct task_struct *tsk);
8d1b2d93 973
394ee076
PZ
974extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
975
cdd6c482
IM
976extern int sysctl_perf_event_paranoid;
977extern int sysctl_perf_event_mlock;
978extern int sysctl_perf_event_sample_rate;
1ccd1549 979
320ebf09
PZ
980static inline bool perf_paranoid_tracepoint_raw(void)
981{
982 return sysctl_perf_event_paranoid > -1;
983}
984
985static inline bool perf_paranoid_cpu(void)
986{
987 return sysctl_perf_event_paranoid > 0;
988}
989
990static inline bool perf_paranoid_kernel(void)
991{
992 return sysctl_perf_event_paranoid > 1;
993}
994
cdd6c482 995extern void perf_event_init(void);
1c024eca
PZ
996extern void perf_tp_event(u64 addr, u64 count, void *record,
997 int entry_size, struct pt_regs *regs,
998 struct hlist_head *head);
24f1e32c 999extern void perf_bp_event(struct perf_event *event, void *data);
0d905bca 1000
9d23a90a 1001#ifndef perf_misc_flags
cdd6c482
IM
1002#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \
1003 PERF_RECORD_MISC_KERNEL)
9d23a90a
PM
1004#define perf_instruction_pointer(regs) instruction_pointer(regs)
1005#endif
1006
5622f295 1007extern int perf_output_begin(struct perf_output_handle *handle,
cdd6c482 1008 struct perf_event *event, unsigned int size,
5622f295
MM
1009 int nmi, int sample);
1010extern void perf_output_end(struct perf_output_handle *handle);
1011extern void perf_output_copy(struct perf_output_handle *handle,
1012 const void *buf, unsigned int len);
4ed7c92d
PZ
1013extern int perf_swevent_get_recursion_context(void);
1014extern void perf_swevent_put_recursion_context(int rctx);
44234adc
FW
1015extern void perf_event_enable(struct perf_event *event);
1016extern void perf_event_disable(struct perf_event *event);
0793a61d
TG
1017#else
1018static inline void
49f47433 1019perf_event_task_sched_in(struct task_struct *task) { }
0793a61d 1020static inline void
cdd6c482 1021perf_event_task_sched_out(struct task_struct *task,
49f47433 1022 struct task_struct *next) { }
0793a61d 1023static inline void
49f47433 1024perf_event_task_tick(struct task_struct *task) { }
cdd6c482
IM
1025static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1026static inline void perf_event_exit_task(struct task_struct *child) { }
1027static inline void perf_event_free_task(struct task_struct *task) { }
57c0c15b
IM
1028static inline void perf_event_do_pending(void) { }
1029static inline void perf_event_print_debug(void) { }
9e35ad38
PZ
1030static inline void perf_disable(void) { }
1031static inline void perf_enable(void) { }
57c0c15b
IM
1032static inline int perf_event_task_disable(void) { return -EINVAL; }
1033static inline int perf_event_task_enable(void) { return -EINVAL; }
15dbf27c 1034
925d519a 1035static inline void
cdd6c482 1036perf_sw_event(u32 event_id, u64 nr, int nmi,
78f13e95 1037 struct pt_regs *regs, u64 addr) { }
24f1e32c 1038static inline void
184f412c 1039perf_bp_event(struct perf_event *event, void *data) { }
0a4a9391 1040
39447b38 1041static inline int perf_register_guest_info_callbacks
dcf46b94 1042(struct perf_guest_info_callbacks *callbacks) { return 0; }
39447b38 1043static inline int perf_unregister_guest_info_callbacks
dcf46b94 1044(struct perf_guest_info_callbacks *callbacks) { return 0; }
39447b38 1045
57c0c15b 1046static inline void perf_event_mmap(struct vm_area_struct *vma) { }
cdd6c482
IM
1047static inline void perf_event_comm(struct task_struct *tsk) { }
1048static inline void perf_event_fork(struct task_struct *tsk) { }
1049static inline void perf_event_init(void) { }
184f412c 1050static inline int perf_swevent_get_recursion_context(void) { return -1; }
4ed7c92d 1051static inline void perf_swevent_put_recursion_context(int rctx) { }
44234adc
FW
1052static inline void perf_event_enable(struct perf_event *event) { }
1053static inline void perf_event_disable(struct perf_event *event) { }
0793a61d
TG
1054#endif
1055
5622f295
MM
1056#define perf_output_put(handle, x) \
1057 perf_output_copy((handle), &(x), sizeof(x))
1058
3f6da390
PZ
1059/*
1060 * This has to have a higher priority than migration_notifier in sched.c.
1061 */
1062#define perf_cpu_notifier(fn) \
1063do { \
1064 static struct notifier_block fn##_nb __cpuinitdata = \
1065 { .notifier_call = fn, .priority = 20 }; \
1066 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
1067 (void *)(unsigned long)smp_processor_id()); \
1068 fn(&fn##_nb, (unsigned long)CPU_STARTING, \
1069 (void *)(unsigned long)smp_processor_id()); \
1070 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
1071 (void *)(unsigned long)smp_processor_id()); \
1072 register_cpu_notifier(&fn##_nb); \
1073} while (0)
1074
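/*
 * Illustrative usage sketch (my_pmu_cpu_notify is a hypothetical callback):
 *
 *	static int __cpuinit
 *	my_pmu_cpu_notify(struct notifier_block *nb, unsigned long action,
 *			  void *hcpu)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	...
 *	perf_cpu_notifier(my_pmu_cpu_notify);
 *
 * The macro invokes the callback for the current CPU's UP_PREPARE, STARTING
 * and ONLINE phases before registering it for future hotplug events.
 */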
f3dfd265 1075#endif /* __KERNEL__ */
cdd6c482 1076#endif /* _LINUX_PERF_EVENT_H */