/*
 *  Performance counters:
 *
 *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * Generalized hardware event types, used by the hw_event_type parameter
 * of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	PERF_COUNT_CYCLES,
	PERF_COUNT_INSTRUCTIONS,
	PERF_COUNT_CACHE_REFERENCES,
	PERF_COUNT_CACHE_MISSES,
	PERF_COUNT_BRANCH_INSTRUCTIONS,
	PERF_COUNT_BRANCH_MISSES,
	/*
	 * If this bit is set in the type, then trigger NMI sampling:
	 */
	PERF_COUNT_NMI			= (1 << 30),
	/*
	 * Raw, architecture-specific event configuration flag.
	 *
	 * Written as (int)(1U << 31) rather than (1 << 31): left-shifting
	 * a signed 1 into the sign bit is undefined behavior in C.
	 * Shifting an unsigned value and converting back preserves the
	 * intended 0x80000000 bit pattern.
	 */
	PERF_COUNT_RAW			= (int)(1U << 31),
};
/*
 * IRQ-notification data record type:
 *
 * Presumably selects what is stored into the counter's perf_data buffer
 * when an event IRQ fires (see struct perf_counter::record_type) — confirm.
 */
enum perf_record_type {
	PERF_RECORD_SIMPLE,	/* no payload — presumably wakeup only; confirm */
	PERF_RECORD_IRQ,	/* record data at IRQ time — TODO confirm */
	PERF_RECORD_GROUP,	/* record a group of counters — TODO confirm */
};
/*
 * Event descriptor: hardware event type, sampling period and raw control
 * value (mirrored into struct perf_counter::event).
 */
struct perf_counter_event {
	u32	hw_event_type;		/* enum hw_event_types value, may carry the NMI/RAW flag bits */
	u32	hw_event_period;	/* sampling period — presumably events between IRQs; confirm */
	u64	hw_raw_ctrl;		/* raw hw config — presumably used when PERF_COUNT_RAW is set; confirm */
};
/**
 * struct hw_perf_counter - performance counter hardware details
 *
 * Low-level state for one hardware counter, filled in by architecture code.
 */
struct hw_perf_counter {
	u64		config;		/* hw event select/config value */
	unsigned long	config_base;	/* base of the config register — presumably an arch MSR; confirm */
	unsigned long	counter_base;	/* base of the count register — presumably an arch MSR; confirm */
	int		nmi;		/* nonzero: sample via NMI — presumably from PERF_COUNT_NMI; confirm */
	unsigned int	idx;		/* hardware counter index */
	u64		prev_count;	/* previous raw counter snapshot — TODO confirm */
	s32		next_count;	/* NOTE(review): semantics unclear from this header — confirm */
	u64		irq_period;	/* period between sampling IRQs — TODO confirm units */
};
/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN		2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int	len;			/* bytes of valid data in data[] */
	int	rd_idx;			/* read cursor into data[] */
	int	overrun;		/* presumably counts data lost when the buffer filled; confirm */
	u8	data[PERF_DATA_BUFLEN];	/* sampled record bytes */
};
/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
	struct list_head		list;		/* entry in perf_counter_context::counters */
	int				active;		/* nonzero while counting — TODO confirm exact meaning */
#if BITS_PER_LONG == 64
	atomic64_t			count;		/* 64-bit event count */
#else
	/* 32-bit: the 64-bit count is split across two 32-bit atomics */
	atomic_t			count32[2];
#endif
	struct perf_counter_event	event;		/* event description (type/period/raw ctrl) */
	struct hw_perf_counter		hw;		/* architecture/hardware state */

	struct perf_counter_context	*ctx;		/* owning context */
	struct task_struct		*task;		/* monitored task — presumably NULL for per-CPU counters; confirm */

	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;		/* CPU currently running the counter — presumably -1 when none; confirm */
	int				cpu;		/* CPU binding — TODO confirm semantics */

	enum perf_record_type		record_type;	/* what to record on event IRQ */

	/* read() / irq related data */
	wait_queue_head_t		waitq;		/* readers waiting for data */
	/* optional: for NMIs */
	int				wakeup_pending;	/* presumably a wakeup deferred out of NMI context; confirm */
	struct perf_data		*irqdata;	/* buffer currently written at IRQ time — TODO confirm */
	struct perf_data		*usrdata;	/* buffer currently read by userspace — TODO confirm */
	struct perf_data		data[2];	/* storage backing irqdata/usrdata (double buffer?) */
};
/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;
	struct list_head	counters;	/* all counters attached to this context */
	int			nr_counters;	/* number of counters on the list */
	int			nr_active;	/* counters currently counting — TODO confirm */
	struct task_struct	*task;		/* owning task — presumably NULL for per-CPU contexts; confirm */
#endif
};
/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;		/* this CPU's own counters */
	struct perf_counter_context	*task_ctx;	/* context of the task on this CPU — TODO confirm */
	int				active_oncpu;	/* counters active on this CPU — TODO confirm */
	int				max_pertask;	/* presumably the per-task counter limit here; confirm */
};
/*
 * Set by architecture code:
 */
extern int perf_max_counters;	/* number of hardware counters available */

#ifdef CONFIG_PERF_COUNTERS
/* Scheduler hooks: called on context switch in/out and on the timer tick. */
extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
/*
 * Globally disable/restore hw counters; presumably the u64 returned by
 * hw_perf_disable_all() is passed back to hw_perf_restore_ctrl() — confirm.
 */
extern void hw_perf_restore_ctrl(u64 ctrl);
extern u64 hw_perf_disable_all(void);
#else
/*
 * !CONFIG_PERF_COUNTERS: empty inline stubs, so callers need no #ifdefs.
 */
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *task)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore_ctrl(u64 ctrl)			{ }
static inline u64 hw_perf_disable_all(void)		{ return 0; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */