/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

/*
 * Read and write access to the ring buffer uses spin locking. Thus,
 * a write to the buffer by the NMI handler (x86) could also occur
 * during a critical section while the buffer is being read. To avoid
 * this, there are two buffers for independent read and write access.
 * Read access is in process context only, write access only in the
 * NMI handler. If the read buffer runs empty, both buffers are
 * swapped atomically. There is potentially a small window during
 * swapping where the buffers are disabled and samples could be lost.
 *
 * Using two buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single-buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
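/*
 * Illustrative sketch, not part of the original file: the intended
 * producer/consumer flow with the two buffers roughly looks like
 * this (the helpers named here are defined further down).
 *
 *	NMI / interrupt context (producer):
 *		struct op_entry entry;
 *		op_cpu_buffer_write_entry(&entry);	reserve a slot in the write buffer
 *		entry.sample->eip = pc;
 *		entry.sample->event = event;
 *		op_cpu_buffer_write_commit(&entry);	publish the sample
 *
 *	process context (consumer, called from sync_buffer()):
 *		sample = op_cpu_buffer_read_entry(cpu);	consume from the read buffer;
 *							swaps both buffers when the
 *							read buffer runs empty
 */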
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;

	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per CPU so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}
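/*
 * Worked example, assuming HZ = 250 (so DEFAULT_TIMER_EXPIRE = 25
 * jiffies): the sync work for CPU 0 first fires after 25 jiffies,
 * CPU 1 after 26, CPU 2 after 27, and so on, so the per-CPU buffers
 * are not all drained in the same tick.
 */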

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

int op_cpu_buffer_write_entry(struct op_entry *entry)
{
	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
						sizeof(struct op_sample),
						&entry->irq_flags);
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return -ENOMEM;

	return 0;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
					 entry->irq_flags);
}

struct op_sample *op_cpu_buffer_read_entry(int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	return NULL;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}

static inline int
add_sample(struct oprofile_cpu_buffer *cpu_buf,
	   unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	int ret;

	ret = op_cpu_buffer_write_entry(&entry);
	if (ret)
		return ret;

	entry.sample->eip = pc;
	entry.sample->event = event;

	ret = op_cpu_buffer_write_commit(&entry);
	if (ret)
		return ret;

	return 0;
}

static inline int
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
	return add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		if (add_code(cpu_buf, is_kernel))
			goto fail;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		if (add_code(cpu_buf, (unsigned long)task))
			goto fail;
	}

	if (add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}
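/*
 * Illustrative sketch, not part of the original file: for a sample
 * taken in kernel mode right after a switch away from a user-space
 * task, log_sample() above writes three records into the CPU buffer:
 *
 *	{ .eip = ESCAPE_CODE, .event = 1 }			kernel enter
 *	{ .eip = ESCAPE_CODE, .event = (unsigned long)current }	task switch
 *	{ .eip = pc,          .event = event }			the sample itself
 *
 * sync_buffer() interprets the ESCAPE_CODE records to track the
 * current task and kernel/user state; only the plain samples end up
 * in the global event buffer.
 */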

static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!oprofile_backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, oprofile_backtrace_depth);
	oprofile_end_trace(cpu_buf);
}
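/*
 * Illustrative sketch, not part of the original file: with a non-zero
 * backtrace depth N, one call to __oprofile_add_ext_sample() produces
 * roughly this record sequence:
 *
 *	{ ESCAPE_CODE, CPU_TRACE_BEGIN }	from oprofile_begin_trace()
 *	{ pc, event }				from log_sample()
 *	{ caller_1, 0 }				up to N frames added by
 *	...					oprofile_ops.backtrace(),
 *	{ caller_N, 0 }				which calls oprofile_add_trace()
 *
 * If log_sample() fails, no trace records are written, since the
 * sample they would annotate has been lost.
 */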

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14

void oprofile_add_ibs_sample(struct pt_regs * const regs,
			     unsigned int * const ibs_sample, int ibs_code)
{
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	struct task_struct *task;
	int fail = 0;

	cpu_buf->sample_received++;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		if (add_code(cpu_buf, is_kernel))
			goto fail;
		cpu_buf->last_is_kernel = is_kernel;
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			if (add_code(cpu_buf, (unsigned long)task))
				goto fail;
			cpu_buf->last_task = task;
		}
	}

	fail = fail || add_code(cpu_buf, ibs_code);
	fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
	fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
	fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
		fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
		fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
	}

	if (fail)
		goto fail;

	if (oprofile_backtrace_depth)
		oprofile_ops.backtrace(regs, oprofile_backtrace_depth);

	return;

fail:
	cpu_buf->sample_lost_overflow++;
	return;
}
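/*
 * Illustrative sketch, not part of the original file: the record
 * layout written by oprofile_add_ibs_sample() above is one escape
 * record followed by three pc/event pairs, plus three more pairs for
 * IBS_OP_BEGIN samples (seven records in total):
 *
 *	{ ESCAPE_CODE, ibs_code }
 *	{ ibs_sample[0], ibs_sample[1] }
 *	{ ibs_sample[2], ibs_sample[3] }
 *	{ ibs_sample[4], ibs_sample[5] }
 *	{ ibs_sample[6], ibs_sample[7] }	IBS_OP_BEGIN only
 *	{ ibs_sample[8], ibs_sample[9] }	IBS_OP_BEGIN only
 *	{ ibs_sample[10], ibs_sample[11] }	IBS_OP_BEGIN only
 */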

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get it.
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid CPU buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct CPU.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}