kernel/hw_breakpoint.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

        for (i = HBP_NUM - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

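/*
 * Worked example (editor's sketch, hypothetical numbers): task_bp_pinned[]
 * is a per-cpu histogram indexed by "pinned breakpoints owned by one task,
 * minus one". If on this cpu two tasks own one breakpoint each and a
 * third task owns three, then:
 *
 *      tsk_pinned[0] == 2, tsk_pinned[1] == 0, tsk_pinned[2] == 1
 *
 * and max_task_bp_pinned() scans down from index HBP_NUM - 1, hits the
 * non-zero slot at index 2 first and reports 3.
 */
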
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
                slots->pinned += max_task_bp_pinned(cpu);
                slots->flexible = per_cpu(nr_bp_flexible, cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned, cpu);
                nr += max_task_bp_pinned(cpu);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible, cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * Add/remove a pinned breakpoint slot for the given task in our
 * constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
        int count = 0;
        struct perf_event *bp;
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        unsigned int *tsk_pinned;
        struct list_head *list;
        unsigned long flags;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return;

        list = &ctx->event_list;

        spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The current breakpoint counter is not included in the list
         * at the open() callback time
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        count++;
        }

        spin_unlock_irqrestore(&ctx->lock, flags);

        if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
                return;

        tsk_pinned = per_cpu(task_bp_pinned, cpu);
        if (enable) {
                tsk_pinned[count]++;
                if (count > 0)
                        tsk_pinned[count-1]--;
        } else {
                tsk_pinned[count]--;
                if (count > 0)
                        tsk_pinned[count-1]++;
        }
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
        else
                per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
int reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        int ret = 0;

        mutex_lock(&nr_bp_mutex);

        fetch_bp_busy_slots(&slots, bp->cpu);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
                ret = -ENOSPC;
                goto end;
        }

        toggle_bp_slot(bp, true);

end:
        mutex_unlock(&nr_bp_mutex);

        return ret;
}

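/*
 * Worked example for the -ENOSPC check above (editor's sketch; HBP_NUM
 * is 4 on x86, which has four debug registers): if this cpu already
 * carries two pinned cpu-wide breakpoints, its busiest task owns one
 * pinned breakpoint, and at least one flexible counter exists, then
 * slots.pinned == 3 and slots.pinned + !!slots.flexible == 4 == HBP_NUM,
 * so the new request is refused.
 */
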
void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        toggle_bp_slot(bp, false);

        mutex_unlock(&nr_bp_mutex);
}

int __register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        /*
         * Ptrace breakpoints can be temporary perf events only
         * meant to reserve a slot. In this case, they are created disabled
         * and we don't want to check the params right now (as we put a
         * null addr). But perf tools create events as disabled and we want
         * to check the params for them.
         * This is a quick hack that will be removed soon, once we remove
         * the tmp breakpoints from ptrace
         */
        if (!bp->attr.disabled || bp->callback == perf_bp_event)
                ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

        return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        bp->callback = perf_bp_event;

        return __register_perf_hw_breakpoint(bp);
}

/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task on every cpu.
 * If pid is -1, the breakpoint is active for every task on the given
 * cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
                                int len,
                                int type,
                                perf_callback_t triggered,
                                pid_t pid,
                                int cpu,
                                bool active)
{
        struct perf_event_attr *attr;
        struct perf_event *bp;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return ERR_PTR(-ENOMEM);

        attr->type = PERF_TYPE_BREAKPOINT;
        attr->size = sizeof(*attr);
        attr->bp_addr = addr;
        attr->bp_len = len;
        attr->bp_type = type;
        /*
         * Such breakpoints are used by debuggers to trigger signals when
         * we hit the expected memory op. We can't miss such events, they
         * must be pinned.
         */
        attr->pinned = 1;

        if (!active)
                attr->disabled = 1;

        bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
        kfree(attr);

        return bp;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
                            int len,
                            int type,
                            perf_callback_t triggered,
                            struct task_struct *tsk,
                            bool active)
{
        return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
                                               tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

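/*
 * Usage sketch (editor's addition, not part of the original file; the
 * trigger callback, "user_addr" and "child" are hypothetical, and the
 * callback signature assumes this kernel's perf_callback_t):
 *
 *      static void my_trigger(struct perf_event *bp, void *data)
 *      {
 *              printk(KERN_INFO "watched address was written\n");
 *      }
 *
 *      struct perf_event *bp;
 *
 *      bp = register_user_hw_breakpoint(user_addr, HW_BREAKPOINT_LEN_4,
 *                                       HW_BREAKPOINT_W, my_trigger,
 *                                       child, true);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 *      ...
 *      unregister_hw_breakpoint(bp);
 *
 * where user_addr is an address in the traced child's address space,
 * as a ptrace-style debugger would supply.
 */
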
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
                          unsigned long addr,
                          int len,
                          int type,
                          perf_callback_t triggered,
                          struct task_struct *tsk,
                          bool active)
{
        /*
         * FIXME: do it without unregistering
         * - We don't want to lose our slot
         * - If the new bp is incorrect, don't lose the older one
         */
        unregister_hw_breakpoint(bp);

        return register_user_hw_breakpoint(addr, len, type, triggered,
                                           tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
                                  int len,
                                  int type,
                                  perf_callback_t triggered,
                                  int cpu,
                                  bool active)
{
        return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
                                               -1, cpu, active);
}

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
                            int len,
                            int type,
                            perf_callback_t triggered,
                            bool active)
{
        struct perf_event **cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return ERR_PTR(-ENOMEM);

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
                                        triggered, cpu, active);

                *pevent = bp;

                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }

        return cpu_events;

fail:
        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
        /* return the error if any */
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

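/*
 * Usage sketch (editor's addition; all identifiers are hypothetical and
 * the watched symbol is only an example):
 *
 *      static void my_wide_trigger(struct perf_event *bp, void *data)
 *      {
 *              printk(KERN_INFO "kernel symbol was accessed\n");
 *      }
 *
 *      struct perf_event **cpu_events;
 *
 *      cpu_events = register_wide_hw_breakpoint(
 *                              kallsyms_lookup_name("pid_max"),
 *                              HW_BREAKPOINT_LEN_4,
 *                              HW_BREAKPOINT_R | HW_BREAKPOINT_W,
 *                              my_wide_trigger, true);
 *      if (IS_ERR(cpu_events))
 *              return PTR_ERR(cpu_events);
 *      ...
 *      unregister_wide_hw_breakpoint(cpu_events);
 */
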
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
        .unthrottle     = hw_breakpoint_pmu_unthrottle
};