[net-next-2.6.git] / kernel / hw_breakpoint.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
        return 1;
}

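/*
 * Note (added for clarity): read/write data breakpoints
 * (HW_BREAKPOINT_R, HW_BREAKPOINT_W, HW_BREAKPOINT_RW) consume the
 * data slots (TYPE_DATA); execution breakpoints (HW_BREAKPOINT_X)
 * consume the instruction slots (TYPE_INST).
 */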
static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
        if (bp->attr.bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;

        return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

        for (i = nr_slots[type] - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
{
        struct perf_event_context *ctx = bp->ctx;
        struct perf_event *iter;
        int count = 0;

        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                if (iter->ctx == ctx && find_slot_idx(iter) == type)
                        count += hw_breakpoint_weight(iter);
        }

        return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        slots->pinned += max_task_bp_pinned(cpu, type);
                else
                        slots->pinned += task_bp_pinned(bp, type);
                slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(bp, type);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible[type], cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
        slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
                                enum bp_type_idx type, int weight)
{
        unsigned int *tsk_pinned;
        int old_count = 0;
        int old_idx = 0;
        int idx = 0;

        old_count = task_bp_pinned(bp, type);
        old_idx = old_count - 1;
        idx = old_idx + weight;

        /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
        tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
        if (enable) {
                tsk_pinned[idx]++;
                if (old_count > 0)
                        tsk_pinned[old_idx]--;
        } else {
                tsk_pinned[idx]--;
                if (old_count > 0)
                        tsk_pinned[old_idx]++;
        }
}
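
/*
 * Worked example (illustrative, assuming a weight of 1): if the task
 * already holds 2 pinned breakpoints of this type, old_count = 2, so
 * old_idx = 1 and idx = 2.  Enabling a third breakpoint increments
 * tsk_pinned[2] (tasks with 3 breakpoints) and decrements
 * tsk_pinned[1] (tasks with 2 breakpoints), moving the task one
 * bucket up in the histogram.  Disabling does the reverse.
 */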

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter cpu profiling */
        if (!tsk) {

                if (enable)
                        per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
                else
                        per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
                return;
        }

        /* Pinned counter task profiling */

        if (!enable)
                list_del(&bp->hw.bp_list);

        if (cpu >= 0) {
                toggle_bp_task_slot(bp, cpu, enable, type, weight);
        } else {
                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(bp, cpu, enable, type, weight);
        }

        if (enable)
                list_add_tail(&bp->hw.bp_list, &bp_task_head);
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
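
/*
 * Worked example (illustrative numbers): suppose 4 data registers
 * (HBP_NUM = 4), a cpu already holds 2 pinned cpu-wide breakpoints,
 * the busiest task on that cpu holds 1 pinned breakpoint, and 1
 * flexible counter exists.  A new pinned cpu-wide request of weight 1
 * yields slots.pinned = 2 + 1 + 1 = 4; since the flexible counter
 * must keep one slot, 4 + 1 > 4 and __reserve_bp_slot() below
 * returns -ENOSPC.
 */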
static int __reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
        int weight;

        /* We couldn't initialize breakpoint constraints on boot */
        if (!constraints_initialized)
                return -ENOMEM;

        /* Basic checks */
        if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
            bp->attr.bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);

        fetch_bp_busy_slots(&slots, bp, type);
        /*
         * Simulate the addition of this breakpoint to the constraints
         * and see the result.
         */
        fetch_this_slot(&slots, weight);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) > nr_slots[type])
                return -ENOSPC;

        toggle_bp_slot(bp, true, type, weight);

        return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);

        ret = __reserve_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
        enum bp_type_idx type;
        int weight;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        __release_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp);

        return 0;
}

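/*
 * Usage sketch (illustrative): a debugger-core caller is expected to
 * attempt the reservation and fail gracefully instead of sleeping,
 * since it may run with the other CPUs stopped:
 *
 *	if (dbg_reserve_bp_slot(bp))
 *		return -1;
 *	...
 *	dbg_release_bp_slot(bp);
 */
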
static int validate_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = arch_validate_hwbkpt_settings(bp);
        if (ret)
                return ret;

        if (arch_check_bp_in_kernelspace(bp)) {
                if (bp->attr.exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        ret = validate_hw_breakpoint(bp);

        /* if validate_hw_breakpoint() fails then release bp slot */
        if (ret)
                release_bp_slot(bp);

        return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
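
/*
 * Usage sketch (illustrative, not part of this file): how arch ptrace
 * code might install a 4-byte write watchpoint on a traced task.  The
 * helper name is hypothetical; hw_breakpoint_init() comes from
 * <linux/hw_breakpoint.h>.
 *
 *	static struct perf_event *
 *	example_watch_write(struct task_struct *tsk, unsigned long addr,
 *			    perf_overflow_handler_t triggered)
 *	{
 *		struct perf_event_attr attr;
 *
 *		hw_breakpoint_init(&attr);
 *		attr.bp_addr = addr;
 *		attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *		attr.bp_type = HW_BREAKPOINT_W;
 *
 *		return register_user_hw_breakpoint(&attr, triggered, tsk);
 *	}
 */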

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        u64 old_addr = bp->attr.bp_addr;
        u64 old_len = bp->attr.bp_len;
        int old_type = bp->attr.bp_type;
        int err = 0;

        perf_event_disable(bp);

        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len = attr->bp_len;

        if (attr->disabled)
                goto end;

        err = validate_hw_breakpoint(bp);
        if (!err)
                perf_event_enable(bp);

        if (err) {
                bp->attr.bp_addr = old_addr;
                bp->attr.bp_type = old_type;
                bp->attr.bp_len = old_len;
                if (!bp->attr.disabled)
                        perf_event_enable(bp);

                return err;
        }

end:
        bp->attr.disabled = attr->disabled;

        return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
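
/*
 * Usage sketch (illustrative): moving an existing breakpoint to a new
 * address while keeping its type and length.  The helper name is
 * hypothetical.
 *
 *	static int example_move_bp(struct perf_event *bp, u64 new_addr)
 *	{
 *		struct perf_event_attr attr = bp->attr;
 *
 *		attr.bp_addr = new_addr;
 *		return modify_user_hw_breakpoint(bp, &attr);
 *	}
 */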

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered)
{
        struct perf_event * __percpu *cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

                *pevent = bp;

                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }
        put_online_cpus();

        return cpu_events;

fail:
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        put_online_cpus();

        free_percpu(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
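
/*
 * Usage sketch (illustrative, modeled on the in-tree sample
 * samples/hw_breakpoint/data_breakpoint.c): watch writes to a kernel
 * symbol on every cpu.  The symbol name and handler are placeholders.
 *
 *	static void example_handler(struct perf_event *bp, int nmi,
 *				    struct perf_sample_data *data,
 *				    struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "example symbol was written to\n");
 *	}
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("example_symbol");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, example_handler);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 */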

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        unsigned int **task_bp_pinned;
        int cpu, err_cpu;
        int i;

        for (i = 0; i < TYPE_MAX; i++)
                nr_slots[i] = hw_breakpoint_slots(i);

        for_each_possible_cpu(cpu) {
                for (i = 0; i < TYPE_MAX; i++) {
                        task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
                        *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
                                                  GFP_KERNEL);
                        if (!*task_bp_pinned)
                                goto err_alloc;
                }
        }

        constraints_initialized = 1;

        return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
        /*
         * Free each cpu's arrays, including the partially allocated
         * failing cpu (kfree(NULL) is safe), then stop.
         */
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
                if (err_cpu == cpu)
                        break;
        }

        return -ENOMEM;
}
core_initcall(init_hw_breakpoint);

struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
};