/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * Generalized hardware event types, used by the hw_event_type parameter
 * of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	PERF_COUNT_CYCLES,
	PERF_COUNT_INSTRUCTIONS,
	PERF_COUNT_CACHE_REFERENCES,
	PERF_COUNT_CACHE_MISSES,
	PERF_COUNT_BRANCH_INSTRUCTIONS,
	PERF_COUNT_BRANCH_MISSES,
	/*
	 * If this bit is set in the event type, NMI sampling is triggered:
	 */
	PERF_COUNT_NMI			= (1 << 30),
};
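
/*
 * Illustrative note (not part of this header's API): PERF_COUNT_NMI is a
 * modifier bit rather than an event type of its own, so it would be OR-ed
 * into one of the generalized event types above when NMI-driven sampling
 * is wanted, e.g.:
 *
 *	int hw_event_type = PERF_COUNT_CYCLES | PERF_COUNT_NMI;
 *
 * The exact sys_perf_counter_open() parameter list is defined by the
 * syscall implementation, not here.
 */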

/*
 * IRQ-notification data record type:
 */
enum perf_record_type {
	PERF_RECORD_SIMPLE,
	PERF_RECORD_IRQ,
	PERF_RECORD_GROUP,
};

/**
 * struct hw_perf_counter - performance counter hardware details
 */
struct hw_perf_counter {
	u64			config;
	unsigned long		config_base;
	unsigned long		counter_base;
	int			nmi;
	unsigned int		idx;
	u64			prev_count;
	s32			next_count;
	u64			irq_period;
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int			len;
	int			rd_idx;
	int			overrun;
	u8			data[PERF_DATA_BUFLEN];
};
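
/*
 * Illustrative sketch only (the helper below is hypothetical and not
 * declared by this header): code filling a struct perf_data buffer from
 * IRQ context would be expected to bound writes by PERF_DATA_BUFLEN and
 * count lost records via ->overrun, roughly like this:
 *
 *	static void perf_data_store(struct perf_data *data,
 *				    const void *buf, int size)
 *	{
 *		if (data->len + size > PERF_DATA_BUFLEN) {
 *			data->overrun++;
 *			return;
 *		}
 *		memcpy(data->data + data->len, buf, size);
 *		data->len += size;
 *	}
 */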

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
	struct list_head		list;
	int				active;
#if BITS_PER_LONG == 64
	atomic64_t			count;
#else
	atomic_t			count32[2];
#endif
	u64				__irq_period;

	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;

	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	s32				hw_event_type;
	enum perf_record_type		record_type;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
};
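
/*
 * Illustrative sketch only (hypothetical helper, not declared here): on
 * 32-bit kernels the 64-bit counter value is split across the two
 * atomic_t halves of ->count32[], so a consistent read has to combine
 * them. Assuming count32[0] holds the low word and count32[1] the high
 * word, a retry loop avoids torn reads:
 *
 *	static u64 perf_counter_read_value(struct perf_counter *counter)
 *	{
 *		u32 lo, hi, hi2;
 *
 *		do {
 *			hi  = atomic_read(&counter->count32[1]);
 *			lo  = atomic_read(&counter->count32[0]);
 *			hi2 = atomic_read(&counter->count32[1]);
 *		} while (hi != hi2);
 *
 *		return ((u64)hi << 32) | lo;
 *	}
 *
 * On 64-bit kernels a plain atomic64_read(&counter->count) suffices.
 */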

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for both task counters and CPU counters:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;
	struct list_head	counters;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};

/**
 * struct perf_cpu_context - per-CPU counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void hw_perf_restore_ctrl(u64 ctrl);
extern u64 hw_perf_disable_all(void);
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *task)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore_ctrl(u64 ctrl)			{ }
static inline u64 hw_perf_disable_all(void)				{ return 0; }
#endif
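
/*
 * Typical usage sketch for the hw_perf_*() pair above; the pairing is
 * implied by the signatures, the surrounding code is illustrative:
 *
 *	u64 perf_flags;
 *
 *	perf_flags = hw_perf_disable_all();
 *	... manipulate counter state with the PMU quiesced ...
 *	hw_perf_restore_ctrl(perf_flags);
 *
 * With !CONFIG_PERF_COUNTERS the stubs above turn this into a no-op.
 */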

#endif /* _LINUX_PERF_COUNTER_H */