Commit | Line | Data |
---|---|---|
cdd6c482 | 1 | /* Performance event support for sparc64. |
59abbd1e | 2 | * |
4f6dbe4a | 3 | * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net> |
59abbd1e | 4 | * |
cdd6c482 | 5 | * This code is based almost entirely upon the x86 perf event |
59abbd1e DM |
6 | * code, which is: |
7 | * | |
8 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | |
9 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | |
10 | * Copyright (C) 2009 Jaswinder Singh Rajput | |
11 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | |
12 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | |
13 | */ | |
14 | ||
cdd6c482 | 15 | #include <linux/perf_event.h> |
59abbd1e | 16 | #include <linux/kprobes.h> |
667f0cee | 17 | #include <linux/ftrace.h> |
59abbd1e DM |
18 | #include <linux/kernel.h> |
19 | #include <linux/kdebug.h> | |
20 | #include <linux/mutex.h> | |
21 | ||
4f6dbe4a | 22 | #include <asm/stacktrace.h> |
59abbd1e | 23 | #include <asm/cpudata.h> |
4f6dbe4a | 24 | #include <asm/uaccess.h> |
59abbd1e DM |
25 | #include <asm/atomic.h> |
26 | #include <asm/nmi.h> | |
27 | #include <asm/pcr.h> | |
28 | ||
4f6dbe4a DM |
29 | #include "kstack.h" |
30 | ||
59abbd1e DM |
31 | /* Sparc64 chips have two performance counters, 32 bits each, with
32 | * overflow interrupts generated on transition from 0xffffffff to 0. | |
33 | * The counters are accessed in one go using a 64-bit register. | |
34 | * | |
35 | * Both counters are controlled using a single control register. The | |
36 | * only way to stop all sampling is to clear all of the context (user, | |
37 | * supervisor, hypervisor) sampling enable bits. But these bits apply | |
38 | * to both counters, thus the two counters can't be enabled/disabled | |
39 | * individually. | |
40 | * | |
41 | * The control register has two event fields, one for each of the two | |
42 | * counters. It's thus nearly impossible to have one counter going | |
43 | * while keeping the other one stopped. Therefore it is possible to | |
44 | * get overflow interrupts for counters not currently "in use" and | |
45 | * that condition must be checked in the overflow interrupt handler. | |
46 | * | |
47 | * So we use a hack, in that we program inactive counters with the | |
48 | * "sw_count0" and "sw_count1" events. These count how many times | |
49 | * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an | |
50 | * unusual way to encode a NOP and therefore will not trigger in | |
51 | * normal code. | |
52 | */ | |
53 | ||
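/* Illustrative sketch (not part of the original file): a single read
 * of the %pic register yields both 32-bit counters at once, matching
 * the description above.  The helper names here are hypothetical.
 */
static inline u32 sketch_pic_upper(u64 pic) { return pic >> 32; }
static inline u32 sketch_pic_lower(u64 pic) { return pic & 0xffffffff; }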
cdd6c482 | 54 | #define MAX_HWEVENTS 2 |
59abbd1e DM |
55 | #define MAX_PERIOD ((1UL << 32) - 1) |
56 | ||
57 | #define PIC_UPPER_INDEX 0 | |
58 | #define PIC_LOWER_INDEX 1 | |
e7bef6b0 | 59 | #define PIC_NO_INDEX -1 |
59abbd1e | 60 | |
cdd6c482 | 61 | struct cpu_hw_events { |
e7bef6b0 DM |
62 | /* Number of events currently scheduled onto this cpu. |
63 | * This tells how many entries in the arrays below | |
64 | * are valid. | |
65 | */ | |
66 | int n_events; | |
67 | ||
68 | /* Number of new events added since the last hw_perf_disable(). | |
69 | * This works because the perf event layer always adds new | |
70 | * events inside of a perf_{disable,enable}() sequence. | |
71 | */ | |
72 | int n_added; | |
73 | ||
74 | /* Array of events currently scheduled on this cpu. */ |
75 | struct perf_event *event[MAX_HWEVENTS]; | |
76 | ||
77 | /* Array of encoded longs, specifying the %pcr register | |
78 | * encoding and the mask of PIC counters this event can |
79 | * be scheduled on. See perf_event_encode() et al. | |
80 | */ | |
81 | unsigned long events[MAX_HWEVENTS]; | |
82 | ||
83 | /* The current counter index assigned to an event. When the | |
84 | * event hasn't been programmed into the cpu yet, this will | |
85 | * hold PIC_NO_INDEX. The event->hw.idx value tells us where | |
86 | * we ought to schedule the event. | |
87 | */ | |
88 | int current_idx[MAX_HWEVENTS]; | |
89 | ||
90 | /* Software copy of %pcr register on this cpu. */ | |
d1751388 | 91 | u64 pcr; |
e7bef6b0 DM |
92 | |
93 | /* Enabled/disabled state. */ |
d1751388 | 94 | int enabled; |
59abbd1e | 95 | }; |
cdd6c482 | 96 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; |
59abbd1e | 97 | |
e7bef6b0 DM |
98 | /* An event map describes the characteristics of a performance |
99 | * counter event. In particular it gives the encoding as well as | |
100 | * a mask telling which counters the event can be measured on. | |
101 | */ | |
59abbd1e DM |
102 | struct perf_event_map { |
103 | u16 encoding; | |
104 | u8 pic_mask; | |
105 | #define PIC_NONE 0x00 | |
106 | #define PIC_UPPER 0x01 | |
107 | #define PIC_LOWER 0x02 | |
108 | }; | |
109 | ||
e7bef6b0 | 110 | /* Encode a perf_event_map entry into a long. */ |
a72a8a5f DM |
111 | static unsigned long perf_event_encode(const struct perf_event_map *pmap) |
112 | { | |
113 | return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; | |
114 | } | |
115 | ||
e7bef6b0 DM |
116 | static u8 perf_event_get_msk(unsigned long val) |
117 | { | |
118 | return val & 0xff; | |
119 | } | |
120 | ||
121 | static u64 perf_event_get_enc(unsigned long val) | |
a72a8a5f | 122 | { |
e7bef6b0 | 123 | return val >> 16; |
a72a8a5f DM |
124 | } |
125 | ||
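/* Usage sketch (illustrative): perf_event_encode() packs the event
 * code above bit 16 and the counter mask into the low byte, and the
 * two accessors above invert it:
 *
 *	struct perf_event_map map = { 0x0009, PIC_LOWER };
 *	unsigned long v = perf_event_encode(&map);
 *	BUG_ON(perf_event_get_enc(v) != 0x0009);
 *	BUG_ON(perf_event_get_msk(v) != PIC_LOWER);
 */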
2ce4da2e DM |
126 | #define C(x) PERF_COUNT_HW_CACHE_##x |
127 | ||
128 | #define CACHE_OP_UNSUPPORTED 0xfffe | |
129 | #define CACHE_OP_NONSENSE 0xffff | |
130 | ||
131 | typedef struct perf_event_map cache_map_t | |
132 | [PERF_COUNT_HW_CACHE_MAX] | |
133 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
134 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | |
135 | ||
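/* Illustrative lookup into a cache_map_t (this mirrors what
 * sparc_map_cache_event() does further below): an L1D read-access
 * event resolves to
 *
 *	(*sparc_pmu->cache_map)[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]
 *
 * and entries marked CACHE_OP_UNSUPPORTED or CACHE_OP_NONSENSE are
 * rejected instead of being programmed.
 */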
59abbd1e DM |
136 | struct sparc_pmu { |
137 | const struct perf_event_map *(*event_map)(int); | |
2ce4da2e | 138 | const cache_map_t *cache_map; |
59abbd1e DM |
139 | int max_events; |
140 | int upper_shift; | |
141 | int lower_shift; | |
142 | int event_mask; | |
91b9286d | 143 | int hv_bit; |
496c07e3 | 144 | int irq_bit; |
660d1376 DM |
145 | int upper_nop; |
146 | int lower_nop; | |
59abbd1e DM |
147 | }; |
148 | ||
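/* Illustrative use of the shift/mask fields (values from the ultra3
 * PMU below): scheduling event 0x09 on the lower counter places
 * (0x09 << lower_shift) == (0x09 << 4) into the %pcr, inside the
 * field selected by (event_mask << lower_shift) == (0x3f << 4).
 */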
28e8f9be | 149 | static const struct perf_event_map ultra3_perfmon_event_map[] = { |
59abbd1e DM |
150 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, |
151 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, | |
152 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, | |
153 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, | |
154 | }; | |
155 | ||
28e8f9be | 156 | static const struct perf_event_map *ultra3_event_map(int event_id) |
59abbd1e | 157 | { |
28e8f9be | 158 | return &ultra3_perfmon_event_map[event_id]; |
59abbd1e DM |
159 | } |
160 | ||
28e8f9be | 161 | static const cache_map_t ultra3_cache_map = { |
2ce4da2e DM |
162 | [C(L1D)] = { |
163 | [C(OP_READ)] = { | |
164 | [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, | |
165 | [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, | |
166 | }, | |
167 | [C(OP_WRITE)] = { | |
168 | [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, | |
169 | [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, | |
170 | }, | |
171 | [C(OP_PREFETCH)] = { | |
172 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
173 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
174 | }, | |
175 | }, | |
176 | [C(L1I)] = { | |
177 | [C(OP_READ)] = { | |
178 | [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, | |
179 | [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, | |
180 | }, | |
181 | [ C(OP_WRITE) ] = { | |
182 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | |
183 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | |
184 | }, | |
185 | [ C(OP_PREFETCH) ] = { | |
186 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
187 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
188 | }, | |
189 | }, | |
190 | [C(LL)] = { | |
191 | [C(OP_READ)] = { | |
192 | [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, | |
193 | [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, | |
194 | }, | |
195 | [C(OP_WRITE)] = { | |
196 | [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, | |
197 | [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, | |
198 | }, | |
199 | [C(OP_PREFETCH)] = { | |
200 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
201 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
202 | }, | |
203 | }, | |
204 | [C(DTLB)] = { | |
205 | [C(OP_READ)] = { | |
206 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
207 | [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, | |
208 | }, | |
209 | [ C(OP_WRITE) ] = { | |
210 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
211 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
212 | }, | |
213 | [ C(OP_PREFETCH) ] = { | |
214 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
215 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
216 | }, | |
217 | }, | |
218 | [C(ITLB)] = { | |
219 | [C(OP_READ)] = { | |
220 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
221 | [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, | |
222 | }, | |
223 | [ C(OP_WRITE) ] = { | |
224 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
225 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
226 | }, | |
227 | [ C(OP_PREFETCH) ] = { | |
228 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
229 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
230 | }, | |
231 | }, | |
232 | [C(BPU)] = { | |
233 | [C(OP_READ)] = { | |
234 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
235 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
236 | }, | |
237 | [ C(OP_WRITE) ] = { | |
238 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
239 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
240 | }, | |
241 | [ C(OP_PREFETCH) ] = { | |
242 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
243 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
244 | }, | |
245 | }, | |
246 | }; | |
247 | ||
28e8f9be DM |
248 | static const struct sparc_pmu ultra3_pmu = { |
249 | .event_map = ultra3_event_map, | |
250 | .cache_map = &ultra3_cache_map, | |
251 | .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), | |
59abbd1e DM |
252 | .upper_shift = 11, |
253 | .lower_shift = 4, | |
254 | .event_mask = 0x3f, | |
660d1376 DM |
255 | .upper_nop = 0x1c, |
256 | .lower_nop = 0x14, | |
59abbd1e DM |
257 | }; |
258 | ||
7eebda60 DM |
259 | /* Niagara1 is very limited. The upper PIC is hard-locked to count |
260 | * only instructions, so it is free running which creates all kinds of | |
6e804251 | 261 | * problems. Some hardware designs make one wonder if the creator |
7eebda60 DM |
262 | * even looked at how this stuff gets used by software. |
263 | */ | |
264 | static const struct perf_event_map niagara1_perfmon_event_map[] = { | |
265 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, | |
266 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, | |
267 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, | |
268 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, | |
269 | }; | |
270 | ||
271 | static const struct perf_event_map *niagara1_event_map(int event_id) | |
272 | { | |
273 | return &niagara1_perfmon_event_map[event_id]; | |
274 | } | |
275 | ||
276 | static const cache_map_t niagara1_cache_map = { | |
277 | [C(L1D)] = { | |
278 | [C(OP_READ)] = { | |
279 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
280 | [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, | |
281 | }, | |
282 | [C(OP_WRITE)] = { | |
283 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
284 | [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, | |
285 | }, | |
286 | [C(OP_PREFETCH)] = { | |
287 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
288 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
289 | }, | |
290 | }, | |
291 | [C(L1I)] = { | |
292 | [C(OP_READ)] = { | |
293 | [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, | |
294 | [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, | |
295 | }, | |
296 | [ C(OP_WRITE) ] = { | |
297 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | |
298 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | |
299 | }, | |
300 | [ C(OP_PREFETCH) ] = { | |
301 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
302 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
303 | }, | |
304 | }, | |
305 | [C(LL)] = { | |
306 | [C(OP_READ)] = { | |
307 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
308 | [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, | |
309 | }, | |
310 | [C(OP_WRITE)] = { | |
311 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
312 | [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, | |
313 | }, | |
314 | [C(OP_PREFETCH)] = { | |
315 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
316 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
317 | }, | |
318 | }, | |
319 | [C(DTLB)] = { | |
320 | [C(OP_READ)] = { | |
321 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
322 | [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, | |
323 | }, | |
324 | [ C(OP_WRITE) ] = { | |
325 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
326 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
327 | }, | |
328 | [ C(OP_PREFETCH) ] = { | |
329 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
330 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
331 | }, | |
332 | }, | |
333 | [C(ITLB)] = { | |
334 | [C(OP_READ)] = { | |
335 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
336 | [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, | |
337 | }, | |
338 | [ C(OP_WRITE) ] = { | |
339 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
340 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
341 | }, | |
342 | [ C(OP_PREFETCH) ] = { | |
343 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
344 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
345 | }, | |
346 | }, | |
347 | [C(BPU)] = { | |
348 | [C(OP_READ)] = { | |
349 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
350 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
351 | }, | |
352 | [ C(OP_WRITE) ] = { | |
353 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
354 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
355 | }, | |
356 | [ C(OP_PREFETCH) ] = { | |
357 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
358 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
359 | }, | |
360 | }, | |
361 | }; | |
362 | ||
363 | static const struct sparc_pmu niagara1_pmu = { | |
364 | .event_map = niagara1_event_map, | |
365 | .cache_map = &niagara1_cache_map, | |
366 | .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), | |
367 | .upper_shift = 0, | |
368 | .lower_shift = 4, | |
369 | .event_mask = 0x7, | |
370 | .upper_nop = 0x0, | |
371 | .lower_nop = 0x0, | |
372 | }; | |
373 | ||
b73d8847 DM |
374 | static const struct perf_event_map niagara2_perfmon_event_map[] = { |
375 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | |
376 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, | |
377 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, | |
378 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, | |
379 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, | |
380 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, | |
381 | }; | |
382 | ||
cdd6c482 | 383 | static const struct perf_event_map *niagara2_event_map(int event_id) |
b73d8847 | 384 | { |
cdd6c482 | 385 | return &niagara2_perfmon_event_map[event_id]; |
b73d8847 DM |
386 | } |
387 | ||
d0b86480 DM |
388 | static const cache_map_t niagara2_cache_map = { |
389 | [C(L1D)] = { | |
390 | [C(OP_READ)] = { | |
391 | [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, | |
392 | [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, | |
393 | }, | |
394 | [C(OP_WRITE)] = { | |
395 | [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, | |
396 | [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, | |
397 | }, | |
398 | [C(OP_PREFETCH)] = { | |
399 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
400 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
401 | }, | |
402 | }, | |
403 | [C(L1I)] = { | |
404 | [C(OP_READ)] = { | |
405 | [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, | |
406 | [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, | |
407 | }, | |
408 | [ C(OP_WRITE) ] = { | |
409 | [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, | |
410 | [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, | |
411 | }, | |
412 | [ C(OP_PREFETCH) ] = { | |
413 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
414 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
415 | }, | |
416 | }, | |
417 | [C(LL)] = { | |
418 | [C(OP_READ)] = { | |
419 | [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, | |
420 | [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, | |
421 | }, | |
422 | [C(OP_WRITE)] = { | |
423 | [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, | |
424 | [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, | |
425 | }, | |
426 | [C(OP_PREFETCH)] = { | |
427 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
428 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
429 | }, | |
430 | }, | |
431 | [C(DTLB)] = { | |
432 | [C(OP_READ)] = { | |
433 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
434 | [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, | |
435 | }, | |
436 | [ C(OP_WRITE) ] = { | |
437 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
438 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
439 | }, | |
440 | [ C(OP_PREFETCH) ] = { | |
441 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
442 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
443 | }, | |
444 | }, | |
445 | [C(ITLB)] = { | |
446 | [C(OP_READ)] = { | |
447 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
448 | [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, | |
449 | }, | |
450 | [ C(OP_WRITE) ] = { | |
451 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
452 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
453 | }, | |
454 | [ C(OP_PREFETCH) ] = { | |
455 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
456 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
457 | }, | |
458 | }, | |
459 | [C(BPU)] = { | |
460 | [C(OP_READ)] = { | |
461 | [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, | |
462 | [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, | |
463 | }, | |
464 | [ C(OP_WRITE) ] = { | |
465 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
466 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
467 | }, | |
468 | [ C(OP_PREFETCH) ] = { | |
469 | [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, | |
470 | [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, | |
471 | }, | |
472 | }, | |
473 | }; | |
474 | ||
b73d8847 DM |
475 | static const struct sparc_pmu niagara2_pmu = { |
476 | .event_map = niagara2_event_map, | |
d0b86480 | 477 | .cache_map = &niagara2_cache_map, |
b73d8847 DM |
478 | .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), |
479 | .upper_shift = 19, | |
480 | .lower_shift = 6, | |
481 | .event_mask = 0xfff, | |
482 | .hv_bit = 0x8, | |
de23cf3c | 483 | .irq_bit = 0x30, |
b73d8847 DM |
484 | .upper_nop = 0x220, |
485 | .lower_nop = 0x220, | |
486 | }; | |
487 | ||
59abbd1e DM |
488 | static const struct sparc_pmu *sparc_pmu __read_mostly; |
489 | ||
cdd6c482 | 490 | static u64 event_encoding(u64 event_id, int idx) |
59abbd1e DM |
491 | { |
492 | if (idx == PIC_UPPER_INDEX) | |
cdd6c482 | 493 | event_id <<= sparc_pmu->upper_shift; |
59abbd1e | 494 | else |
cdd6c482 IM |
495 | event_id <<= sparc_pmu->lower_shift; |
496 | return event_id; | |
59abbd1e DM |
497 | } |
498 | ||
499 | static u64 mask_for_index(int idx) | |
500 | { | |
501 | return event_encoding(sparc_pmu->event_mask, idx); | |
502 | } | |
503 | ||
504 | static u64 nop_for_index(int idx) | |
505 | { | |
506 | return event_encoding(idx == PIC_UPPER_INDEX ? | |
660d1376 DM |
507 | sparc_pmu->upper_nop : |
508 | sparc_pmu->lower_nop, idx); | |
59abbd1e DM |
509 | } |
510 | ||
d1751388 | 511 | static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) |
59abbd1e DM |
512 | { |
513 | u64 val, mask = mask_for_index(idx); | |
514 | ||
d1751388 DM |
515 | val = cpuc->pcr; |
516 | val &= ~mask; | |
517 | val |= hwc->config; | |
518 | cpuc->pcr = val; | |
519 | ||
520 | pcr_ops->write(cpuc->pcr); | |
59abbd1e DM |
521 | } |
522 | ||
d1751388 | 523 | static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) |
59abbd1e DM |
524 | { |
525 | u64 mask = mask_for_index(idx); | |
526 | u64 nop = nop_for_index(idx); | |
d1751388 | 527 | u64 val; |
59abbd1e | 528 | |
d1751388 DM |
529 | val = cpuc->pcr; |
530 | val &= ~mask; | |
531 | val |= nop; | |
532 | cpuc->pcr = val; | |
533 | ||
534 | pcr_ops->write(cpuc->pcr); | |
59abbd1e DM |
535 | } |
536 | ||
59abbd1e DM |
537 | static u32 read_pmc(int idx) |
538 | { | |
539 | u64 val; | |
540 | ||
541 | read_pic(val); | |
542 | if (idx == PIC_UPPER_INDEX) | |
543 | val >>= 32; | |
544 | ||
545 | return val & 0xffffffff; | |
546 | } | |
547 | ||
548 | static void write_pmc(int idx, u64 val) | |
549 | { | |
550 | u64 shift, mask, pic; | |
551 | ||
552 | shift = 0; | |
553 | if (idx == PIC_UPPER_INDEX) | |
554 | shift = 32; | |
555 | ||
556 | mask = ((u64) 0xffffffff) << shift; | |
557 | val <<= shift; | |
558 | ||
559 | read_pic(pic); | |
560 | pic &= ~mask; | |
561 | pic |= val; | |
562 | write_pic(pic); | |
563 | } | |
564 | ||
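/* Note (illustrative): since both counters share one register,
 * write_pmc() must read-modify-write %pic; writing the upper counter
 * preserves the lower 32 bits via the ~mask step above.
 */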
e7bef6b0 DM |
565 | static u64 sparc_perf_event_update(struct perf_event *event, |
566 | struct hw_perf_event *hwc, int idx) | |
567 | { | |
568 | int shift = 64 - 32; | |
569 | u64 prev_raw_count, new_raw_count; | |
570 | s64 delta; | |
571 | ||
572 | again: | |
573 | prev_raw_count = atomic64_read(&hwc->prev_count); | |
574 | new_raw_count = read_pmc(idx); | |
575 | ||
576 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | |
577 | new_raw_count) != prev_raw_count) | |
578 | goto again; | |
579 | ||
580 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | |
581 | delta >>= shift; | |
582 | ||
583 | atomic64_add(delta, &event->count); | |
584 | atomic64_sub(delta, &hwc->period_left); | |
585 | ||
586 | return new_raw_count; | |
587 | } | |
588 | ||
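/* Worked example (illustrative) of the shift arithmetic above: with
 * shift = 32, a wrap from prev_raw_count = 0xfffffff0 to
 * new_raw_count = 0x00000010 gives
 *
 *	delta = ((0x10ULL << 32) - (0xfffffff0ULL << 32)) >> 32 = 0x20
 *
 * i.e. 32 events are correctly counted across the
 * 0xffffffff -> 0 transition.
 */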
cdd6c482 | 589 | static int sparc_perf_event_set_period(struct perf_event *event, |
d29862f0 | 590 | struct hw_perf_event *hwc, int idx) |
59abbd1e DM |
591 | { |
592 | s64 left = atomic64_read(&hwc->period_left); | |
593 | s64 period = hwc->sample_period; | |
594 | int ret = 0; | |
595 | ||
596 | if (unlikely(left <= -period)) { | |
597 | left = period; | |
598 | atomic64_set(&hwc->period_left, left); | |
599 | hwc->last_period = period; | |
600 | ret = 1; | |
601 | } | |
602 | ||
603 | if (unlikely(left <= 0)) { | |
604 | left += period; | |
605 | atomic64_set(&hwc->period_left, left); | |
606 | hwc->last_period = period; | |
607 | ret = 1; | |
608 | } | |
609 | if (left > MAX_PERIOD) | |
610 | left = MAX_PERIOD; | |
611 | ||
612 | atomic64_set(&hwc->prev_count, (u64)-left); | |
613 | ||
614 | write_pmc(idx, (u64)(-left) & 0xffffffff); | |
615 | ||
cdd6c482 | 616 | perf_event_update_userpage(event); |
59abbd1e DM |
617 | |
618 | return ret; | |
619 | } | |
620 | ||
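/* Illustrative: the PIC is programmed with (u64)(-left) & 0xffffffff,
 * so it overflows after exactly `left` increments.  For example,
 * left = 0x1000 starts the counter at 0xfffff000, which wraps to 0
 * (raising the overflow interrupt) after 0x1000 events.
 */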
e7bef6b0 DM |
621 | /* If performance event entries have been added, move existing |
622 | * events around (if necessary) and then assign new entries to | |
623 | * counters. | |
624 | */ | |
625 | static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) | |
59abbd1e | 626 | { |
e7bef6b0 | 627 | int i; |
59abbd1e | 628 | |
e7bef6b0 DM |
629 | if (!cpuc->n_added) |
630 | goto out; | |
59abbd1e | 631 | |
e7bef6b0 DM |
632 | /* Read in the counters which are moving. */ |
633 | for (i = 0; i < cpuc->n_events; i++) { | |
634 | struct perf_event *cp = cpuc->event[i]; | |
59abbd1e | 635 | |
e7bef6b0 DM |
636 | if (cpuc->current_idx[i] != PIC_NO_INDEX && |
637 | cpuc->current_idx[i] != cp->hw.idx) { | |
638 | sparc_perf_event_update(cp, &cp->hw, | |
639 | cpuc->current_idx[i]); | |
640 | cpuc->current_idx[i] = PIC_NO_INDEX; | |
641 | } | |
642 | } | |
59abbd1e | 643 | |
e7bef6b0 DM |
644 | /* Assign to counters all unassigned events. */ |
645 | for (i = 0; i < cpuc->n_events; i++) { | |
646 | struct perf_event *cp = cpuc->event[i]; | |
647 | struct hw_perf_event *hwc = &cp->hw; | |
648 | int idx = hwc->idx; | |
649 | u64 enc; | |
650 | ||
651 | if (cpuc->current_idx[i] != PIC_NO_INDEX) | |
652 | continue; | |
653 | ||
654 | sparc_perf_event_set_period(cp, hwc, idx); | |
655 | cpuc->current_idx[i] = idx; | |
656 | ||
657 | enc = perf_event_get_enc(cpuc->events[i]); | |
b7d45c3f | 658 | pcr &= ~mask_for_index(idx); |
e7bef6b0 DM |
659 | pcr |= event_encoding(enc, idx); |
660 | } | |
661 | out: | |
662 | return pcr; | |
59abbd1e DM |
663 | } |
664 | ||
e7bef6b0 | 665 | void hw_perf_enable(void) |
59abbd1e | 666 | { |
e7bef6b0 DM |
667 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
668 | u64 pcr; | |
59abbd1e | 669 | |
e7bef6b0 DM |
670 | if (cpuc->enabled) |
671 | return; | |
59abbd1e | 672 | |
e7bef6b0 DM |
673 | cpuc->enabled = 1; |
674 | barrier(); | |
59abbd1e | 675 | |
e7bef6b0 DM |
676 | pcr = cpuc->pcr; |
677 | if (!cpuc->n_events) { | |
678 | pcr = 0; | |
679 | } else { | |
680 | pcr = maybe_change_configuration(cpuc, pcr); | |
59abbd1e | 681 | |
e7bef6b0 DM |
682 | /* We require that all of the events have the same |
683 | * configuration, so just fetch the settings from the | |
684 | * first entry. | |
685 | */ | |
686 | cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; | |
687 | } | |
59abbd1e | 688 | |
e7bef6b0 DM |
689 | pcr_ops->write(cpuc->pcr); |
690 | } | |
691 | ||
692 | void hw_perf_disable(void) | |
693 | { | |
694 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | |
695 | u64 val; | |
696 | ||
697 | if (!cpuc->enabled) | |
698 | return; | |
699 | ||
700 | cpuc->enabled = 0; | |
701 | cpuc->n_added = 0; | |
702 | ||
703 | val = cpuc->pcr; | |
704 | val &= ~(PCR_UTRACE | PCR_STRACE | | |
705 | sparc_pmu->hv_bit | sparc_pmu->irq_bit); | |
706 | cpuc->pcr = val; | |
707 | ||
708 | pcr_ops->write(cpuc->pcr); | |
59abbd1e DM |
709 | } |
710 | ||
cdd6c482 | 711 | static void sparc_pmu_disable(struct perf_event *event) |
59abbd1e | 712 | { |
cdd6c482 IM |
713 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
714 | struct hw_perf_event *hwc = &event->hw; | |
e7bef6b0 DM |
715 | unsigned long flags; |
716 | int i; | |
59abbd1e | 717 | |
e7bef6b0 DM |
718 | local_irq_save(flags); |
719 | perf_disable(); | |
720 | ||
721 | for (i = 0; i < cpuc->n_events; i++) { | |
722 | if (event == cpuc->event[i]) { | |
723 | int idx = cpuc->current_idx[i]; | |
724 | ||
725 | /* Shift remaining entries down into | |
726 | * the existing slot. | |
727 | */ | |
728 | while (++i < cpuc->n_events) { | |
729 | cpuc->event[i - 1] = cpuc->event[i]; | |
730 | cpuc->events[i - 1] = cpuc->events[i]; | |
731 | cpuc->current_idx[i - 1] = | |
732 | cpuc->current_idx[i]; | |
733 | } | |
734 | ||
735 | /* Absorb the final count and turn off the | |
736 | * event. | |
737 | */ | |
738 | sparc_pmu_disable_event(cpuc, hwc, idx); | |
739 | barrier(); | |
740 | sparc_perf_event_update(event, hwc, idx); | |
59abbd1e | 741 | |
e7bef6b0 | 742 | perf_event_update_userpage(event); |
59abbd1e | 743 | |
e7bef6b0 DM |
744 | cpuc->n_events--; |
745 | break; | |
746 | } | |
747 | } | |
59abbd1e | 748 | |
e7bef6b0 DM |
749 | perf_enable(); |
750 | local_irq_restore(flags); | |
751 | } | |
752 | ||
753 | static int active_event_index(struct cpu_hw_events *cpuc, | |
754 | struct perf_event *event) | |
755 | { | |
756 | int i; | |
757 | ||
758 | for (i = 0; i < cpuc->n_events; i++) { | |
759 | if (cpuc->event[i] == event) | |
760 | break; | |
761 | } | |
762 | BUG_ON(i == cpuc->n_events); | |
763 | return cpuc->current_idx[i]; | |
59abbd1e DM |
764 | } |
765 | ||
cdd6c482 | 766 | static void sparc_pmu_read(struct perf_event *event) |
59abbd1e | 767 | { |
e7bef6b0 DM |
768 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
769 | int idx = active_event_index(cpuc, event); | |
cdd6c482 | 770 | struct hw_perf_event *hwc = &event->hw; |
d1751388 | 771 | |
e7bef6b0 | 772 | sparc_perf_event_update(event, hwc, idx); |
59abbd1e DM |
773 | } |
774 | ||
cdd6c482 | 775 | static void sparc_pmu_unthrottle(struct perf_event *event) |
59abbd1e | 776 | { |
d1751388 | 777 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
e7bef6b0 | 778 | int idx = active_event_index(cpuc, event); |
cdd6c482 | 779 | struct hw_perf_event *hwc = &event->hw; |
d1751388 | 780 | |
e7bef6b0 | 781 | sparc_pmu_enable_event(cpuc, hwc, idx); |
59abbd1e DM |
782 | } |
783 | ||
cdd6c482 | 784 | static atomic_t active_events = ATOMIC_INIT(0); |
59abbd1e DM |
785 | static DEFINE_MUTEX(pmc_grab_mutex); |
786 | ||
d1751388 DM |
787 | static void perf_stop_nmi_watchdog(void *unused) |
788 | { | |
789 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | |
790 | ||
791 | stop_nmi_watchdog(NULL); | |
792 | cpuc->pcr = pcr_ops->read(); | |
793 | } | |
794 | ||
cdd6c482 | 795 | void perf_event_grab_pmc(void) |
59abbd1e | 796 | { |
cdd6c482 | 797 | if (atomic_inc_not_zero(&active_events)) |
59abbd1e DM |
798 | return; |
799 | ||
800 | mutex_lock(&pmc_grab_mutex); | |
cdd6c482 | 801 | if (atomic_read(&active_events) == 0) { |
59abbd1e | 802 | if (atomic_read(&nmi_active) > 0) { |
d1751388 | 803 | on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); |
59abbd1e DM |
804 | BUG_ON(atomic_read(&nmi_active) != 0); |
805 | } | |
cdd6c482 | 806 | atomic_inc(&active_events); |
59abbd1e DM |
807 | } |
808 | mutex_unlock(&pmc_grab_mutex); | |
809 | } | |
810 | ||
cdd6c482 | 811 | void perf_event_release_pmc(void) |
59abbd1e | 812 | { |
cdd6c482 | 813 | if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { |
59abbd1e DM |
814 | if (atomic_read(&nmi_active) == 0) |
815 | on_each_cpu(start_nmi_watchdog, NULL, 1); | |
816 | mutex_unlock(&pmc_grab_mutex); | |
817 | } | |
818 | } | |
819 | ||
2ce4da2e DM |
820 | static const struct perf_event_map *sparc_map_cache_event(u64 config) |
821 | { | |
822 | unsigned int cache_type, cache_op, cache_result; | |
823 | const struct perf_event_map *pmap; | |
824 | ||
825 | if (!sparc_pmu->cache_map) | |
826 | return ERR_PTR(-ENOENT); | |
827 | ||
828 | cache_type = (config >> 0) & 0xff; | |
829 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | |
830 | return ERR_PTR(-EINVAL); | |
831 | ||
832 | cache_op = (config >> 8) & 0xff; | |
833 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | |
834 | return ERR_PTR(-EINVAL); | |
835 | ||
836 | cache_result = (config >> 16) & 0xff; | |
837 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | |
838 | return ERR_PTR(-EINVAL); | |
839 | ||
840 | pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); | |
841 | ||
842 | if (pmap->encoding == CACHE_OP_UNSUPPORTED) | |
843 | return ERR_PTR(-ENOENT); | |
844 | ||
845 | if (pmap->encoding == CACHE_OP_NONSENSE) | |
846 | return ERR_PTR(-EINVAL); | |
847 | ||
848 | return pmap; | |
849 | } | |
850 | ||
cdd6c482 | 851 | static void hw_perf_event_destroy(struct perf_event *event) |
59abbd1e | 852 | { |
cdd6c482 | 853 | perf_event_release_pmc(); |
59abbd1e DM |
854 | } |
855 | ||
a72a8a5f DM |
856 | /* Make sure all events can be scheduled into the hardware at |
857 | * the same time. This is simplified by the fact that we only | |
858 | * need to support 2 simultaneous HW events. | |
e7bef6b0 DM |
859 | * |
860 | * As a side effect, the evts[]->hw.idx values will be assigned | |
861 | * on success. These are pending indexes. When the events are | |
862 | * actually programmed into the chip, these values will propagate | |
863 | * to the per-cpu cpuc->current_idx[] slots, see the code in | |
864 | * maybe_change_configuration() for details. | |
a72a8a5f | 865 | */ |
e7bef6b0 DM |
866 | static int sparc_check_constraints(struct perf_event **evts, |
867 | unsigned long *events, int n_ev) | |
a72a8a5f | 868 | { |
e7bef6b0 DM |
869 | u8 msk0 = 0, msk1 = 0; |
870 | int idx0 = 0; | |
871 | ||
872 | /* This case is possible when we are invoked from | |
873 | * hw_perf_group_sched_in(). | |
874 | */ | |
875 | if (!n_ev) | |
876 | return 0; | |
877 | ||
878 | if (n_ev > perf_max_events) | |
879 | return -1; | |
880 | ||
881 | msk0 = perf_event_get_msk(events[0]); | |
882 | if (n_ev == 1) { | |
883 | if (msk0 & PIC_LOWER) | |
884 | idx0 = 1; | |
885 | goto success; | |
886 | } | |
887 | BUG_ON(n_ev != 2); | |
888 | msk1 = perf_event_get_msk(events[1]); | |
889 | ||
890 | /* If both events can go on any counter, OK. */ | |
891 | if (msk0 == (PIC_UPPER | PIC_LOWER) && | |
892 | msk1 == (PIC_UPPER | PIC_LOWER)) | |
893 | goto success; | |
894 | ||
895 | /* If one event is limited to a specific counter, | |
896 | * and the other can go on both, OK. | |
897 | */ | |
898 | if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) && | |
899 | msk1 == (PIC_UPPER | PIC_LOWER)) { | |
900 | if (msk0 & PIC_LOWER) | |
901 | idx0 = 1; | |
902 | goto success; | |
a72a8a5f DM |
903 | } |
904 | ||
e7bef6b0 DM |
905 | if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && |
906 | msk0 == (PIC_UPPER | PIC_LOWER)) { | |
907 | if (msk1 & PIC_UPPER) | |
908 | idx0 = 1; | |
909 | goto success; | |
910 | } | |
911 | ||
912 | /* If the events are fixed to different counters, OK. */ | |
913 | if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) || | |
914 | (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) { | |
915 | if (msk0 & PIC_LOWER) | |
916 | idx0 = 1; | |
917 | goto success; | |
918 | } | |
919 | ||
920 | /* Otherwise, there is a conflict. */ | |
a72a8a5f | 921 | return -1; |
e7bef6b0 DM |
922 | |
923 | success: | |
924 | evts[0]->hw.idx = idx0; | |
925 | if (n_ev == 2) | |
926 | evts[1]->hw.idx = idx0 ^ 1; | |
927 | return 0; | |
a72a8a5f DM |
928 | } |
929 | ||
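/* Examples (illustrative) of the constraint logic above:
 * msk0 = PIC_UPPER, msk1 = PIC_UPPER | PIC_LOWER succeeds with
 * evts[0]->hw.idx = 0 and evts[1]->hw.idx = 1, while
 * msk0 = msk1 = PIC_UPPER is a real conflict and returns -1.
 */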
01552f76 DM |
930 | static int check_excludes(struct perf_event **evts, int n_prev, int n_new) |
931 | { | |
932 | int eu = 0, ek = 0, eh = 0; | |
933 | struct perf_event *event; | |
934 | int i, n, first; | |
935 | ||
936 | n = n_prev + n_new; | |
937 | if (n <= 1) | |
938 | return 0; | |
939 | ||
940 | first = 1; | |
941 | for (i = 0; i < n; i++) { | |
942 | event = evts[i]; | |
943 | if (first) { | |
944 | eu = event->attr.exclude_user; | |
945 | ek = event->attr.exclude_kernel; | |
946 | eh = event->attr.exclude_hv; | |
947 | first = 0; | |
948 | } else if (event->attr.exclude_user != eu || | |
949 | event->attr.exclude_kernel != ek || | |
950 | event->attr.exclude_hv != eh) { | |
951 | return -EAGAIN; | |
952 | } | |
953 | } | |
954 | ||
955 | return 0; | |
956 | } | |
957 | ||
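/* Rationale (illustrative summary): a single %pcr supplies the
 * user/kernel/hypervisor enable bits for both counters, so events
 * scheduled together must agree on their exclude_* attributes;
 * check_excludes() rejects any mismatch with -EAGAIN.
 */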
958 | static int collect_events(struct perf_event *group, int max_count, | |
e7bef6b0 DM |
959 | struct perf_event *evts[], unsigned long *events, |
960 | int *current_idx) | |
01552f76 DM |
961 | { |
962 | struct perf_event *event; | |
963 | int n = 0; | |
964 | ||
965 | if (!is_software_event(group)) { | |
966 | if (n >= max_count) | |
967 | return -1; | |
968 | evts[n] = group; | |
e7bef6b0 DM |
969 | events[n] = group->hw.event_base; |
970 | current_idx[n++] = PIC_NO_INDEX; | |
01552f76 DM |
971 | } |
972 | list_for_each_entry(event, &group->sibling_list, group_entry) { | |
973 | if (!is_software_event(event) && | |
974 | event->state != PERF_EVENT_STATE_OFF) { | |
975 | if (n >= max_count) | |
976 | return -1; | |
977 | evts[n] = event; | |
e7bef6b0 DM |
978 | events[n] = event->hw.event_base; |
979 | current_idx[n++] = PIC_NO_INDEX; | |
01552f76 DM |
980 | } |
981 | } | |
982 | return n; | |
983 | } | |
984 | ||
6e37738a | 985 | static void event_sched_in(struct perf_event *event) |
e7bef6b0 DM |
986 | { |
987 | event->state = PERF_EVENT_STATE_ACTIVE; | |
6e37738a | 988 | event->oncpu = smp_processor_id(); |
e7bef6b0 DM |
989 | event->tstamp_running += event->ctx->time - event->tstamp_stopped; |
990 | if (is_software_event(event)) | |
991 | event->pmu->enable(event); | |
992 | } | |
993 | ||
994 | int hw_perf_group_sched_in(struct perf_event *group_leader, | |
995 | struct perf_cpu_context *cpuctx, | |
6e37738a | 996 | struct perf_event_context *ctx) |
e7bef6b0 DM |
997 | { |
998 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | |
999 | struct perf_event *sub; | |
1000 | int n0, n; | |
1001 | ||
1002 | if (!sparc_pmu) | |
1003 | return 0; | |
1004 | ||
1005 | n0 = cpuc->n_events; | |
1006 | n = collect_events(group_leader, perf_max_events - n0, | |
1007 | &cpuc->event[n0], &cpuc->events[n0], | |
1008 | &cpuc->current_idx[n0]); | |
1009 | if (n < 0) | |
1010 | return -EAGAIN; | |
1011 | if (check_excludes(cpuc->event, n0, n)) | |
1012 | return -EINVAL; | |
1013 | if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0)) | |
1014 | return -EAGAIN; | |
1015 | cpuc->n_events = n0 + n; | |
1016 | cpuc->n_added += n; | |
1017 | ||
1018 | cpuctx->active_oncpu += n; | |
1019 | n = 1; | |
6e37738a | 1020 | event_sched_in(group_leader); |
e7bef6b0 DM |
1021 | list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { |
1022 | if (sub->state != PERF_EVENT_STATE_OFF) { | |
6e37738a | 1023 | event_sched_in(sub); |
e7bef6b0 DM |
1024 | n++; |
1025 | } | |
1026 | } | |
1027 | ctx->nr_active += n; | |
1028 | ||
1029 | return 1; | |
1030 | } | |
1031 | ||
1032 | static int sparc_pmu_enable(struct perf_event *event) | |
1033 | { | |
1034 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | |
1035 | int n0, ret = -EAGAIN; | |
1036 | unsigned long flags; | |
1037 | ||
1038 | local_irq_save(flags); | |
1039 | perf_disable(); | |
1040 | ||
1041 | n0 = cpuc->n_events; | |
1042 | if (n0 >= perf_max_events) | |
1043 | goto out; | |
1044 | ||
1045 | cpuc->event[n0] = event; | |
1046 | cpuc->events[n0] = event->hw.event_base; | |
1047 | cpuc->current_idx[n0] = PIC_NO_INDEX; | |
1048 | ||
1049 | if (check_excludes(cpuc->event, n0, 1)) | |
1050 | goto out; | |
1051 | if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) | |
1052 | goto out; | |
1053 | ||
1054 | cpuc->n_events++; | |
1055 | cpuc->n_added++; | |
1056 | ||
1057 | ret = 0; | |
1058 | out: | |
1059 | perf_enable(); | |
1060 | local_irq_restore(flags); | |
1061 | return ret; | |
1062 | } | |
1063 | ||
cdd6c482 | 1064 | static int __hw_perf_event_init(struct perf_event *event) |
59abbd1e | 1065 | { |
cdd6c482 | 1066 | struct perf_event_attr *attr = &event->attr; |
01552f76 | 1067 | struct perf_event *evts[MAX_HWEVENTS]; |
cdd6c482 | 1068 | struct hw_perf_event *hwc = &event->hw; |
a72a8a5f | 1069 | unsigned long events[MAX_HWEVENTS]; |
e7bef6b0 | 1070 | int current_idx_dmy[MAX_HWEVENTS]; |
59abbd1e | 1071 | const struct perf_event_map *pmap; |
01552f76 | 1072 | int n; |
59abbd1e DM |
1073 | |
1074 | if (atomic_read(&nmi_active) < 0) | |
1075 | return -ENODEV; | |
1076 | ||
2ce4da2e DM |
1077 | if (attr->type == PERF_TYPE_HARDWARE) { |
1078 | if (attr->config >= sparc_pmu->max_events) | |
1079 | return -EINVAL; | |
1080 | pmap = sparc_pmu->event_map(attr->config); | |
1081 | } else if (attr->type == PERF_TYPE_HW_CACHE) { | |
1082 | pmap = sparc_map_cache_event(attr->config); | |
1083 | if (IS_ERR(pmap)) | |
1084 | return PTR_ERR(pmap); | |
1085 | } else | |
59abbd1e DM |
1086 | return -EOPNOTSUPP; |
1087 | ||
e7bef6b0 | 1088 | /* We save the enable bits in the config_base. */ |
496c07e3 | 1089 | hwc->config_base = sparc_pmu->irq_bit; |
59abbd1e DM |
1090 | if (!attr->exclude_user) |
1091 | hwc->config_base |= PCR_UTRACE; | |
1092 | if (!attr->exclude_kernel) | |
1093 | hwc->config_base |= PCR_STRACE; | |
91b9286d DM |
1094 | if (!attr->exclude_hv) |
1095 | hwc->config_base |= sparc_pmu->hv_bit; | |
59abbd1e | 1096 | |
a72a8a5f DM |
1097 | hwc->event_base = perf_event_encode(pmap); |
1098 | ||
01552f76 DM |
1099 | n = 0; |
1100 | if (event->group_leader != event) { | |
1101 | n = collect_events(event->group_leader, | |
1102 | perf_max_events - 1, | |
e7bef6b0 | 1103 | evts, events, current_idx_dmy); |
01552f76 DM |
1104 | if (n < 0) |
1105 | return -EINVAL; | |
1106 | } | |
a72a8a5f | 1107 | events[n] = hwc->event_base; |
01552f76 DM |
1108 | evts[n] = event; |
1109 | ||
1110 | if (check_excludes(evts, n, 1)) | |
1111 | return -EINVAL; | |
1112 | ||
e7bef6b0 | 1113 | if (sparc_check_constraints(evts, events, n + 1)) |
a72a8a5f DM |
1114 | return -EINVAL; |
1115 | ||
e7bef6b0 DM |
1116 | hwc->idx = PIC_NO_INDEX; |
1117 | ||
01552f76 DM |
1118 | /* Try to do all error checking before this point, as unwinding |
1119 | * state after grabbing the PMC is difficult. | |
1120 | */ | |
1121 | perf_event_grab_pmc(); | |
1122 | event->destroy = hw_perf_event_destroy; | |
1123 | ||
59abbd1e DM |
1124 | if (!hwc->sample_period) { |
1125 | hwc->sample_period = MAX_PERIOD; | |
1126 | hwc->last_period = hwc->sample_period; | |
1127 | atomic64_set(&hwc->period_left, hwc->sample_period); | |
1128 | } | |
1129 | ||
59abbd1e DM |
1130 | return 0; |
1131 | } | |
1132 | ||
1133 | static const struct pmu pmu = { | |
1134 | .enable = sparc_pmu_enable, | |
1135 | .disable = sparc_pmu_disable, | |
1136 | .read = sparc_pmu_read, | |
1137 | .unthrottle = sparc_pmu_unthrottle, | |
1138 | }; | |
1139 | ||
cdd6c482 | 1140 | const struct pmu *hw_perf_event_init(struct perf_event *event) |
59abbd1e | 1141 | { |
cdd6c482 | 1142 | int err = __hw_perf_event_init(event); |
59abbd1e DM |
1143 | |
1144 | if (err) | |
1145 | return ERR_PTR(err); | |
1146 | return &pmu; | |
1147 | } | |
1148 | ||
cdd6c482 | 1149 | void perf_event_print_debug(void) |
59abbd1e DM |
1150 | { |
1151 | unsigned long flags; | |
1152 | u64 pcr, pic; | |
1153 | int cpu; | |
1154 | ||
1155 | if (!sparc_pmu) | |
1156 | return; | |
1157 | ||
1158 | local_irq_save(flags); | |
1159 | ||
1160 | cpu = smp_processor_id(); | |
1161 | ||
1162 | pcr = pcr_ops->read(); | |
1163 | read_pic(pic); | |
1164 | ||
1165 | pr_info("\n"); | |
1166 | pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", | |
1167 | cpu, pcr, pic); | |
1168 | ||
1169 | local_irq_restore(flags); | |
1170 | } | |
1171 | ||
cdd6c482 | 1172 | static int __kprobes perf_event_nmi_handler(struct notifier_block *self, |
d29862f0 | 1173 | unsigned long cmd, void *__args) |
59abbd1e DM |
1174 | { |
1175 | struct die_args *args = __args; | |
1176 | struct perf_sample_data data; | |
cdd6c482 | 1177 | struct cpu_hw_events *cpuc; |
59abbd1e | 1178 | struct pt_regs *regs; |
e7bef6b0 | 1179 | int i; |
59abbd1e | 1180 | |
cdd6c482 | 1181 | if (!atomic_read(&active_events)) |
59abbd1e DM |
1182 | return NOTIFY_DONE; |
1183 | ||
1184 | switch (cmd) { | |
1185 | case DIE_NMI: | |
1186 | break; | |
1187 | ||
1188 | default: | |
1189 | return NOTIFY_DONE; | |
1190 | } | |
1191 | ||
1192 | regs = args->regs; | |
1193 | ||
dc1d628a | 1194 | perf_sample_data_init(&data, 0); |
59abbd1e | 1195 | |
cdd6c482 | 1196 | cpuc = &__get_cpu_var(cpu_hw_events); |
e04ed38d DM |
1197 | |
1198 | /* If the PMU has the TOE IRQ enable bits, we need to do a | |
1199 | * dummy write to the %pcr to clear the overflow bits and thus | |
1200 | * the interrupt. | |
1201 | * | |
1202 | * Do this before we peek at the counters to determine | |
1203 | * overflow so we don't lose any events. | |
1204 | */ | |
1205 | if (sparc_pmu->irq_bit) | |
1206 | pcr_ops->write(cpuc->pcr); | |
1207 | ||
e7bef6b0 DM |
1208 | for (i = 0; i < cpuc->n_events; i++) { |
1209 | struct perf_event *event = cpuc->event[i]; | |
1210 | int idx = cpuc->current_idx[i]; | |
cdd6c482 | 1211 | struct hw_perf_event *hwc; |
59abbd1e DM |
1212 | u64 val; |
1213 | ||
cdd6c482 IM |
1214 | hwc = &event->hw; |
1215 | val = sparc_perf_event_update(event, hwc, idx); | |
59abbd1e DM |
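/* Heuristic: a counter that just overflowed has wrapped to a small
 * value, so one with bit 31 still set has not overflowed; skip it.
 */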
1216 | if (val & (1ULL << 31)) |
1217 | continue; | |
1218 | ||
cdd6c482 IM |
1219 | data.period = event->hw.last_period; |
1220 | if (!sparc_perf_event_set_period(event, hwc, idx)) | |
59abbd1e DM |
1221 | continue; |
1222 | ||
cdd6c482 | 1223 | if (perf_event_overflow(event, 1, &data, regs)) |
d1751388 | 1224 | sparc_pmu_disable_event(cpuc, hwc, idx); |
59abbd1e DM |
1225 | } |
1226 | ||
1227 | return NOTIFY_STOP; | |
1228 | } | |
1229 | ||
cdd6c482 IM |
1230 | static __read_mostly struct notifier_block perf_event_nmi_notifier = { |
1231 | .notifier_call = perf_event_nmi_handler, | |
59abbd1e DM |
1232 | }; |
1233 | ||
1234 | static bool __init supported_pmu(void) | |
1235 | { | |
28e8f9be DM |
1236 | if (!strcmp(sparc_pmu_type, "ultra3") || |
1237 | !strcmp(sparc_pmu_type, "ultra3+") || | |
1238 | !strcmp(sparc_pmu_type, "ultra3i") || | |
1239 | !strcmp(sparc_pmu_type, "ultra4+")) { | |
1240 | sparc_pmu = &ultra3_pmu; | |
59abbd1e DM |
1241 | return true; |
1242 | } | |
7eebda60 DM |
1243 | if (!strcmp(sparc_pmu_type, "niagara")) { |
1244 | sparc_pmu = &niagara1_pmu; | |
1245 | return true; | |
1246 | } | |
b73d8847 DM |
1247 | if (!strcmp(sparc_pmu_type, "niagara2")) { |
1248 | sparc_pmu = &niagara2_pmu; | |
1249 | return true; | |
1250 | } | |
59abbd1e DM |
1251 | return false; |
1252 | } | |
1253 | ||
cdd6c482 | 1254 | void __init init_hw_perf_events(void) |
59abbd1e | 1255 | { |
cdd6c482 | 1256 | pr_info("Performance events: "); |
59abbd1e DM |
1257 | |
1258 | if (!supported_pmu()) { | |
1259 | pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); | |
1260 | return; | |
1261 | } | |
1262 | ||
1263 | pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); | |
1264 | ||
e7bef6b0 DM |
1265 | /* All sparc64 PMUs currently have 2 events. */ |
1266 | perf_max_events = 2; | |
59abbd1e | 1267 | |
cdd6c482 | 1268 | register_die_notifier(&perf_event_nmi_notifier); |
59abbd1e | 1269 | } |
4f6dbe4a DM |
1270 | |
1271 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | |
1272 | { | |
1273 | if (entry->nr < PERF_MAX_STACK_DEPTH) | |
1274 | entry->ip[entry->nr++] = ip; | |
1275 | } | |
1276 | ||
1277 | static void perf_callchain_kernel(struct pt_regs *regs, | |
1278 | struct perf_callchain_entry *entry) | |
1279 | { | |
1280 | unsigned long ksp, fp; | |
667f0cee DM |
1281 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1282 | int graph = 0; | |
1283 | #endif | |
4f6dbe4a DM |
1284 | |
1285 | callchain_store(entry, PERF_CONTEXT_KERNEL); | |
1286 | callchain_store(entry, regs->tpc); | |
1287 | ||
1288 | ksp = regs->u_regs[UREG_I6]; | |
1289 | fp = ksp + STACK_BIAS; | |
1290 | do { | |
1291 | struct sparc_stackf *sf; | |
1292 | struct pt_regs *regs; | |
1293 | unsigned long pc; | |
1294 | ||
1295 | if (!kstack_valid(current_thread_info(), fp)) | |
1296 | break; | |
1297 | ||
1298 | sf = (struct sparc_stackf *) fp; | |
1299 | regs = (struct pt_regs *) (sf + 1); | |
1300 | ||
1301 | if (kstack_is_trap_frame(current_thread_info(), regs)) { | |
1302 | if (user_mode(regs)) | |
1303 | break; | |
1304 | pc = regs->tpc; | |
1305 | fp = regs->u_regs[UREG_I6] + STACK_BIAS; | |
1306 | } else { | |
1307 | pc = sf->callers_pc; | |
1308 | fp = (unsigned long)sf->fp + STACK_BIAS; | |
1309 | } | |
1310 | callchain_store(entry, pc); | |
667f0cee DM |
1311 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1312 | if ((pc + 8UL) == (unsigned long) &return_to_handler) { | |
1313 | int index = current->curr_ret_stack; | |
1314 | if (current->ret_stack && index >= graph) { | |
1315 | pc = current->ret_stack[index - graph].ret; | |
1316 | callchain_store(entry, pc); | |
1317 | graph++; | |
1318 | } | |
1319 | } | |
1320 | #endif | |
4f6dbe4a DM |
1321 | } while (entry->nr < PERF_MAX_STACK_DEPTH); |
1322 | } | |
1323 | ||
1324 | static void perf_callchain_user_64(struct pt_regs *regs, | |
1325 | struct perf_callchain_entry *entry) | |
1326 | { | |
1327 | unsigned long ufp; | |
1328 | ||
1329 | callchain_store(entry, PERF_CONTEXT_USER); | |
1330 | callchain_store(entry, regs->tpc); | |
1331 | ||
1332 | ufp = regs->u_regs[UREG_I6] + STACK_BIAS; | |
1333 | do { | |
1334 | struct sparc_stackf *usf, sf; | |
1335 | unsigned long pc; | |
1336 | ||
1337 | usf = (struct sparc_stackf *) ufp; | |
1338 | if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | |
1339 | break; | |
1340 | ||
1341 | pc = sf.callers_pc; | |
1342 | ufp = (unsigned long)sf.fp + STACK_BIAS; | |
1343 | callchain_store(entry, pc); | |
1344 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | |
1345 | } | |
1346 | ||
1347 | static void perf_callchain_user_32(struct pt_regs *regs, | |
1348 | struct perf_callchain_entry *entry) | |
1349 | { | |
1350 | unsigned long ufp; | |
1351 | ||
1352 | callchain_store(entry, PERF_CONTEXT_USER); | |
1353 | callchain_store(entry, regs->tpc); | |
1354 | ||
9e8307ec | 1355 | ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; |
4f6dbe4a DM |
1356 | do { |
1357 | struct sparc_stackf32 *usf, sf; | |
1358 | unsigned long pc; | |
1359 | ||
1360 | usf = (struct sparc_stackf32 *) ufp; | |
1361 | if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) | |
1362 | break; | |
1363 | ||
1364 | pc = sf.callers_pc; | |
1365 | ufp = (unsigned long)sf.fp; | |
1366 | callchain_store(entry, pc); | |
1367 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | |
1368 | } | |
1369 | ||
1370 | /* Like powerpc we can't get PMU interrupts within the PMU handler, | |
3ad2f3fb | 1371 | * so no need for separate NMI and IRQ chains as on x86. |
4f6dbe4a DM |
1372 | */ |
1373 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | |
1374 | ||
1375 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |
1376 | { | |
1377 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | |
1378 | ||
1379 | entry->nr = 0; | |
1380 | if (!user_mode(regs)) { | |
1381 | stack_trace_flush(); | |
1382 | perf_callchain_kernel(regs, entry); | |
1383 | if (current->mm) | |
1384 | regs = task_pt_regs(current); | |
1385 | else | |
1386 | regs = NULL; | |
1387 | } | |
1388 | if (regs) { | |
1389 | flushw_user(); | |
1390 | if (test_thread_flag(TIF_32BIT)) | |
1391 | perf_callchain_user_32(regs, entry); | |
1392 | else | |
1393 | perf_callchain_user_64(regs, entry); | |
1394 | } | |
1395 | return entry; | |
1396 | } |