net-next-2.6.git: arch/x86/kernel/cpu/perf_event.c
perf_events: Add fast-path to the rescheduling code
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  * For licensing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26
27 #include <asm/apic.h>
28 #include <asm/stacktrace.h>
29 #include <asm/nmi.h>
30
31 static u64 perf_event_mask __read_mostly;
32
33 /* The maximal number of PEBS events: */
34 #define MAX_PEBS_EVENTS 4
35
36 /* The size of a BTS record in bytes: */
37 #define BTS_RECORD_SIZE         24
38
39 /* The size of a per-cpu BTS buffer in bytes: */
40 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
41
42 /* The BTS overflow threshold in bytes from the end of the buffer: */
43 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
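/*
 * With 24-byte records this gives a 48 KiB per-cpu buffer (2048 records)
 * and an overflow threshold 128 records (3 KiB) before the end, so the
 * BTS interrupt fires while there is still room left in the buffer.
 */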
44
45
46 /*
47  * Bits in the debugctlmsr controlling branch tracing.
48  */
49 #define X86_DEBUGCTL_TR                 (1 << 6)
50 #define X86_DEBUGCTL_BTS                (1 << 7)
51 #define X86_DEBUGCTL_BTINT              (1 << 8)
52 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
53 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
54
55 /*
56  * A debug store configuration.
57  *
58  * We only support architectures that use 64bit fields.
59  */
60 struct debug_store {
61         u64     bts_buffer_base;
62         u64     bts_index;
63         u64     bts_absolute_maximum;
64         u64     bts_interrupt_threshold;
65         u64     pebs_buffer_base;
66         u64     pebs_index;
67         u64     pebs_absolute_maximum;
68         u64     pebs_interrupt_threshold;
69         u64     pebs_event_reset[MAX_PEBS_EVENTS];
70 };
71
72 #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
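/*
 * Number of u64 words needed to hold nr bits:
 * e.g. BITS_TO_U64(64) == 1, BITS_TO_U64(65) == 2.
 */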
73
74 struct event_constraint {
75         u64     idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)];
76         int     code;
77         int     cmask;
78 };
79
80 struct cpu_hw_events {
81         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
82         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
83         unsigned long           interrupts;
84         int                     enabled;
85         struct debug_store      *ds;
86
87         int                     n_events;
88         int                     n_added;
89         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
90         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
91 };
92
93 #define EVENT_CONSTRAINT(c, n, m) { \
94         .code = (c),    \
95         .cmask = (m),   \
96         .idxmsk[0] = (n) }
97
98 #define EVENT_CONSTRAINT_END \
99         { .code = 0, .cmask = 0, .idxmsk[0] = 0 }
100
101 #define for_each_event_constraint(e, c) \
102         for ((e) = (c); (e)->cmask; (e)++)
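/*
 * Example: EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK) restricts
 * event code 0x12 (MUL) to the counters set in idxmsk 0x2, i.e. generic
 * counter 1 only.  for_each_event_constraint() walks such a table until
 * it hits the all-zero EVENT_CONSTRAINT_END terminator (cmask == 0).
 */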
103
104 /*
105  * struct x86_pmu - generic x86 pmu
106  */
107 struct x86_pmu {
108         const char      *name;
109         int             version;
110         int             (*handle_irq)(struct pt_regs *);
111         void            (*disable_all)(void);
112         void            (*enable_all)(void);
113         void            (*enable)(struct hw_perf_event *, int);
114         void            (*disable)(struct hw_perf_event *, int);
115         unsigned        eventsel;
116         unsigned        perfctr;
117         u64             (*event_map)(int);
118         u64             (*raw_event)(u64);
119         int             max_events;
120         int             num_events;
121         int             num_events_fixed;
122         int             event_bits;
123         u64             event_mask;
124         int             apic;
125         u64             max_period;
126         u64             intel_ctrl;
127         void            (*enable_bts)(u64 config);
128         void            (*disable_bts)(void);
129         void            (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk);
130         void            (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event);
131         const struct event_constraint *event_constraints;
132 };
133
134 static struct x86_pmu x86_pmu __read_mostly;
135
136 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
137         .enabled = 1,
138 };
139
140 static int x86_perf_event_set_period(struct perf_event *event,
141                              struct hw_perf_event *hwc, int idx);
142
143 /*
144  * Not sure about some of these
145  */
146 static const u64 p6_perfmon_event_map[] =
147 {
148   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
149   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
150   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
151   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
152   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
153   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
154   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
155 };
156
157 static u64 p6_pmu_event_map(int hw_event)
158 {
159         return p6_perfmon_event_map[hw_event];
160 }
161
162 /*
163  * Event setting that is specified not to count anything.
164  * We use this to effectively disable a counter.
165  *
166  * L2_RQSTS with 0 MESI unit mask.
167  */
168 #define P6_NOP_EVENT                    0x0000002EULL
169
170 static u64 p6_pmu_raw_event(u64 hw_event)
171 {
172 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
173 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
174 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
175 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
176 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
177
178 #define P6_EVNTSEL_MASK                 \
179         (P6_EVNTSEL_EVENT_MASK |        \
180          P6_EVNTSEL_UNIT_MASK  |        \
181          P6_EVNTSEL_EDGE_MASK  |        \
182          P6_EVNTSEL_INV_MASK   |        \
183          P6_EVNTSEL_REG_MASK)
184
185         return hw_event & P6_EVNTSEL_MASK;
186 }
187
188 static struct event_constraint intel_p6_event_constraints[] =
189 {
190         EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK),     /* FLOPS */
191         EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK),     /* FP_COMP_OPS_EXE */
192         EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK),     /* FP_ASSIST */
193         EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK),     /* MUL */
194         EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK),     /* DIV */
195         EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK),     /* CYCLES_DIV_BUSY */
196         EVENT_CONSTRAINT_END
197 };
198
199 /*
200  * Intel PerfMon v3. Used on Core2 and later.
201  */
202 static const u64 intel_perfmon_event_map[] =
203 {
204   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
205   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
206   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
207   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
208   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
209   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
210   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
211 };
212
213 static struct event_constraint intel_core_event_constraints[] =
214 {
215         EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
216         EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
217         EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
218         EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
219         EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
220         EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
221         EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
222         EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */
223         EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */
224         EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */
225         EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */
226         EVENT_CONSTRAINT_END
227 };
228
229 static struct event_constraint intel_nehalem_event_constraints[] =
230 {
231         EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
232         EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
233         EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */
234         EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */
235         EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */
236         EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */
237         EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */
238         EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */
239         EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */
240         EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
241         EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */
242         EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */
243         EVENT_CONSTRAINT_END
244 };
245
246 static struct event_constraint intel_gen_event_constraints[] =
247 {
248         EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
249         EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
250         EVENT_CONSTRAINT_END
251 };
252
253 static u64 intel_pmu_event_map(int hw_event)
254 {
255         return intel_perfmon_event_map[hw_event];
256 }
257
258 /*
259  * Generalized hw caching related hw_event table, filled
260  * in on a per model basis. A value of 0 means
261  * 'not supported', -1 means 'hw_event makes no sense on
262  * this CPU', any other value means the raw hw_event
263  * ID.
264  */
265
266 #define C(x) PERF_COUNT_HW_CACHE_##x
267
268 static u64 __read_mostly hw_cache_event_ids
269                                 [PERF_COUNT_HW_CACHE_MAX]
270                                 [PERF_COUNT_HW_CACHE_OP_MAX]
271                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
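/*
 * Each supported entry below is a raw event selector: bits 7:0 carry the
 * event code and bits 15:8 the unit mask.  E.g. 0x0f40 is event 0x40
 * (L1D_CACHE_LD) qualified with unit mask 0x0f (all MESI states), while
 * 0x0140 is the same event restricted to the I state, i.e. a miss.
 */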
272
273 static __initconst u64 nehalem_hw_cache_event_ids
274                                 [PERF_COUNT_HW_CACHE_MAX]
275                                 [PERF_COUNT_HW_CACHE_OP_MAX]
276                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
277 {
278  [ C(L1D) ] = {
279         [ C(OP_READ) ] = {
280                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
281                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
282         },
283         [ C(OP_WRITE) ] = {
284                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
285                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
286         },
287         [ C(OP_PREFETCH) ] = {
288                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
289                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
290         },
291  },
292  [ C(L1I ) ] = {
293         [ C(OP_READ) ] = {
294                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
295                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
296         },
297         [ C(OP_WRITE) ] = {
298                 [ C(RESULT_ACCESS) ] = -1,
299                 [ C(RESULT_MISS)   ] = -1,
300         },
301         [ C(OP_PREFETCH) ] = {
302                 [ C(RESULT_ACCESS) ] = 0x0,
303                 [ C(RESULT_MISS)   ] = 0x0,
304         },
305  },
306  [ C(LL  ) ] = {
307         [ C(OP_READ) ] = {
308                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
309                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
310         },
311         [ C(OP_WRITE) ] = {
312                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
313                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
314         },
315         [ C(OP_PREFETCH) ] = {
316                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
317                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
318         },
319  },
320  [ C(DTLB) ] = {
321         [ C(OP_READ) ] = {
322                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
323                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
324         },
325         [ C(OP_WRITE) ] = {
326                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
327                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
328         },
329         [ C(OP_PREFETCH) ] = {
330                 [ C(RESULT_ACCESS) ] = 0x0,
331                 [ C(RESULT_MISS)   ] = 0x0,
332         },
333  },
334  [ C(ITLB) ] = {
335         [ C(OP_READ) ] = {
336                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
337                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
338         },
339         [ C(OP_WRITE) ] = {
340                 [ C(RESULT_ACCESS) ] = -1,
341                 [ C(RESULT_MISS)   ] = -1,
342         },
343         [ C(OP_PREFETCH) ] = {
344                 [ C(RESULT_ACCESS) ] = -1,
345                 [ C(RESULT_MISS)   ] = -1,
346         },
347  },
348  [ C(BPU ) ] = {
349         [ C(OP_READ) ] = {
350                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
351                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
352         },
353         [ C(OP_WRITE) ] = {
354                 [ C(RESULT_ACCESS) ] = -1,
355                 [ C(RESULT_MISS)   ] = -1,
356         },
357         [ C(OP_PREFETCH) ] = {
358                 [ C(RESULT_ACCESS) ] = -1,
359                 [ C(RESULT_MISS)   ] = -1,
360         },
361  },
362 };
363
364 static __initconst u64 core2_hw_cache_event_ids
365                                 [PERF_COUNT_HW_CACHE_MAX]
366                                 [PERF_COUNT_HW_CACHE_OP_MAX]
367                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
368 {
369  [ C(L1D) ] = {
370         [ C(OP_READ) ] = {
371                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
372                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
373         },
374         [ C(OP_WRITE) ] = {
375                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
376                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
377         },
378         [ C(OP_PREFETCH) ] = {
379                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
380                 [ C(RESULT_MISS)   ] = 0,
381         },
382  },
383  [ C(L1I ) ] = {
384         [ C(OP_READ) ] = {
385                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
386                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
387         },
388         [ C(OP_WRITE) ] = {
389                 [ C(RESULT_ACCESS) ] = -1,
390                 [ C(RESULT_MISS)   ] = -1,
391         },
392         [ C(OP_PREFETCH) ] = {
393                 [ C(RESULT_ACCESS) ] = 0,
394                 [ C(RESULT_MISS)   ] = 0,
395         },
396  },
397  [ C(LL  ) ] = {
398         [ C(OP_READ) ] = {
399                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
400                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
401         },
402         [ C(OP_WRITE) ] = {
403                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
404                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
405         },
406         [ C(OP_PREFETCH) ] = {
407                 [ C(RESULT_ACCESS) ] = 0,
408                 [ C(RESULT_MISS)   ] = 0,
409         },
410  },
411  [ C(DTLB) ] = {
412         [ C(OP_READ) ] = {
413                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
414                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
415         },
416         [ C(OP_WRITE) ] = {
417                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
418                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
419         },
420         [ C(OP_PREFETCH) ] = {
421                 [ C(RESULT_ACCESS) ] = 0,
422                 [ C(RESULT_MISS)   ] = 0,
423         },
424  },
425  [ C(ITLB) ] = {
426         [ C(OP_READ) ] = {
427                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
428                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
429         },
430         [ C(OP_WRITE) ] = {
431                 [ C(RESULT_ACCESS) ] = -1,
432                 [ C(RESULT_MISS)   ] = -1,
433         },
434         [ C(OP_PREFETCH) ] = {
435                 [ C(RESULT_ACCESS) ] = -1,
436                 [ C(RESULT_MISS)   ] = -1,
437         },
438  },
439  [ C(BPU ) ] = {
440         [ C(OP_READ) ] = {
441                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
442                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
443         },
444         [ C(OP_WRITE) ] = {
445                 [ C(RESULT_ACCESS) ] = -1,
446                 [ C(RESULT_MISS)   ] = -1,
447         },
448         [ C(OP_PREFETCH) ] = {
449                 [ C(RESULT_ACCESS) ] = -1,
450                 [ C(RESULT_MISS)   ] = -1,
451         },
452  },
453 };
454
455 static __initconst u64 atom_hw_cache_event_ids
456                                 [PERF_COUNT_HW_CACHE_MAX]
457                                 [PERF_COUNT_HW_CACHE_OP_MAX]
458                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
459 {
460  [ C(L1D) ] = {
461         [ C(OP_READ) ] = {
462                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
463                 [ C(RESULT_MISS)   ] = 0,
464         },
465         [ C(OP_WRITE) ] = {
466                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
467                 [ C(RESULT_MISS)   ] = 0,
468         },
469         [ C(OP_PREFETCH) ] = {
470                 [ C(RESULT_ACCESS) ] = 0x0,
471                 [ C(RESULT_MISS)   ] = 0,
472         },
473  },
474  [ C(L1I ) ] = {
475         [ C(OP_READ) ] = {
476                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
477                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
478         },
479         [ C(OP_WRITE) ] = {
480                 [ C(RESULT_ACCESS) ] = -1,
481                 [ C(RESULT_MISS)   ] = -1,
482         },
483         [ C(OP_PREFETCH) ] = {
484                 [ C(RESULT_ACCESS) ] = 0,
485                 [ C(RESULT_MISS)   ] = 0,
486         },
487  },
488  [ C(LL  ) ] = {
489         [ C(OP_READ) ] = {
490                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
491                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
492         },
493         [ C(OP_WRITE) ] = {
494                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
495                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
496         },
497         [ C(OP_PREFETCH) ] = {
498                 [ C(RESULT_ACCESS) ] = 0,
499                 [ C(RESULT_MISS)   ] = 0,
500         },
501  },
502  [ C(DTLB) ] = {
503         [ C(OP_READ) ] = {
504                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
505                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
506         },
507         [ C(OP_WRITE) ] = {
508                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
509                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
510         },
511         [ C(OP_PREFETCH) ] = {
512                 [ C(RESULT_ACCESS) ] = 0,
513                 [ C(RESULT_MISS)   ] = 0,
514         },
515  },
516  [ C(ITLB) ] = {
517         [ C(OP_READ) ] = {
518                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
519                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
520         },
521         [ C(OP_WRITE) ] = {
522                 [ C(RESULT_ACCESS) ] = -1,
523                 [ C(RESULT_MISS)   ] = -1,
524         },
525         [ C(OP_PREFETCH) ] = {
526                 [ C(RESULT_ACCESS) ] = -1,
527                 [ C(RESULT_MISS)   ] = -1,
528         },
529  },
530  [ C(BPU ) ] = {
531         [ C(OP_READ) ] = {
532                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
533                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
534         },
535         [ C(OP_WRITE) ] = {
536                 [ C(RESULT_ACCESS) ] = -1,
537                 [ C(RESULT_MISS)   ] = -1,
538         },
539         [ C(OP_PREFETCH) ] = {
540                 [ C(RESULT_ACCESS) ] = -1,
541                 [ C(RESULT_MISS)   ] = -1,
542         },
543  },
544 };
545
546 static u64 intel_pmu_raw_event(u64 hw_event)
547 {
548 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
549 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
550 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
551 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
552 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
553
554 #define CORE_EVNTSEL_MASK               \
555         (INTEL_ARCH_EVTSEL_MASK |       \
556          INTEL_ARCH_UNIT_MASK   |       \
557          INTEL_ARCH_EDGE_MASK   |       \
558          INTEL_ARCH_INV_MASK    |       \
559          INTEL_ARCH_CNT_MASK)
560
561         return hw_event & CORE_EVNTSEL_MASK;
562 }
563
564 static __initconst u64 amd_hw_cache_event_ids
565                                 [PERF_COUNT_HW_CACHE_MAX]
566                                 [PERF_COUNT_HW_CACHE_OP_MAX]
567                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
568 {
569  [ C(L1D) ] = {
570         [ C(OP_READ) ] = {
571                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
572                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
573         },
574         [ C(OP_WRITE) ] = {
575                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
576                 [ C(RESULT_MISS)   ] = 0,
577         },
578         [ C(OP_PREFETCH) ] = {
579                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
580                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
581         },
582  },
583  [ C(L1I ) ] = {
584         [ C(OP_READ) ] = {
585                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
586                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
587         },
588         [ C(OP_WRITE) ] = {
589                 [ C(RESULT_ACCESS) ] = -1,
590                 [ C(RESULT_MISS)   ] = -1,
591         },
592         [ C(OP_PREFETCH) ] = {
593                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
594                 [ C(RESULT_MISS)   ] = 0,
595         },
596  },
597  [ C(LL  ) ] = {
598         [ C(OP_READ) ] = {
599                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
600                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
601         },
602         [ C(OP_WRITE) ] = {
603                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
604                 [ C(RESULT_MISS)   ] = 0,
605         },
606         [ C(OP_PREFETCH) ] = {
607                 [ C(RESULT_ACCESS) ] = 0,
608                 [ C(RESULT_MISS)   ] = 0,
609         },
610  },
611  [ C(DTLB) ] = {
612         [ C(OP_READ) ] = {
613                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
614                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
615         },
616         [ C(OP_WRITE) ] = {
617                 [ C(RESULT_ACCESS) ] = 0,
618                 [ C(RESULT_MISS)   ] = 0,
619         },
620         [ C(OP_PREFETCH) ] = {
621                 [ C(RESULT_ACCESS) ] = 0,
622                 [ C(RESULT_MISS)   ] = 0,
623         },
624  },
625  [ C(ITLB) ] = {
626         [ C(OP_READ) ] = {
627                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
628                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
629         },
630         [ C(OP_WRITE) ] = {
631                 [ C(RESULT_ACCESS) ] = -1,
632                 [ C(RESULT_MISS)   ] = -1,
633         },
634         [ C(OP_PREFETCH) ] = {
635                 [ C(RESULT_ACCESS) ] = -1,
636                 [ C(RESULT_MISS)   ] = -1,
637         },
638  },
639  [ C(BPU ) ] = {
640         [ C(OP_READ) ] = {
641                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
642                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
643         },
644         [ C(OP_WRITE) ] = {
645                 [ C(RESULT_ACCESS) ] = -1,
646                 [ C(RESULT_MISS)   ] = -1,
647         },
648         [ C(OP_PREFETCH) ] = {
649                 [ C(RESULT_ACCESS) ] = -1,
650                 [ C(RESULT_MISS)   ] = -1,
651         },
652  },
653 };
654
655 /*
656  * AMD Performance Monitor K7 and later.
657  */
658 static const u64 amd_perfmon_event_map[] =
659 {
660   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
661   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
662   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
663   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
664   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
665   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
666 };
667
668 static u64 amd_pmu_event_map(int hw_event)
669 {
670         return amd_perfmon_event_map[hw_event];
671 }
672
673 static u64 amd_pmu_raw_event(u64 hw_event)
674 {
675 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
676 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
677 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
678 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
679 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
680
681 #define K7_EVNTSEL_MASK                 \
682         (K7_EVNTSEL_EVENT_MASK |        \
683          K7_EVNTSEL_UNIT_MASK  |        \
684          K7_EVNTSEL_EDGE_MASK  |        \
685          K7_EVNTSEL_INV_MASK   |        \
686          K7_EVNTSEL_REG_MASK)
687
688         return hw_event & K7_EVNTSEL_MASK;
689 }
690
691 /*
692  * Propagate event elapsed time into the generic event.
693  * Can only be executed on the CPU where the event is active.
694  * Returns the new raw count; the delta is folded into the generic event.
695  */
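/*
 * Worked example, assuming 48-bit counters (event_bits == 48, shift == 16):
 * the counter was programmed with -256, so prev_raw_count ==
 * 0xffffffffffffff00, but the MSR reads back zero-extended, e.g.
 * new_raw_count == 0x0000ffffffffff10 after 16 increments.  Shifting both
 * values left by 16 and the difference back right discards the mismatched
 * upper bits, so delta correctly comes out as 16.
 */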
696 static u64
697 x86_perf_event_update(struct perf_event *event,
698                         struct hw_perf_event *hwc, int idx)
699 {
700         int shift = 64 - x86_pmu.event_bits;
701         u64 prev_raw_count, new_raw_count;
702         s64 delta;
703
704         if (idx == X86_PMC_IDX_FIXED_BTS)
705                 return 0;
706
707         /*
708          * Careful: an NMI might modify the previous event value.
709          *
710          * Our tactic to handle this is to first atomically read and
711          * exchange a new raw count - then add that new-prev delta
712          * count to the generic event atomically:
713          */
714 again:
715         prev_raw_count = atomic64_read(&hwc->prev_count);
716         rdmsrl(hwc->event_base + idx, new_raw_count);
717
718         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
719                                         new_raw_count) != prev_raw_count)
720                 goto again;
721
722         /*
723          * Now we have the new raw value and have updated the prev
724          * timestamp already. We can now calculate the elapsed delta
725          * (event-)time and add that to the generic event.
726          *
727          * Careful, not all hw sign-extends above the physical width
728          * of the count.
729          */
730         delta = (new_raw_count << shift) - (prev_raw_count << shift);
731         delta >>= shift;
732
733         atomic64_add(delta, &event->count);
734         atomic64_sub(delta, &hwc->period_left);
735
736         return new_raw_count;
737 }
738
739 static atomic_t active_events;
740 static DEFINE_MUTEX(pmc_reserve_mutex);
741
742 static bool reserve_pmc_hardware(void)
743 {
744 #ifdef CONFIG_X86_LOCAL_APIC
745         int i;
746
747         if (nmi_watchdog == NMI_LOCAL_APIC)
748                 disable_lapic_nmi_watchdog();
749
750         for (i = 0; i < x86_pmu.num_events; i++) {
751                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
752                         goto perfctr_fail;
753         }
754
755         for (i = 0; i < x86_pmu.num_events; i++) {
756                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
757                         goto eventsel_fail;
758         }
759 #endif
760
761         return true;
762
763 #ifdef CONFIG_X86_LOCAL_APIC
764 eventsel_fail:
765         for (i--; i >= 0; i--)
766                 release_evntsel_nmi(x86_pmu.eventsel + i);
767
768         i = x86_pmu.num_events;
769
770 perfctr_fail:
771         for (i--; i >= 0; i--)
772                 release_perfctr_nmi(x86_pmu.perfctr + i);
773
774         if (nmi_watchdog == NMI_LOCAL_APIC)
775                 enable_lapic_nmi_watchdog();
776
777         return false;
778 #endif
779 }
780
781 static void release_pmc_hardware(void)
782 {
783 #ifdef CONFIG_X86_LOCAL_APIC
784         int i;
785
786         for (i = 0; i < x86_pmu.num_events; i++) {
787                 release_perfctr_nmi(x86_pmu.perfctr + i);
788                 release_evntsel_nmi(x86_pmu.eventsel + i);
789         }
790
791         if (nmi_watchdog == NMI_LOCAL_APIC)
792                 enable_lapic_nmi_watchdog();
793 #endif
794 }
795
796 static inline bool bts_available(void)
797 {
798         return x86_pmu.enable_bts != NULL;
799 }
800
801 static inline void init_debug_store_on_cpu(int cpu)
802 {
803         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
804
805         if (!ds)
806                 return;
807
808         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
809                      (u32)((u64)(unsigned long)ds),
810                      (u32)((u64)(unsigned long)ds >> 32));
811 }
812
813 static inline void fini_debug_store_on_cpu(int cpu)
814 {
815         if (!per_cpu(cpu_hw_events, cpu).ds)
816                 return;
817
818         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
819 }
820
821 static void release_bts_hardware(void)
822 {
823         int cpu;
824
825         if (!bts_available())
826                 return;
827
828         get_online_cpus();
829
830         for_each_online_cpu(cpu)
831                 fini_debug_store_on_cpu(cpu);
832
833         for_each_possible_cpu(cpu) {
834                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
835
836                 if (!ds)
837                         continue;
838
839                 per_cpu(cpu_hw_events, cpu).ds = NULL;
840
841                 kfree((void *)(unsigned long)ds->bts_buffer_base);
842                 kfree(ds);
843         }
844
845         put_online_cpus();
846 }
847
848 static int reserve_bts_hardware(void)
849 {
850         int cpu, err = 0;
851
852         if (!bts_available())
853                 return 0;
854
855         get_online_cpus();
856
857         for_each_possible_cpu(cpu) {
858                 struct debug_store *ds;
859                 void *buffer;
860
861                 err = -ENOMEM;
862                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
863                 if (unlikely(!buffer))
864                         break;
865
866                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
867                 if (unlikely(!ds)) {
868                         kfree(buffer);
869                         break;
870                 }
871
872                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
873                 ds->bts_index = ds->bts_buffer_base;
874                 ds->bts_absolute_maximum =
875                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
876                 ds->bts_interrupt_threshold =
877                         ds->bts_absolute_maximum - BTS_OVFL_TH;
878
879                 per_cpu(cpu_hw_events, cpu).ds = ds;
880                 err = 0;
881         }
882
883         if (err)
884                 release_bts_hardware();
885         else {
886                 for_each_online_cpu(cpu)
887                         init_debug_store_on_cpu(cpu);
888         }
889
890         put_online_cpus();
891
892         return err;
893 }
894
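/*
 * atomic_dec_and_mutex_lock() only takes pmc_reserve_mutex when the
 * reference count drops to zero, so the PMC and BTS hardware is released
 * exactly once, by the last event to be destroyed.
 */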
895 static void hw_perf_event_destroy(struct perf_event *event)
896 {
897         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
898                 release_pmc_hardware();
899                 release_bts_hardware();
900                 mutex_unlock(&pmc_reserve_mutex);
901         }
902 }
903
904 static inline int x86_pmu_initialized(void)
905 {
906         return x86_pmu.handle_irq != NULL;
907 }
908
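/*
 * Example: a PERF_TYPE_HW_CACHE config of
 *   PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 * selects hw_cache_event_ids[L1D][OP_READ][RESULT_MISS], which is
 * 0x0140 (L1D_CACHE_LD.I_STATE) on Nehalem.
 */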
909 static inline int
910 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
911 {
912         unsigned int cache_type, cache_op, cache_result;
913         u64 config, val;
914
915         config = attr->config;
916
917         cache_type = (config >>  0) & 0xff;
918         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
919                 return -EINVAL;
920
921         cache_op = (config >>  8) & 0xff;
922         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
923                 return -EINVAL;
924
925         cache_result = (config >> 16) & 0xff;
926         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
927                 return -EINVAL;
928
929         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
930
931         if (val == 0)
932                 return -ENOENT;
933
934         if (val == -1)
935                 return -EINVAL;
936
937         hwc->config |= val;
938
939         return 0;
940 }
941
942 static void intel_pmu_enable_bts(u64 config)
943 {
944         unsigned long debugctlmsr;
945
946         debugctlmsr = get_debugctlmsr();
947
948         debugctlmsr |= X86_DEBUGCTL_TR;
949         debugctlmsr |= X86_DEBUGCTL_BTS;
950         debugctlmsr |= X86_DEBUGCTL_BTINT;
951
952         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
953                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
954
955         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
956                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
957
958         update_debugctlmsr(debugctlmsr);
959 }
960
961 static void intel_pmu_disable_bts(void)
962 {
963         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
964         unsigned long debugctlmsr;
965
966         if (!cpuc->ds)
967                 return;
968
969         debugctlmsr = get_debugctlmsr();
970
971         debugctlmsr &=
972                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
973                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
974
975         update_debugctlmsr(debugctlmsr);
976 }
977
978 /*
979  * Setup the hardware configuration for a given attr_type
980  */
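/*
 * E.g. a plain PERF_COUNT_HW_CPU_CYCLES event that excludes nothing ends
 * up with hwc->config == ARCH_PERFMON_EVENTSEL_INT |
 * ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS | 0x003c on Intel.
 */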
981 static int __hw_perf_event_init(struct perf_event *event)
982 {
983         struct perf_event_attr *attr = &event->attr;
984         struct hw_perf_event *hwc = &event->hw;
985         u64 config;
986         int err;
987
988         if (!x86_pmu_initialized())
989                 return -ENODEV;
990
991         err = 0;
992         if (!atomic_inc_not_zero(&active_events)) {
993                 mutex_lock(&pmc_reserve_mutex);
994                 if (atomic_read(&active_events) == 0) {
995                         if (!reserve_pmc_hardware())
996                                 err = -EBUSY;
997                         else
998                                 err = reserve_bts_hardware();
999                 }
1000                 if (!err)
1001                         atomic_inc(&active_events);
1002                 mutex_unlock(&pmc_reserve_mutex);
1003         }
1004         if (err)
1005                 return err;
1006
1007         event->destroy = hw_perf_event_destroy;
1008
1009         /*
1010          * Generate PMC IRQs:
1011          * (keep 'enabled' bit clear for now)
1012          */
1013         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1014
1015         hwc->idx = -1;
1016
1017         /*
1018          * Count user and OS events unless requested not to.
1019          */
1020         if (!attr->exclude_user)
1021                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1022         if (!attr->exclude_kernel)
1023                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1024
1025         if (!hwc->sample_period) {
1026                 hwc->sample_period = x86_pmu.max_period;
1027                 hwc->last_period = hwc->sample_period;
1028                 atomic64_set(&hwc->period_left, hwc->sample_period);
1029         } else {
1030                 /*
1031                  * If we have a PMU initialized but no APIC
1032                  * interrupts, we cannot sample hardware
1033                  * events (user-space has to fall back and
1034                  * sample via a hrtimer based software event):
1035                  */
1036                 if (!x86_pmu.apic)
1037                         return -EOPNOTSUPP;
1038         }
1039
1040         /*
1041          * Raw hw_event type provides the config in the hw_event structure
1042          */
1043         if (attr->type == PERF_TYPE_RAW) {
1044                 hwc->config |= x86_pmu.raw_event(attr->config);
1045                 return 0;
1046         }
1047
1048         if (attr->type == PERF_TYPE_HW_CACHE)
1049                 return set_ext_hw_attr(hwc, attr);
1050
1051         if (attr->config >= x86_pmu.max_events)
1052                 return -EINVAL;
1053
1054         /*
1055          * The generic map:
1056          */
1057         config = x86_pmu.event_map(attr->config);
1058
1059         if (config == 0)
1060                 return -ENOENT;
1061
1062         if (config == -1LL)
1063                 return -EINVAL;
1064
1065         /*
1066          * Branch tracing:
1067          */
1068         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1069             (hwc->sample_period == 1)) {
1070                 /* BTS is not supported by this architecture. */
1071                 if (!bts_available())
1072                         return -EOPNOTSUPP;
1073
1074                 /* BTS is currently only allowed for user-mode. */
1075                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1076                         return -EOPNOTSUPP;
1077         }
1078
1079         hwc->config |= config;
1080
1081         return 0;
1082 }
1083
1084 static void p6_pmu_disable_all(void)
1085 {
1086         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1087         u64 val;
1088
1089         if (!cpuc->enabled)
1090                 return;
1091
1092         cpuc->enabled = 0;
1093         barrier();
1094
1095         /* p6 only has one enable register */
1096         rdmsrl(MSR_P6_EVNTSEL0, val);
1097         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1098         wrmsrl(MSR_P6_EVNTSEL0, val);
1099 }
1100
1101 static void intel_pmu_disable_all(void)
1102 {
1103         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1104
1105         if (!cpuc->enabled)
1106                 return;
1107
1108         cpuc->enabled = 0;
1109         barrier();
1110
1111         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1112
1113         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1114                 intel_pmu_disable_bts();
1115 }
1116
1117 static void amd_pmu_disable_all(void)
1118 {
1119         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1120         int idx;
1121
1122         if (!cpuc->enabled)
1123                 return;
1124
1125         cpuc->enabled = 0;
1126         /*
1127          * ensure we write the disable before we start disabling the
1128          * events proper, so that amd_pmu_enable_event() does the
1129          * right thing.
1130          */
1131         barrier();
1132
1133         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1134                 u64 val;
1135
1136                 if (!test_bit(idx, cpuc->active_mask))
1137                         continue;
1138                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1139                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1140                         continue;
1141                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1142                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1143         }
1144 }
1145
1146 void hw_perf_disable(void)
1147 {
1148         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1149
1150         if (!x86_pmu_initialized())
1151                 return;
1152
1153         if (cpuc->enabled)
1154                 cpuc->n_added = 0;
1155
1156         x86_pmu.disable_all();
1157 }
1158
1159 static void p6_pmu_enable_all(void)
1160 {
1161         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1162         unsigned long val;
1163
1164         if (cpuc->enabled)
1165                 return;
1166
1167         cpuc->enabled = 1;
1168         barrier();
1169
1170         /* p6 only has one enable register */
1171         rdmsrl(MSR_P6_EVNTSEL0, val);
1172         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1173         wrmsrl(MSR_P6_EVNTSEL0, val);
1174 }
1175
1176 static void intel_pmu_enable_all(void)
1177 {
1178         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1179
1180         if (cpuc->enabled)
1181                 return;
1182
1183         cpuc->enabled = 1;
1184         barrier();
1185
1186         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1187
1188         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1189                 struct perf_event *event =
1190                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1191
1192                 if (WARN_ON_ONCE(!event))
1193                         return;
1194
1195                 intel_pmu_enable_bts(event->hw.config);
1196         }
1197 }
1198
1199 static void amd_pmu_enable_all(void)
1200 {
1201         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1202         int idx;
1203
1204         if (cpuc->enabled)
1205                 return;
1206
1207         cpuc->enabled = 1;
1208         barrier();
1209
1210         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1211                 struct perf_event *event = cpuc->events[idx];
1212                 u64 val;
1213
1214                 if (!test_bit(idx, cpuc->active_mask))
1215                         continue;
1216
1217                 val = event->hw.config;
1218                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1219                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1220         }
1221 }
1222
1223 static const struct pmu pmu;
1224
1225 static inline int is_x86_event(struct perf_event *event)
1226 {
1227         return event->pmu == &pmu;
1228 }
1229
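/*
 * Scheduling sketch: with two events, one constrained to counter 1 only
 * (idxmsk 0x2, weight 1) and one allowed on counters 0-1 (idxmsk 0x3,
 * weight 2), the slow path below places the weight-1 event first, onto
 * counter 1, so the weight-2 event still finds counter 0 free.  Assigning
 * in the opposite order could leave the constrained event unschedulable.
 */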
1230 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1231 {
1232         int i, j , w, num;
1233         int weight, wmax;
1234         unsigned long *c;
1235         u64 constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1236         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1237         struct hw_perf_event *hwc;
1238
1239         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1240
1241         for (i = 0; i < n; i++) {
1242                 x86_pmu.get_event_constraints(cpuc,
1243                                               cpuc->event_list[i],
1244                                               constraints[i]);
1245         }
1246
1247         /*
1248          * fastpath, try to reuse previous register
1249          */
1250         for (i = 0, num = n; i < n; i++, num--) {
1251                 hwc = &cpuc->event_list[i]->hw;
1252                 c = (unsigned long *)constraints[i];
1253
1254                 /* never assigned */
1255                 if (hwc->idx == -1)
1256                         break;
1257
1258                 /* constraint still honored */
1259                 if (!test_bit(hwc->idx, c))
1260                         break;
1261
1262                 /* not already used */
1263                 if (test_bit(hwc->idx, used_mask))
1264                         break;
1265
1266 #if 0
1267                 pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
1268                          smp_processor_id(),
1269                          hwc->config,
1270                          hwc->idx,
1271                          assign ? 'y' : 'n');
1272 #endif
1273
1274                 set_bit(hwc->idx, used_mask);
1275                 if (assign)
1276                         assign[i] = hwc->idx;
1277         }
1278         if (!num)
1279                 goto done;
1280
1281         /*
1282          * begin slow path
1283          */
1284
1285         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1286
1287         /*
1288          * weight = number of possible counters
1289          *
1290          * 1    = most constrained, only works on one counter
1291          * wmax = least constrained, works on any counter
1292          *
1293          * assign events to counters starting with most
1294          * constrained events.
1295          */
1296         wmax = x86_pmu.num_events;
1297
1298         /*
1299          * when fixed event counters are present,
1300          * wmax is incremented by 1 to account
1301          * for one more choice
1302          */
1303         if (x86_pmu.num_events_fixed)
1304                 wmax++;
1305
1306         for (w = 1, num = n; num && w <= wmax; w++) {
1307                 /* for each event */
1308                 for (i = 0; num && i < n; i++) {
1309                         c = (unsigned long *)constraints[i];
1310                         hwc = &cpuc->event_list[i]->hw;
1311
1312                         weight = bitmap_weight(c, X86_PMC_IDX_MAX);
1313                         if (weight != w)
1314                                 continue;
1315
1316                         for_each_bit(j, c, X86_PMC_IDX_MAX) {
1317                                 if (!test_bit(j, used_mask))
1318                                         break;
1319                         }
1320
1321                         if (j == X86_PMC_IDX_MAX)
1322                                 break;
1323
1324 #if 0
1325                         pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
1326                                 smp_processor_id(),
1327                                 hwc->config,
1328                                 j,
1329                                 assign ? 'y' : 'n');
1330 #endif
1331
1332                         set_bit(j, used_mask);
1333
1334                         if (assign)
1335                                 assign[i] = j;
1336                         num--;
1337                 }
1338         }
1339 done:
1340         /*
1341          * scheduling failed or is just a simulation,
1342          * free resources if necessary
1343          */
1344         if (!assign || num) {
1345                 for (i = 0; i < n; i++) {
1346                         if (x86_pmu.put_event_constraints)
1347                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1348                 }
1349         }
1350         return num ? -ENOSPC : 0;
1351 }
1352
1353 /*
1354  * dogrp: true if we must collect sibling events (group)
1355  * returns total number of events and error code
1356  */
1357 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1358 {
1359         struct perf_event *event;
1360         int n, max_count;
1361
1362         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1363
1364         /* current number of events already accepted */
1365         n = cpuc->n_events;
1366
1367         if (is_x86_event(leader)) {
1368                 if (n >= max_count)
1369                         return -ENOSPC;
1370                 cpuc->event_list[n] = leader;
1371                 n++;
1372         }
1373         if (!dogrp)
1374                 return n;
1375
1376         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1377                 if (!is_x86_event(event) ||
1378                     event->state <= PERF_EVENT_STATE_OFF)
1379                         continue;
1380
1381                 if (n >= max_count)
1382                         return -ENOSPC;
1383
1384                 cpuc->event_list[n] = event;
1385                 n++;
1386         }
1387         return n;
1388 }
1389
1390
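/*
 * For the fixed-purpose counters event_base is biased by -X86_PMC_IDX_FIXED,
 * so that e.g. idx == X86_PMC_IDX_FIXED makes event_base + idx ==
 * MSR_ARCH_PERFMON_FIXED_CTR0, and idx == X86_PMC_IDX_FIXED + 1 maps to
 * FIXED_CTR1.
 */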
1391 static inline void x86_assign_hw_event(struct perf_event *event,
1392                                 struct hw_perf_event *hwc, int idx)
1393 {
1394         hwc->idx = idx;
1395
1396         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1397                 hwc->config_base = 0;
1398                 hwc->event_base = 0;
1399         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1400                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1401                 /*
1402                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1403                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1404                  */
1405                 hwc->event_base =
1406                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1407         } else {
1408                 hwc->config_base = x86_pmu.eventsel;
1409                 hwc->event_base  = x86_pmu.perfctr;
1410         }
1411 }
1412
1413 void hw_perf_enable(void)
1414 {
1415         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1416         struct perf_event *event;
1417         struct hw_perf_event *hwc;
1418         int i;
1419
1420         if (!x86_pmu_initialized())
1421                 return;
1422         if (cpuc->n_added) {
1423                 /*
1424                  * apply assignment obtained either from
1425                  * hw_perf_group_sched_in() or x86_pmu_enable()
1426                  *
1427                  * step1: save events moving to new counters
1428                  * step2: reprogram moved events into new counters
1429                  */
1430                 for (i = 0; i < cpuc->n_events; i++) {
1431
1432                         event = cpuc->event_list[i];
1433                         hwc = &event->hw;
1434
1435                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1436                                 continue;
1437
1438                         x86_pmu.disable(hwc, hwc->idx);
1439
1440                         clear_bit(hwc->idx, cpuc->active_mask);
1441                         barrier();
1442                         cpuc->events[hwc->idx] = NULL;
1443
1444                         x86_perf_event_update(event, hwc, hwc->idx);
1445
1446                         hwc->idx = -1;
1447                 }
1448
1449                 for (i = 0; i < cpuc->n_events; i++) {
1450
1451                         event = cpuc->event_list[i];
1452                         hwc = &event->hw;
1453
1454                         if (hwc->idx == -1) {
1455                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1456                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1457                         }
1458                         /*
1459                          * need to mark as active because x86_pmu_disable()
1460          * clears active_mask and events[] yet it preserves
1461                          * idx
1462                          */
1463                         set_bit(hwc->idx, cpuc->active_mask);
1464                         cpuc->events[hwc->idx] = event;
1465
1466                         x86_pmu.enable(hwc, hwc->idx);
1467                         perf_event_update_userpage(event);
1468                 }
1469                 cpuc->n_added = 0;
1470                 perf_events_lapic_init();
1471         }
1472         x86_pmu.enable_all();
1473 }
1474
1475 static inline u64 intel_pmu_get_status(void)
1476 {
1477         u64 status;
1478
1479         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1480
1481         return status;
1482 }
1483
1484 static inline void intel_pmu_ack_status(u64 ack)
1485 {
1486         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1487 }
1488
1489 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1490 {
1491         (void)checking_wrmsrl(hwc->config_base + idx,
1492                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1493 }
1494
1495 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1496 {
1497         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1498 }
1499
1500 static inline void
1501 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1502 {
1503         int idx = __idx - X86_PMC_IDX_FIXED;
1504         u64 ctrl_val, mask;
1505
1506         mask = 0xfULL << (idx * 4);
1507
1508         rdmsrl(hwc->config_base, ctrl_val);
1509         ctrl_val &= ~mask;
1510         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1511 }
1512
1513 static inline void
1514 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1515 {
1516         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1517         u64 val = P6_NOP_EVENT;
1518
1519         if (cpuc->enabled)
1520                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1521
1522         (void)checking_wrmsrl(hwc->config_base + idx, val);
1523 }
1524
1525 static inline void
1526 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1527 {
1528         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1529                 intel_pmu_disable_bts();
1530                 return;
1531         }
1532
1533         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1534                 intel_pmu_disable_fixed(hwc, idx);
1535                 return;
1536         }
1537
1538         x86_pmu_disable_event(hwc, idx);
1539 }
1540
1541 static inline void
1542 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1543 {
1544         x86_pmu_disable_event(hwc, idx);
1545 }
1546
1547 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1548
1549 /*
1550  * Set the next IRQ period, based on the hwc->period_left value.
1551  * To be called with the event disabled in hw:
1552  */
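/*
 * The counter is programmed with -left (truncated to event_mask bits) so
 * that it overflows, and raises a PMI, after exactly 'left' increments:
 * e.g. left == 1000 on 48-bit hardware writes 0xfffffffffc18.
 */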
1553 static int
1554 x86_perf_event_set_period(struct perf_event *event,
1555                              struct hw_perf_event *hwc, int idx)
1556 {
1557         s64 left = atomic64_read(&hwc->period_left);
1558         s64 period = hwc->sample_period;
1559         int err, ret = 0;
1560
1561         if (idx == X86_PMC_IDX_FIXED_BTS)
1562                 return 0;
1563
1564         /*
1565          * If we are way outside a reasonable range then just skip forward:
1566          */
1567         if (unlikely(left <= -period)) {
1568                 left = period;
1569                 atomic64_set(&hwc->period_left, left);
1570                 hwc->last_period = period;
1571                 ret = 1;
1572         }
1573
1574         if (unlikely(left <= 0)) {
1575                 left += period;
1576                 atomic64_set(&hwc->period_left, left);
1577                 hwc->last_period = period;
1578                 ret = 1;
1579         }
1580         /*
1581          * Quirk: certain CPUs dont like it if just 1 hw_event is left:
1582          */
1583         if (unlikely(left < 2))
1584                 left = 2;
1585
1586         if (left > x86_pmu.max_period)
1587                 left = x86_pmu.max_period;
1588
1589         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1590
1591         /*
1592          * The hw event starts counting from this event offset,
1593          * mark it to be able to extract future deltas:
1594          */
1595         atomic64_set(&hwc->prev_count, (u64)-left);
1596
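        /*
         * The counter is programmed with -left (masked to the counter
         * width) so that it overflows, and raises a PMI, after 'left'
         * more increments. E.g. left == 1000 on a 48-bit counter
         * results in 0xfffffffffc18 being written.
         */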
1597         err = checking_wrmsrl(hwc->event_base + idx,
1598                              (u64)(-left) & x86_pmu.event_mask);
1599
1600         perf_event_update_userpage(event);
1601
1602         return ret;
1603 }
1604
1605 static inline void
1606 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1607 {
1608         int idx = __idx - X86_PMC_IDX_FIXED;
1609         u64 ctrl_val, bits, mask;
1610         int err;
1611
1612         /*
1613          * Enable IRQ generation (0x8),
1614          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1615          * if requested:
1616          */
1617         bits = 0x8ULL;
1618         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1619                 bits |= 0x2;
1620         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1621                 bits |= 0x1;
1622         bits <<= (idx * 4);
1623         mask = 0xfULL << (idx * 4);
1624
1625         rdmsrl(hwc->config_base, ctrl_val);
1626         ctrl_val &= ~mask;
1627         ctrl_val |= bits;
1628         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1629 }
1630
1631 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1632 {
1633         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1634         u64 val;
1635
1636         val = hwc->config;
1637         if (cpuc->enabled)
1638                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1639
1640         (void)checking_wrmsrl(hwc->config_base + idx, val);
1641 }
1642
1643
1644 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1645 {
1646         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1647                 if (!__get_cpu_var(cpu_hw_events).enabled)
1648                         return;
1649
1650                 intel_pmu_enable_bts(hwc->config);
1651                 return;
1652         }
1653
1654         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1655                 intel_pmu_enable_fixed(hwc, idx);
1656                 return;
1657         }
1658
1659         x86_pmu_enable_event(hwc, idx);
1660 }
1661
1662 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1663 {
1664         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1665
1666         if (cpuc->enabled)
1667                 x86_pmu_enable_event(hwc, idx);
1668 }
1669
1670 /*
1671  * activate a single event
1672  *
1673  * The event is added to the group of enabled events
1674  * but only if it can be scheduled with existing events.
1675  *
1676  * Called with the PMU disabled. If successful, the caller is then
1677  * guaranteed to call perf_enable() and hw_perf_enable().
1678  */
1679 static int x86_pmu_enable(struct perf_event *event)
1680 {
1681         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1682         struct hw_perf_event *hwc;
1683         int assign[X86_PMC_IDX_MAX];
1684         int n, n0, ret;
1685
1686         hwc = &event->hw;
1687
1688         n0 = cpuc->n_events;
1689         n = collect_events(cpuc, event, false);
1690         if (n < 0)
1691                 return n;
1692
1693         ret = x86_schedule_events(cpuc, n, assign);
1694         if (ret)
1695                 return ret;
1696         /*
1697          * Copy the new assignment now that we know it is possible;
1698          * it will be used by hw_perf_enable().
1699          */
1700         memcpy(cpuc->assign, assign, n*sizeof(int));
1701
1702         cpuc->n_events = n;
1703         cpuc->n_added  = n - n0;
1704
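        /*
         * If the event already has a hardware counter assigned,
         * (re)program its sampling period right away:
         */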
1705         if (hwc->idx != -1)
1706                 x86_perf_event_set_period(event, hwc, hwc->idx);
1707
1708         return 0;
1709 }
1710
1711 static void x86_pmu_unthrottle(struct perf_event *event)
1712 {
1713         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1714         struct hw_perf_event *hwc = &event->hw;
1715
1716         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1717                                 cpuc->events[hwc->idx] != event))
1718                 return;
1719
1720         x86_pmu.enable(hwc, hwc->idx);
1721 }
1722
1723 void perf_event_print_debug(void)
1724 {
1725         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1726         struct cpu_hw_events *cpuc;
1727         unsigned long flags;
1728         int cpu, idx;
1729
1730         if (!x86_pmu.num_events)
1731                 return;
1732
1733         local_irq_save(flags);
1734
1735         cpu = smp_processor_id();
1736         cpuc = &per_cpu(cpu_hw_events, cpu);
1737
1738         if (x86_pmu.version >= 2) {
1739                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1740                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1741                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1742                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1743
1744                 pr_info("\n");
1745                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1746                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1747                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1748                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1749         }
1750         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1751
1752         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1753                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1754                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1755
1756                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1757
1758                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1759                         cpu, idx, pmc_ctrl);
1760                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1761                         cpu, idx, pmc_count);
1762                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1763                         cpu, idx, prev_left);
1764         }
1765         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1766                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1767
1768                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1769                         cpu, idx, pmc_count);
1770         }
1771         local_irq_restore(flags);
1772 }
1773
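/*
 * Drain the BTS (Branch Trace Store) hardware buffer: emit one perf
 * sample per branch record, with ip = branch source and addr = branch
 * target, then reset the buffer index:
 */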
1774 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1775 {
1776         struct debug_store *ds = cpuc->ds;
1777         struct bts_record {
1778                 u64     from;
1779                 u64     to;
1780                 u64     flags;
1781         };
1782         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1783         struct bts_record *at, *top;
1784         struct perf_output_handle handle;
1785         struct perf_event_header header;
1786         struct perf_sample_data data;
1787         struct pt_regs regs;
1788
1789         if (!event)
1790                 return;
1791
1792         if (!ds)
1793                 return;
1794
1795         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1796         top = (struct bts_record *)(unsigned long)ds->bts_index;
1797
1798         if (top <= at)
1799                 return;
1800
1801         ds->bts_index = ds->bts_buffer_base;
1802
1803
1804         data.period     = event->hw.last_period;
1805         data.addr       = 0;
1806         data.raw        = NULL;
1807         regs.ip         = 0;
1808
1809         /*
1810          * Prepare a generic sample, i.e. fill in the invariant fields.
1811          * We will overwrite the from and to address before we output
1812          * the sample.
1813          */
1814         perf_prepare_sample(&header, &data, event, &regs);
1815
1816         if (perf_output_begin(&handle, event,
1817                               header.size * (top - at), 1, 1))
1818                 return;
1819
1820         for (; at < top; at++) {
1821                 data.ip         = at->from;
1822                 data.addr       = at->to;
1823
1824                 perf_output_sample(&handle, &header, &data, event);
1825         }
1826
1827         perf_output_end(&handle);
1828
1829         /* There's new data available. */
1830         event->hw.interrupts++;
1831         event->pending_kill = POLL_IN;
1832 }
1833
1834 static void x86_pmu_disable(struct perf_event *event)
1835 {
1836         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1837         struct hw_perf_event *hwc = &event->hw;
1838         int i, idx = hwc->idx;
1839
1840         /*
1841          * Must be done before we disable, otherwise the NMI handler
1842          * could re-enable it again:
1843          */
1844         clear_bit(idx, cpuc->active_mask);
1845         x86_pmu.disable(hwc, idx);
1846
1847         /*
1848          * Make sure the cleared pointer becomes visible before we
1849          * (potentially) free the event:
1850          */
1851         barrier();
1852
1853         /*
1854          * Drain the remaining delta count out of an event
1855          * that we are disabling:
1856          */
1857         x86_perf_event_update(event, hwc, idx);
1858
1859         /* Drain the remaining BTS records. */
1860         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1861                 intel_pmu_drain_bts_buffer(cpuc);
1862
1863         cpuc->events[idx] = NULL;
1864
1865         for (i = 0; i < cpuc->n_events; i++) {
1866                 if (event == cpuc->event_list[i]) {
1867
1868                         if (x86_pmu.put_event_constraints)
1869                                 x86_pmu.put_event_constraints(cpuc, event);
1870
1871                         while (++i < cpuc->n_events)
1872                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1873
1874                         --cpuc->n_events;
1875                 }
1876         }
1877         perf_event_update_userpage(event);
1878 }
1879
1880 /*
1881  * Save and restart an expired event. Called from NMI context,
1882  * so it has to be careful about preempting normal event ops:
1883  */
1884 static int intel_pmu_save_and_restart(struct perf_event *event)
1885 {
1886         struct hw_perf_event *hwc = &event->hw;
1887         int idx = hwc->idx;
1888         int ret;
1889
1890         x86_perf_event_update(event, hwc, idx);
1891         ret = x86_perf_event_set_period(event, hwc, idx);
1892
1893         if (event->state == PERF_EVENT_STATE_ACTIVE)
1894                 intel_pmu_enable_event(hwc, idx);
1895
1896         return ret;
1897 }
1898
1899 static void intel_pmu_reset(void)
1900 {
1901         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1902         unsigned long flags;
1903         int idx;
1904
1905         if (!x86_pmu.num_events)
1906                 return;
1907
1908         local_irq_save(flags);
1909
1910         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1911
1912         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1913                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1914                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1915         }
1916         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1917                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1918         }
1919         if (ds)
1920                 ds->bts_index = ds->bts_buffer_base;
1921
1922         local_irq_restore(flags);
1923 }
1924
1925 static int p6_pmu_handle_irq(struct pt_regs *regs)
1926 {
1927         struct perf_sample_data data;
1928         struct cpu_hw_events *cpuc;
1929         struct perf_event *event;
1930         struct hw_perf_event *hwc;
1931         int idx, handled = 0;
1932         u64 val;
1933
1934         data.addr = 0;
1935         data.raw = NULL;
1936
1937         cpuc = &__get_cpu_var(cpu_hw_events);
1938
1939         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1940                 if (!test_bit(idx, cpuc->active_mask))
1941                         continue;
1942
1943                 event = cpuc->events[idx];
1944                 hwc = &event->hw;
1945
1946                 val = x86_perf_event_update(event, hwc, idx);
1947                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1948                         continue;
1949
1950                 /*
1951                  * event overflow
1952                  */
1953                 handled         = 1;
1954                 data.period     = event->hw.last_period;
1955
1956                 if (!x86_perf_event_set_period(event, hwc, idx))
1957                         continue;
1958
1959                 if (perf_event_overflow(event, 1, &data, regs))
1960                         p6_pmu_disable_event(hwc, idx);
1961         }
1962
1963         if (handled)
1964                 inc_irq_stat(apic_perf_irqs);
1965
1966         return handled;
1967 }
1968
1969 /*
1970  * This handler is triggered by the local APIC, so the APIC IRQ handling
1971  * rules apply:
1972  */
1973 static int intel_pmu_handle_irq(struct pt_regs *regs)
1974 {
1975         struct perf_sample_data data;
1976         struct cpu_hw_events *cpuc;
1977         int bit, loops;
1978         u64 ack, status;
1979
1980         data.addr = 0;
1981         data.raw = NULL;
1982
1983         cpuc = &__get_cpu_var(cpu_hw_events);
1984
1985         perf_disable();
1986         intel_pmu_drain_bts_buffer(cpuc);
1987         status = intel_pmu_get_status();
1988         if (!status) {
1989                 perf_enable();
1990                 return 0;
1991         }
1992
1993         loops = 0;
1994 again:
1995         if (++loops > 100) {
1996                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1997                 perf_event_print_debug();
1998                 intel_pmu_reset();
1999                 perf_enable();
2000                 return 1;
2001         }
2002
2003         inc_irq_stat(apic_perf_irqs);
2004         ack = status;
2005         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2006                 struct perf_event *event = cpuc->events[bit];
2007
2008                 clear_bit(bit, (unsigned long *) &status);
2009                 if (!test_bit(bit, cpuc->active_mask))
2010                         continue;
2011
2012                 if (!intel_pmu_save_and_restart(event))
2013                         continue;
2014
2015                 data.period = event->hw.last_period;
2016
2017                 if (perf_event_overflow(event, 1, &data, regs))
2018                         intel_pmu_disable_event(&event->hw, bit);
2019         }
2020
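        /*
         * Acknowledge the overflow bits we collected: writing them to
         * MSR_CORE_PERF_GLOBAL_OVF_CTRL clears them in GLOBAL_STATUS.
         */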
2021         intel_pmu_ack_status(ack);
2022
2023         /*
2024          * Repeat if there is more work to be done:
2025          */
2026         status = intel_pmu_get_status();
2027         if (status)
2028                 goto again;
2029
2030         perf_enable();
2031
2032         return 1;
2033 }
2034
2035 static int amd_pmu_handle_irq(struct pt_regs *regs)
2036 {
2037         struct perf_sample_data data;
2038         struct cpu_hw_events *cpuc;
2039         struct perf_event *event;
2040         struct hw_perf_event *hwc;
2041         int idx, handled = 0;
2042         u64 val;
2043
2044         data.addr = 0;
2045         data.raw = NULL;
2046
2047         cpuc = &__get_cpu_var(cpu_hw_events);
2048
2049         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2050                 if (!test_bit(idx, cpuc->active_mask))
2051                         continue;
2052
2053                 event = cpuc->events[idx];
2054                 hwc = &event->hw;
2055
2056                 val = x86_perf_event_update(event, hwc, idx);
2057                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2058                         continue;
2059
2060                 /*
2061                  * event overflow
2062                  */
2063                 handled         = 1;
2064                 data.period     = event->hw.last_period;
2065
2066                 if (!x86_perf_event_set_period(event, hwc, idx))
2067                         continue;
2068
2069                 if (perf_event_overflow(event, 1, &data, regs))
2070                         amd_pmu_disable_event(hwc, idx);
2071         }
2072
2073         if (handled)
2074                 inc_irq_stat(apic_perf_irqs);
2075
2076         return handled;
2077 }
2078
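/*
 * Deferred (non-NMI) perf work: set_perf_event_pending() below sends a
 * self-IPI on LOCAL_PENDING_VECTOR, and this handler then runs the
 * pending work via perf_event_do_pending():
 */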
2079 void smp_perf_pending_interrupt(struct pt_regs *regs)
2080 {
2081         irq_enter();
2082         ack_APIC_irq();
2083         inc_irq_stat(apic_pending_irqs);
2084         perf_event_do_pending();
2085         irq_exit();
2086 }
2087
2088 void set_perf_event_pending(void)
2089 {
2090 #ifdef CONFIG_X86_LOCAL_APIC
2091         if (!x86_pmu.apic || !x86_pmu_initialized())
2092                 return;
2093
2094         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2095 #endif
2096 }
2097
2098 void perf_events_lapic_init(void)
2099 {
2100 #ifdef CONFIG_X86_LOCAL_APIC
2101         if (!x86_pmu.apic || !x86_pmu_initialized())
2102                 return;
2103
2104         /*
2105          * Always use NMI for PMU
2106          */
2107         apic_write(APIC_LVTPC, APIC_DM_NMI);
2108 #endif
2109 }
2110
2111 static int __kprobes
2112 perf_event_nmi_handler(struct notifier_block *self,
2113                          unsigned long cmd, void *__args)
2114 {
2115         struct die_args *args = __args;
2116         struct pt_regs *regs;
2117
2118         if (!atomic_read(&active_events))
2119                 return NOTIFY_DONE;
2120
2121         switch (cmd) {
2122         case DIE_NMI:
2123         case DIE_NMI_IPI:
2124                 break;
2125
2126         default:
2127                 return NOTIFY_DONE;
2128         }
2129
2130         regs = args->regs;
2131
2132 #ifdef CONFIG_X86_LOCAL_APIC
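        /*
         * Re-arm the LVT entry: the APIC may mask LVTPC when a PMI is
         * delivered, so rewrite it before handling the overflow.
         */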
2133         apic_write(APIC_LVTPC, APIC_DM_NMI);
2134 #endif
2135         /*
2136          * Can't rely on the handled return value to say it was our NMI, two
2137          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2138          *
2139          * If the first NMI handles both, the latter will be empty and daze
2140          * the CPU.
2141          */
2142         x86_pmu.handle_irq(regs);
2143
2144         return NOTIFY_STOP;
2145 }
2146
2147 static struct event_constraint bts_constraint = {
2148         .code = 0,
2149         .cmask = 0,
2150         .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
2151 };
2152
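/*
 * A branch-instructions event with a sample period of 1 can only be
 * serviced by BTS, so constrain such events to the BTS fixed index:
 */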
2153 static int intel_special_constraints(struct perf_event *event,
2154                                      u64 *idxmsk)
2155 {
2156         unsigned int hw_event;
2157
2158         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2159
2160         if (unlikely((hw_event ==
2161                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2162                      (event->hw.sample_period == 1))) {
2163
2164                 bitmap_copy((unsigned long *)idxmsk,
2165                             (unsigned long *)bts_constraint.idxmsk,
2166                             X86_PMC_IDX_MAX);
2167                 return 1;
2168         }
2169         return 0;
2170 }
2171
2172 static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2173                                         struct perf_event *event,
2174                                         u64 *idxmsk)
2175 {
2176         const struct event_constraint *c;
2177
2178         /*
2179          * cleanup bitmask
2180          */
2181         bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX);
2182
2183         if (intel_special_constraints(event, idxmsk))
2184                 return;
2185
2186         if (x86_pmu.event_constraints) {
2187                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2188                         if ((event->hw.config & c->cmask) == c->code) {
2189
2190                                 bitmap_copy((unsigned long *)idxmsk,
2191                                             (unsigned long *)c->idxmsk,
2192                                             X86_PMC_IDX_MAX);
2193                                 return;
2194                         }
2195                 }
2196         }
2197         /* no constraints means the event can use any generic counter */
2198         bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
2199 }
2200
2201 static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
2202                                       struct perf_event *event,
2203                                       u64 *idxmsk)
2204 {
2205         /* no constraints means the event can use any generic counter */
2206         bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
2207 }
2208
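/*
 * Mark an event as scheduled in on this CPU. Only non-x86 (software)
 * events are enabled here directly; x86 events are activated later by
 * hw_perf_enable():
 */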
2209 static int x86_event_sched_in(struct perf_event *event,
2210                           struct perf_cpu_context *cpuctx, int cpu)
2211 {
2212         int ret = 0;
2213
2214         event->state = PERF_EVENT_STATE_ACTIVE;
2215         event->oncpu = cpu;
2216         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2217
2218         if (!is_x86_event(event))
2219                 ret = event->pmu->enable(event);
2220
2221         if (!ret && !is_software_event(event))
2222                 cpuctx->active_oncpu++;
2223
2224         if (!ret && event->attr.exclusive)
2225                 cpuctx->exclusive = 1;
2226
2227         return ret;
2228 }
2229
2230 static void x86_event_sched_out(struct perf_event *event,
2231                             struct perf_cpu_context *cpuctx, int cpu)
2232 {
2233         event->state = PERF_EVENT_STATE_INACTIVE;
2234         event->oncpu = -1;
2235
2236         if (!is_x86_event(event))
2237                 event->pmu->disable(event);
2238
2239         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2240
2241         if (!is_software_event(event))
2242                 cpuctx->active_oncpu--;
2243
2244         if (event->attr.exclusive || !cpuctx->active_oncpu)
2245                 cpuctx->exclusive = 0;
2246 }
2247
2248 /*
2249  * Called to enable a whole group of events.
2250  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2251  * Assumes the caller has disabled interrupts and has
2252  * frozen the PMU with hw_perf_save_disable.
2253  *
2254  * Called with the PMU disabled. If successful and the return value is 1,
2255  * the caller is then guaranteed to call perf_enable() and hw_perf_enable().
2256  */
2257 int hw_perf_group_sched_in(struct perf_event *leader,
2258                struct perf_cpu_context *cpuctx,
2259                struct perf_event_context *ctx, int cpu)
2260 {
2261         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2262         struct perf_event *sub;
2263         int assign[X86_PMC_IDX_MAX];
2264         int n0, n1, ret;
2265
2266         /* n0 = total number of events */
2267         n0 = collect_events(cpuc, leader, true);
2268         if (n0 < 0)
2269                 return n0;
2270
2271         ret = x86_schedule_events(cpuc, n0, assign);
2272         if (ret)
2273                 return ret;
2274
2275         ret = x86_event_sched_in(leader, cpuctx, cpu);
2276         if (ret)
2277                 return ret;
2278
2279         n1 = 1;
2280         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2281                 if (sub->state > PERF_EVENT_STATE_OFF) {
2282                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2283                         if (ret)
2284                                 goto undo;
2285                         ++n1;
2286                 }
2287         }
2288         /*
2289          * copy new assignment, now we know it is possible
2290          * will be used by hw_perf_enable()
2291          */
2292         memcpy(cpuc->assign, assign, n0*sizeof(int));
2293
2294         cpuc->n_events  = n0;
2295         cpuc->n_added   = n1;
2296         ctx->nr_active += n1;
2297
2298         /*
2299          * 1 means successful and events are active.
2300          * This is not quite true because we defer
2301          * the actual activation until hw_perf_enable(),
2302          * but this way we ensure the caller won't try
2303          * to enable individual events.
2304          */
2305         return 1;
2306 undo:
2307         x86_event_sched_out(leader, cpuctx, cpu);
2308         n0  = 1;
2309         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2310                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2311                         x86_event_sched_out(sub, cpuctx, cpu);
2312                         if (++n0 == n1)
2313                                 break;
2314                 }
2315         }
2316         return ret;
2317 }
2318
2319 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2320         .notifier_call          = perf_event_nmi_handler,
2321         .next                   = NULL,
2322         .priority               = 1
2323 };
2324
2325 static __initconst struct x86_pmu p6_pmu = {
2326         .name                   = "p6",
2327         .handle_irq             = p6_pmu_handle_irq,
2328         .disable_all            = p6_pmu_disable_all,
2329         .enable_all             = p6_pmu_enable_all,
2330         .enable                 = p6_pmu_enable_event,
2331         .disable                = p6_pmu_disable_event,
2332         .eventsel               = MSR_P6_EVNTSEL0,
2333         .perfctr                = MSR_P6_PERFCTR0,
2334         .event_map              = p6_pmu_event_map,
2335         .raw_event              = p6_pmu_raw_event,
2336         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2337         .apic                   = 1,
2338         .max_period             = (1ULL << 31) - 1,
2339         .version                = 0,
2340         .num_events             = 2,
2341         /*
2342          * Events have 40 bits implemented. However they are designed such
2343          * that bits [32-39] are sign extensions of bit 31. As such the
2344          * effective width of an event for a P6-like PMU is 32 bits only.
2345          *
2346          * See the IA-32 Intel Architecture Software Developer's Manual Vol 3B
2347          */
2348         .event_bits             = 32,
2349         .event_mask             = (1ULL << 32) - 1,
2350         .get_event_constraints  = intel_get_event_constraints,
2351         .event_constraints      = intel_p6_event_constraints
2352 };
2353
2354 static __initconst struct x86_pmu intel_pmu = {
2355         .name                   = "Intel",
2356         .handle_irq             = intel_pmu_handle_irq,
2357         .disable_all            = intel_pmu_disable_all,
2358         .enable_all             = intel_pmu_enable_all,
2359         .enable                 = intel_pmu_enable_event,
2360         .disable                = intel_pmu_disable_event,
2361         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2362         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2363         .event_map              = intel_pmu_event_map,
2364         .raw_event              = intel_pmu_raw_event,
2365         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2366         .apic                   = 1,
2367         /*
2368          * Intel PMCs cannot be accessed sanely above 32 bit width,
2369          * so we install an artificial 1<<31 period regardless of
2370          * the generic event period:
2371          */
2372         .max_period             = (1ULL << 31) - 1,
2373         .enable_bts             = intel_pmu_enable_bts,
2374         .disable_bts            = intel_pmu_disable_bts,
2375         .get_event_constraints  = intel_get_event_constraints
2376 };
2377
2378 static __initconst struct x86_pmu amd_pmu = {
2379         .name                   = "AMD",
2380         .handle_irq             = amd_pmu_handle_irq,
2381         .disable_all            = amd_pmu_disable_all,
2382         .enable_all             = amd_pmu_enable_all,
2383         .enable                 = amd_pmu_enable_event,
2384         .disable                = amd_pmu_disable_event,
2385         .eventsel               = MSR_K7_EVNTSEL0,
2386         .perfctr                = MSR_K7_PERFCTR0,
2387         .event_map              = amd_pmu_event_map,
2388         .raw_event              = amd_pmu_raw_event,
2389         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2390         .num_events             = 4,
2391         .event_bits             = 48,
2392         .event_mask             = (1ULL << 48) - 1,
2393         .apic                   = 1,
2394         /* use highest bit to detect overflow */
2395         .max_period             = (1ULL << 47) - 1,
2396         .get_event_constraints  = amd_get_event_constraints
2397 };
2398
2399 static __init int p6_pmu_init(void)
2400 {
2401         switch (boot_cpu_data.x86_model) {
2402         case 1:
2403         case 3:  /* Pentium Pro */
2404         case 5:
2405         case 6:  /* Pentium II */
2406         case 7:
2407         case 8:
2408         case 11: /* Pentium III */
2409         case 9:
2410         case 13:
2411                 /* Pentium M */
2412                 break;
2413         default:
2414                 pr_cont("unsupported p6 CPU model %d ",
2415                         boot_cpu_data.x86_model);
2416                 return -ENODEV;
2417         }
2418
2419         x86_pmu = p6_pmu;
2420
2421         return 0;
2422 }
2423
2424 static __init int intel_pmu_init(void)
2425 {
2426         union cpuid10_edx edx;
2427         union cpuid10_eax eax;
2428         unsigned int unused;
2429         unsigned int ebx;
2430         int version;
2431
2432         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2433                 /* check for P6 processor family */
2434                 if (boot_cpu_data.x86 == 6) {
2435                         return p6_pmu_init();
2436                 } else {
2437                         return -ENODEV;
2438                 }
2439         }
2440
2441         /*
2442          * Check whether the Architectural PerfMon supports
2443          * the Branch Misses Retired hw_event.
2444          */
2445         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2446         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2447                 return -ENODEV;
2448
2449         version = eax.split.version_id;
2450         if (version < 2)
2451                 return -ENODEV;
2452
2453         x86_pmu                         = intel_pmu;
2454         x86_pmu.version                 = version;
2455         x86_pmu.num_events              = eax.split.num_events;
2456         x86_pmu.event_bits              = eax.split.bit_width;
2457         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2458
2459         /*
2460          * Quirk: v2 perfmon does not report fixed-purpose events, so
2461          * assume at least 3 events:
2462          */
2463         x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
2464
2465         /*
2466          * Install the hw-cache-events table:
2467          */
2468         switch (boot_cpu_data.x86_model) {
2469         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2470         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2471         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2472         case 29: /* six-core 45 nm xeon "Dunnington" */
2473                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2474                        sizeof(hw_cache_event_ids));
2475
2476                 x86_pmu.event_constraints = intel_core_event_constraints;
2477                 pr_cont("Core2 events, ");
2478                 break;
2479         case 26:
2480                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2481                        sizeof(hw_cache_event_ids));
2482
2483                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2484                 pr_cont("Nehalem/Corei7 events, ");
2485                 break;
2486         case 28:
2487                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2488                        sizeof(hw_cache_event_ids));
2489
2490                 x86_pmu.event_constraints = intel_gen_event_constraints;
2491                 pr_cont("Atom events, ");
2492                 break;
2493         default:
2494                 /*
2495                  * default constraints for v2 and up
2496                  */
2497                 x86_pmu.event_constraints = intel_gen_event_constraints;
2498                 pr_cont("generic architected perfmon, ");
2499         }
2500         return 0;
2501 }
2502
2503 static __init int amd_pmu_init(void)
2504 {
2505         /* Performance-monitoring supported from K7 and later: */
2506         if (boot_cpu_data.x86 < 6)
2507                 return -ENODEV;
2508
2509         x86_pmu = amd_pmu;
2510
2511         /* Events are common for all AMDs */
2512         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2513                sizeof(hw_cache_event_ids));
2514
2515         return 0;
2516 }
2517
2518 static void __init pmu_check_apic(void)
2519 {
2520         if (cpu_has_apic)
2521                 return;
2522
2523         x86_pmu.apic = 0;
2524         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2525         pr_info("no hardware sampling interrupt available.\n");
2526 }
2527
2528 void __init init_hw_perf_events(void)
2529 {
2530         int err;
2531
2532         pr_info("Performance Events: ");
2533
2534         switch (boot_cpu_data.x86_vendor) {
2535         case X86_VENDOR_INTEL:
2536                 err = intel_pmu_init();
2537                 break;
2538         case X86_VENDOR_AMD:
2539                 err = amd_pmu_init();
2540                 break;
2541         default:
2542                 return;
2543         }
2544         if (err != 0) {
2545                 pr_cont("no PMU driver, software events only.\n");
2546                 return;
2547         }
2548
2549         pmu_check_apic();
2550
2551         pr_cont("%s PMU driver.\n", x86_pmu.name);
2552
2553         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2554                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2555                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2556                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2557         }
2558         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2559         perf_max_events = x86_pmu.num_events;
2560
2561         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2562                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2563                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2564                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2565         }
2566
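        /*
         * Fixed counters occupy the bits starting at X86_PMC_IDX_FIXED.
         * E.g. 4 generic + 3 fixed counters (with X86_PMC_IDX_FIXED
         * being 32) gives perf_event_mask == 0x70000000f.
         */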
2567         perf_event_mask |=
2568                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2569         x86_pmu.intel_ctrl = perf_event_mask;
2570
2571         perf_events_lapic_init();
2572         register_die_notifier(&perf_event_nmi_notifier);
2573
2574         pr_info("... version:                %d\n",     x86_pmu.version);
2575         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2576         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2577         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2578         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2579         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2580         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2581 }
2582
2583 static inline void x86_pmu_read(struct perf_event *event)
2584 {
2585         x86_perf_event_update(event, &event->hw, event->hw.idx);
2586 }
2587
2588 static const struct pmu pmu = {
2589         .enable         = x86_pmu_enable,
2590         .disable        = x86_pmu_disable,
2591         .read           = x86_pmu_read,
2592         .unthrottle     = x86_pmu_unthrottle,
2593 };
2594
2595 /*
2596  * validate a single event group
2597  *
2598  * validation includes:
2599  *      - check events are compatible with each other
2600  *      - events do not compete for the same counter
2601  *      - number of events <= number of counters
2602  *
2603  * validation ensures the group can be loaded onto the
2604  * PMU if it was the only group available.
2605  */
2606 static int validate_group(struct perf_event *event)
2607 {
2608         struct perf_event *leader = event->group_leader;
2609         struct cpu_hw_events fake_cpuc;
2610         int n;
2611
2612         memset(&fake_cpuc, 0, sizeof(fake_cpuc));
2613
2614         /*
2615          * The event is not yet connected with its
2616          * siblings, therefore we must first collect the
2617          * existing siblings and then add the new event
2618          * before we can simulate the scheduling.
2619          */
2620         n = collect_events(&fake_cpuc, leader, true);
2621         if (n < 0)
2622                 return -ENOSPC;
2623
2624         fake_cpuc.n_events = n;
2625         n = collect_events(&fake_cpuc, event, false);
2626         if (n < 0)
2627                 return -ENOSPC;
2628
2629         fake_cpuc.n_events = n;
2630
2631         return x86_schedule_events(&fake_cpuc, n, NULL);
2632 }
2633
2634 const struct pmu *hw_perf_event_init(struct perf_event *event)
2635 {
2636         const struct pmu *tmp;
2637         int err;
2638
2639         err = __hw_perf_event_init(event);
2640         if (!err) {
2641                 /*
2642                  * we temporarily connect the event to its pmu
2643                  * such that validate_group() can classify
2644                  * it as an x86 event using is_x86_event()
2645                  */
2646                 tmp = event->pmu;
2647                 event->pmu = &pmu;
2648
2649                 if (event->group_leader != event)
2650                         err = validate_group(event);
2651
2652                 event->pmu = tmp;
2653         }
2654         if (err) {
2655                 if (event->destroy)
2656                         event->destroy(event);
2657                 return ERR_PTR(err);
2658         }
2659
2660         return &pmu;
2661 }
2662
2663 /*
2664  * callchain support
2665  */
2666
2667 static inline
2668 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2669 {
2670         if (entry->nr < PERF_MAX_STACK_DEPTH)
2671                 entry->ip[entry->nr++] = ip;
2672 }
2673
2674 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2675 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2676
2677
2678 static void
2679 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2680 {
2681         /* Ignore warnings */
2682 }
2683
2684 static void backtrace_warning(void *data, char *msg)
2685 {
2686         /* Ignore warnings */
2687 }
2688
2689 static int backtrace_stack(void *data, char *name)
2690 {
2691         return 0;
2692 }
2693
2694 static void backtrace_address(void *data, unsigned long addr, int reliable)
2695 {
2696         struct perf_callchain_entry *entry = data;
2697
2698         if (reliable)
2699                 callchain_store(entry, addr);
2700 }
2701
2702 static const struct stacktrace_ops backtrace_ops = {
2703         .warning                = backtrace_warning,
2704         .warning_symbol         = backtrace_warning_symbol,
2705         .stack                  = backtrace_stack,
2706         .address                = backtrace_address,
2707         .walk_stack             = print_context_stack_bp,
2708 };
2709
2710 #include "../dumpstack.h"
2711
2712 static void
2713 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2714 {
2715         callchain_store(entry, PERF_CONTEXT_KERNEL);
2716         callchain_store(entry, regs->ip);
2717
2718         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2719 }
2720
2721 /*
2722  * best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context
2723  */
2724 static unsigned long
2725 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2726 {
2727         unsigned long offset, addr = (unsigned long)from;
2728         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2729         unsigned long size, len = 0;
2730         struct page *page;
2731         void *map;
2732         int ret;
2733
2734         do {
2735                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2736                 if (!ret)
2737                         break;
2738
2739                 offset = addr & (PAGE_SIZE - 1);
2740                 size = min(PAGE_SIZE - offset, n - len);
2741
2742                 map = kmap_atomic(page, type);
2743                 memcpy(to, map+offset, size);
2744                 kunmap_atomic(map, type);
2745                 put_page(page);
2746
2747                 len  += size;
2748                 to   += size;
2749                 addr += size;
2750
2751         } while (len < n);
2752
2753         return len;
2754 }
2755
2756 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2757 {
2758         unsigned long bytes;
2759
2760         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2761
2762         return bytes == sizeof(*frame);
2763 }
2764
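/*
 * Walk the user stack by following the frame-pointer chain, copying
 * each stack_frame with the NMI-safe copy_from_user_nmi() helper:
 */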
2765 static void
2766 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2767 {
2768         struct stack_frame frame;
2769         const void __user *fp;
2770
2771         if (!user_mode(regs))
2772                 regs = task_pt_regs(current);
2773
2774         fp = (void __user *)regs->bp;
2775
2776         callchain_store(entry, PERF_CONTEXT_USER);
2777         callchain_store(entry, regs->ip);
2778
2779         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2780                 frame.next_frame     = NULL;
2781                 frame.return_address = 0;
2782
2783                 if (!copy_stack_frame(fp, &frame))
2784                         break;
2785
2786                 if ((unsigned long)fp < regs->sp)
2787                         break;
2788
2789                 callchain_store(entry, frame.return_address);
2790                 fp = frame.next_frame;
2791         }
2792 }
2793
2794 static void
2795 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2796 {
2797         int is_user;
2798
2799         if (!regs)
2800                 return;
2801
2802         is_user = user_mode(regs);
2803
2804         if (is_user && current->state != TASK_RUNNING)
2805                 return;
2806
2807         if (!is_user)
2808                 perf_callchain_kernel(regs, entry);
2809
2810         if (current->mm)
2811                 perf_callchain_user(regs, entry);
2812 }
2813
2814 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2815 {
2816         struct perf_callchain_entry *entry;
2817
2818         if (in_nmi())
2819                 entry = &__get_cpu_var(pmc_nmi_entry);
2820         else
2821                 entry = &__get_cpu_var(pmc_irq_entry);
2822
2823         entry->nr = 0;
2824
2825         perf_do_callchain(regs, entry);
2826
2827         return entry;
2828 }
2829
2830 void hw_perf_event_setup_online(int cpu)
2831 {
2832         init_debug_store_on_cpu(cpu);
2833 }