1 #ifdef CONFIG_CPU_SUP_INTEL
4 * Intel PerfMon, used on Core and later.
6 static const u64 intel_perfmon_event_map[] =
8 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
9 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
10 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
11 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
12 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
13 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
14 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
17 static struct event_constraint intel_core_event_constraints[] =
19 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
20 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
21 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
22 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
23 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
24 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
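/*
 * Illustration (not part of the original file): the second argument of
 * INTEL_EVENT_CONSTRAINT() is a bitmask of the generic counters the event
 * is allowed to use, so 0x1 means "PMC0 only", 0x2 means "PMC1 only" and
 * 0x3 means "PMC0 or PMC1".  A hypothetical helper decoding such a mask:
 */
static inline int example_event_may_use_counter(u64 counter_mask, int idx)
{
	/* e.g. MUL above (mask 0x2) may only be scheduled on counter 1 */
	return !!(counter_mask & (1ULL << idx));
}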
28 static struct event_constraint intel_core2_event_constraints[] =
30 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
31 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
33 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
34 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
35 * ratio between these counters.
37 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
38 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
39 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
40 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
41 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
42 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
43 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
44 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
45 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
46 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
47 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
51 static struct event_constraint intel_nehalem_event_constraints[] =
53 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
54 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
55 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
56 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
57 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
58 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
59 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
60 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
61 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
62 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
63 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
67 static struct event_constraint intel_westmere_event_constraints[] =
69 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
70 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
71 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
72 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
73 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
74 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
78 static struct event_constraint intel_gen_event_constraints[] =
80 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
81 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
82 /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
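/*
 * Illustration (not part of the original file): FIXED_EVENT_CONSTRAINT(event, n)
 * in the tables above pins an architectural event to fixed-purpose counter n,
 * whose scheduling index is X86_PMC_IDX_FIXED + n.  For example, 0x00c0
 * (INST_RETIRED.ANY) maps to fixed counter 0 and 0x003c
 * (CPU_CLK_UNHALTED.CORE) to fixed counter 1, leaving the generic PMCs free
 * for other events.
 */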
86 static u64 intel_pmu_event_map(int hw_event)
88 return intel_perfmon_event_map[hw_event];
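/*
 * Illustration (not part of the original file): each entry of
 * intel_perfmon_event_map[] packs the architectural unit mask into bits 15:8
 * and the event select into bits 7:0.  For example,
 * intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES) returns 0x412e, i.e.
 * event 0x2e with unit mask 0x41 (LLC misses).  Hypothetical decode helpers:
 */
static inline u8 example_event_select(u64 config)
{
	return config & 0xff;			/* 0x2e in the example above */
}

static inline u8 example_unit_mask(u64 config)
{
	return (config >> 8) & 0xff;		/* 0x41 in the example above */
}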
91 static __initconst u64 westmere_hw_cache_event_ids
92 [PERF_COUNT_HW_CACHE_MAX]
93 [PERF_COUNT_HW_CACHE_OP_MAX]
94 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
98 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
99 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
102 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
103 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
105 [ C(OP_PREFETCH) ] = {
106 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
107 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
112 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
113 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
116 [ C(RESULT_ACCESS) ] = -1,
117 [ C(RESULT_MISS) ] = -1,
119 [ C(OP_PREFETCH) ] = {
120 [ C(RESULT_ACCESS) ] = 0x0,
121 [ C(RESULT_MISS) ] = 0x0,
126 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
127 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
130 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
131 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
133 [ C(OP_PREFETCH) ] = {
134 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
135 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
140 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
141 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
144 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
145 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
147 [ C(OP_PREFETCH) ] = {
148 [ C(RESULT_ACCESS) ] = 0x0,
149 [ C(RESULT_MISS) ] = 0x0,
154 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
155 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
158 [ C(RESULT_ACCESS) ] = -1,
159 [ C(RESULT_MISS) ] = -1,
161 [ C(OP_PREFETCH) ] = {
162 [ C(RESULT_ACCESS) ] = -1,
163 [ C(RESULT_MISS) ] = -1,
168 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
169 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
172 [ C(RESULT_ACCESS) ] = -1,
173 [ C(RESULT_MISS) ] = -1,
175 [ C(OP_PREFETCH) ] = {
176 [ C(RESULT_ACCESS) ] = -1,
177 [ C(RESULT_MISS) ] = -1,
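/*
 * Illustration (not part of the original file): this table (and the per-model
 * tables that follow) is indexed as [cache][op][result].  An entry of -1 marks
 * an op/result combination that is invalid for the hardware, while 0 marks one
 * for which no event is provided.  A hypothetical lookup mirroring how the
 * generic code consumes these tables:
 */
static inline int example_westmere_cache_config(int cache, int op, int result,
						u64 *config)
{
	u64 val = westmere_hw_cache_event_ids[cache][op][result];

	if (val == (u64)-1)
		return -EINVAL;		/* invalid combination on this CPU */
	if (val == 0)
		return -ENOENT;		/* no event provided for it */

	*config = val;
	return 0;
}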
182 static __initconst u64 nehalem_hw_cache_event_ids
183 [PERF_COUNT_HW_CACHE_MAX]
184 [PERF_COUNT_HW_CACHE_OP_MAX]
185 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
189 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
190 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
193 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
194 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
196 [ C(OP_PREFETCH) ] = {
197 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
198 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
203 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
204 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
207 [ C(RESULT_ACCESS) ] = -1,
208 [ C(RESULT_MISS) ] = -1,
210 [ C(OP_PREFETCH) ] = {
211 [ C(RESULT_ACCESS) ] = 0x0,
212 [ C(RESULT_MISS) ] = 0x0,
217 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
218 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
221 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
222 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
224 [ C(OP_PREFETCH) ] = {
225 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
226 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
231 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
232 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
235 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
236 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
238 [ C(OP_PREFETCH) ] = {
239 [ C(RESULT_ACCESS) ] = 0x0,
240 [ C(RESULT_MISS) ] = 0x0,
245 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
246 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
249 [ C(RESULT_ACCESS) ] = -1,
250 [ C(RESULT_MISS) ] = -1,
252 [ C(OP_PREFETCH) ] = {
253 [ C(RESULT_ACCESS) ] = -1,
254 [ C(RESULT_MISS) ] = -1,
259 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
260 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
263 [ C(RESULT_ACCESS) ] = -1,
264 [ C(RESULT_MISS) ] = -1,
266 [ C(OP_PREFETCH) ] = {
267 [ C(RESULT_ACCESS) ] = -1,
268 [ C(RESULT_MISS) ] = -1,
273 static __initconst u64 core2_hw_cache_event_ids
274 [PERF_COUNT_HW_CACHE_MAX]
275 [PERF_COUNT_HW_CACHE_OP_MAX]
276 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
280 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
281 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
284 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
285 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
287 [ C(OP_PREFETCH) ] = {
288 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
289 [ C(RESULT_MISS) ] = 0,
294 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
295 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
298 [ C(RESULT_ACCESS) ] = -1,
299 [ C(RESULT_MISS) ] = -1,
301 [ C(OP_PREFETCH) ] = {
302 [ C(RESULT_ACCESS) ] = 0,
303 [ C(RESULT_MISS) ] = 0,
308 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
309 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
312 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
313 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
315 [ C(OP_PREFETCH) ] = {
316 [ C(RESULT_ACCESS) ] = 0,
317 [ C(RESULT_MISS) ] = 0,
322 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
323 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
326 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
327 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
329 [ C(OP_PREFETCH) ] = {
330 [ C(RESULT_ACCESS) ] = 0,
331 [ C(RESULT_MISS) ] = 0,
336 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
337 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
340 [ C(RESULT_ACCESS) ] = -1,
341 [ C(RESULT_MISS) ] = -1,
343 [ C(OP_PREFETCH) ] = {
344 [ C(RESULT_ACCESS) ] = -1,
345 [ C(RESULT_MISS) ] = -1,
350 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
351 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
354 [ C(RESULT_ACCESS) ] = -1,
355 [ C(RESULT_MISS) ] = -1,
357 [ C(OP_PREFETCH) ] = {
358 [ C(RESULT_ACCESS) ] = -1,
359 [ C(RESULT_MISS) ] = -1,
364 static __initconst u64 atom_hw_cache_event_ids
365 [PERF_COUNT_HW_CACHE_MAX]
366 [PERF_COUNT_HW_CACHE_OP_MAX]
367 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
371 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
372 [ C(RESULT_MISS) ] = 0,
375 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
376 [ C(RESULT_MISS) ] = 0,
378 [ C(OP_PREFETCH) ] = {
379 [ C(RESULT_ACCESS) ] = 0x0,
380 [ C(RESULT_MISS) ] = 0,
385 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
386 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
389 [ C(RESULT_ACCESS) ] = -1,
390 [ C(RESULT_MISS) ] = -1,
392 [ C(OP_PREFETCH) ] = {
393 [ C(RESULT_ACCESS) ] = 0,
394 [ C(RESULT_MISS) ] = 0,
399 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
400 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
403 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
404 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
406 [ C(OP_PREFETCH) ] = {
407 [ C(RESULT_ACCESS) ] = 0,
408 [ C(RESULT_MISS) ] = 0,
413 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
414 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
417 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
418 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
420 [ C(OP_PREFETCH) ] = {
421 [ C(RESULT_ACCESS) ] = 0,
422 [ C(RESULT_MISS) ] = 0,
427 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
428 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
431 [ C(RESULT_ACCESS) ] = -1,
432 [ C(RESULT_MISS) ] = -1,
434 [ C(OP_PREFETCH) ] = {
435 [ C(RESULT_ACCESS) ] = -1,
436 [ C(RESULT_MISS) ] = -1,
441 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
442 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
445 [ C(RESULT_ACCESS) ] = -1,
446 [ C(RESULT_MISS) ] = -1,
448 [ C(OP_PREFETCH) ] = {
449 [ C(RESULT_ACCESS) ] = -1,
450 [ C(RESULT_MISS) ] = -1,
455 static u64 intel_pmu_raw_event(u64 hw_event)
457 #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
458 #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
459 #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
460 #define CORE_EVNTSEL_INV_MASK 0x00800000ULL
461 #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
463 #define CORE_EVNTSEL_MASK \
464 (INTEL_ARCH_EVTSEL_MASK | \
465 INTEL_ARCH_UNIT_MASK | \
466 INTEL_ARCH_EDGE_MASK | \
467 INTEL_ARCH_INV_MASK | \
470 return hw_event & CORE_EVNTSEL_MASK;
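/*
 * Illustration (not part of the original file): only the event select
 * (bits 7:0), unit mask (15:8), edge (18), invert (23) and counter mask
 * (31:24) fields of a raw config survive the masking above; the enable,
 * APIC-interrupt and ring-level bits are stripped here and set up again by
 * the generic event-configuration code.  For a hypothetical raw value:
 *
 *	u64 raw  = 0x004301b1;			// EN | OS | USR | umask 0x01 | event 0xb1
 *	u64 kept = intel_pmu_raw_event(raw);	// 0x000001b1: only umask and event remain
 */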
473 static void intel_pmu_disable_all(void)
475 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
477 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
479 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
480 intel_pmu_disable_bts();
482 intel_pmu_pebs_disable_all();
483 intel_pmu_lbr_disable_all();
486 static void intel_pmu_enable_all(int added)
488 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
490 intel_pmu_pebs_enable_all();
491 intel_pmu_lbr_enable_all();
492 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
494 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
495 struct perf_event *event =
496 cpuc->events[X86_PMC_IDX_FIXED_BTS];
498 if (WARN_ON_ONCE(!event))
501 intel_pmu_enable_bts(event->hw.config);
507 * Intel Errata AAK100 (model 26)
508 * Intel Errata AAP53 (model 30)
510 * These chips need to be 'reset' when adding counters by programming
511 * the magic three (non-counting) events 0x4300D2, 0x4300B1 and 0x4300B5
512 * either in sequence on the same PMC or on different PMCs.
514 static void intel_pmu_nhm_enable_all(int added)
517 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
520 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
521 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
522 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
524 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
525 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
527 for (i = 0; i < 3; i++) {
528 struct perf_event *event = cpuc->events[i];
533 __x86_pmu_enable_event(&event->hw);
536 intel_pmu_enable_all(added);
539 static inline u64 intel_pmu_get_status(void)
543 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
548 static inline void intel_pmu_ack_status(u64 ack)
550 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
553 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
555 int idx = hwc->idx - X86_PMC_IDX_FIXED;
558 mask = 0xfULL << (idx * 4);
560 rdmsrl(hwc->config_base, ctrl_val);
562 wrmsrl(hwc->config_base, ctrl_val);
565 static void intel_pmu_disable_event(struct perf_event *event)
567 struct hw_perf_event *hwc = &event->hw;
569 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
570 intel_pmu_disable_bts();
571 intel_pmu_drain_bts_buffer();
575 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
576 intel_pmu_disable_fixed(hwc);
580 x86_pmu_disable_event(event);
582 if (unlikely(event->attr.precise))
583 intel_pmu_pebs_disable(event);
586 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
588 int idx = hwc->idx - X86_PMC_IDX_FIXED;
589 u64 ctrl_val, bits, mask;
592 * Enable IRQ generation (0x8),
593 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
597 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
599 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
603 * ANY bit is supported in v3 and up
605 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
609 mask = 0xfULL << (idx * 4);
611 rdmsrl(hwc->config_base, ctrl_val);
614 wrmsrl(hwc->config_base, ctrl_val);
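/*
 * Illustration (not part of the original file): each fixed counter owns a
 * 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL at bits (idx * 4 + 3):(idx * 4).
 * For fixed counter 1 (CPU_CLK_UNHALTED.CORE) counting in both rings with a
 * PMI on overflow, the code above computes bits = 0x8 | 0x2 | 0x1 = 0xb and
 * writes 0xb << 4, updating bits 7:4 of the control MSR while leaving the
 * other counters' fields untouched (they are masked out via 0xf << (idx * 4)).
 */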
617 static void intel_pmu_enable_event(struct perf_event *event)
619 struct hw_perf_event *hwc = &event->hw;
621 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
622 if (!__get_cpu_var(cpu_hw_events).enabled)
625 intel_pmu_enable_bts(hwc->config);
629 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
630 intel_pmu_enable_fixed(hwc);
634 if (unlikely(event->attr.precise))
635 intel_pmu_pebs_enable(event);
637 __x86_pmu_enable_event(hwc);
641 * Save and restart an expired event. Called by NMI contexts,
642 * so it has to be careful about preempting normal event ops:
644 static int intel_pmu_save_and_restart(struct perf_event *event)
646 x86_perf_event_update(event);
647 return x86_perf_event_set_period(event);
650 static void intel_pmu_reset(void)
652 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
656 if (!x86_pmu.num_events)
659 local_irq_save(flags);
661 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
663 for (idx = 0; idx < x86_pmu.num_events; idx++) {
664 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
665 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
667 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
668 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
671 ds->bts_index = ds->bts_buffer_base;
673 local_irq_restore(flags);
677 * This handler is triggered by the local APIC, so the APIC IRQ handling
680 static int intel_pmu_handle_irq(struct pt_regs *regs)
682 struct perf_sample_data data;
683 struct cpu_hw_events *cpuc;
687 perf_sample_data_init(&data, 0);
689 cpuc = &__get_cpu_var(cpu_hw_events);
691 intel_pmu_disable_all();
692 intel_pmu_drain_bts_buffer();
693 status = intel_pmu_get_status();
695 intel_pmu_enable_all(0);
702 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
703 perf_event_print_debug();
708 inc_irq_stat(apic_perf_irqs);
711 intel_pmu_lbr_read();
714 * PEBS overflow sets bit 62 in the global status register
716 if (__test_and_clear_bit(62, (unsigned long *)&status))
717 x86_pmu.drain_pebs(regs);
719 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
720 struct perf_event *event = cpuc->events[bit];
722 if (!test_bit(bit, cpuc->active_mask))
725 if (!intel_pmu_save_and_restart(event))
728 data.period = event->hw.last_period;
730 if (perf_event_overflow(event, 1, &data, regs))
734 intel_pmu_ack_status(ack);
737 * Repeat if there is more work to be done:
739 status = intel_pmu_get_status();
744 intel_pmu_enable_all(0);
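/*
 * Illustration (not part of the original file): in MSR_CORE_PERF_GLOBAL_STATUS,
 * bit N signals an overflow of generic counter N, bit 32 + N an overflow of
 * fixed counter N, and (as noted above) bit 62 a PEBS buffer overflow.
 * Writing the same bit positions to MSR_CORE_PERF_GLOBAL_OVF_CTRL via
 * intel_pmu_ack_status() clears them, which is why the handler loops until
 * intel_pmu_get_status() reads back zero.
 */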
748 static struct event_constraint *
749 intel_bts_constraints(struct perf_event *event)
751 struct hw_perf_event *hwc = &event->hw;
752 unsigned int hw_event, bts_event;
754 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
755 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
757 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
758 return &bts_constraint;
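/*
 * Illustration (not part of the original file): the BTS fallback above is
 * chosen when a caller asks for BR_INST_RETIRED.ALL_BRANCHES with a sample
 * period of 1, e.g. with hypothetical attribute values:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
 *		.sample_period	= 1,
 *	};
 *
 * Any other period falls through to the normal counter constraints.
 */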
763 static struct event_constraint *
764 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
766 struct event_constraint *c;
768 c = intel_bts_constraints(event);
772 c = intel_pebs_constraints(event);
776 return x86_get_event_constraints(cpuc, event);
779 static __initconst struct x86_pmu core_pmu = {
781 .handle_irq = x86_pmu_handle_irq,
782 .disable_all = x86_pmu_disable_all,
783 .enable_all = x86_pmu_enable_all,
784 .enable = x86_pmu_enable_event,
785 .disable = x86_pmu_disable_event,
786 .hw_config = x86_hw_config,
787 .schedule_events = x86_schedule_events,
788 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
789 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
790 .event_map = intel_pmu_event_map,
791 .raw_event = intel_pmu_raw_event,
792 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
795 * Intel PMCs cannot be accessed sanely above 32-bit width,
796 * so we install an artificial 1<<31 period regardless of
797 * the generic event period:
799 .max_period = (1ULL << 31) - 1,
800 .get_event_constraints = intel_get_event_constraints,
801 .event_constraints = intel_core_event_constraints,
804 static void intel_pmu_cpu_starting(int cpu)
806 init_debug_store_on_cpu(cpu);
808 * Deal with CPUs that don't clear their LBRs on power-up.
810 intel_pmu_lbr_reset();
813 static void intel_pmu_cpu_dying(int cpu)
815 fini_debug_store_on_cpu(cpu);
818 static __initconst struct x86_pmu intel_pmu = {
820 .handle_irq = intel_pmu_handle_irq,
821 .disable_all = intel_pmu_disable_all,
822 .enable_all = intel_pmu_enable_all,
823 .enable = intel_pmu_enable_event,
824 .disable = intel_pmu_disable_event,
825 .hw_config = x86_hw_config,
826 .schedule_events = x86_schedule_events,
827 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
828 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
829 .event_map = intel_pmu_event_map,
830 .raw_event = intel_pmu_raw_event,
831 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
834 * Intel PMCs cannot be accessed sanely above 32-bit width,
835 * so we install an artificial 1<<31 period regardless of
836 * the generic event period:
838 .max_period = (1ULL << 31) - 1,
839 .get_event_constraints = intel_get_event_constraints,
841 .cpu_starting = intel_pmu_cpu_starting,
842 .cpu_dying = intel_pmu_cpu_dying,
845 static void intel_clovertown_quirks(void)
848 * PEBS is unreliable due to:
850 * AJ67 - PEBS may experience CPL leaks
851 * AJ68 - PEBS PMI may be delayed by one event
852 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
853 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
855 * AJ67 could be worked around by restricting the OS/USR flags.
856 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
858 * AJ106 could possibly be worked around by not allowing LBR
859 * usage from PEBS, including the fixup.
860 * AJ68 could possibly be worked around by always programming
861 * a pebs_event_reset[0] value and coping with the lost events.
863 * But taken together it might just make sense to not enable PEBS on
866 printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
868 x86_pmu.pebs_constraints = NULL;
871 static __init int intel_pmu_init(void)
873 union cpuid10_edx edx;
874 union cpuid10_eax eax;
879 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
880 switch (boot_cpu_data.x86) {
882 return p6_pmu_init();
884 return p4_pmu_init();
890 * Check whether the Architectural PerfMon supports
891 * Branch Misses Retired hw_event or not.
893 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
894 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
897 version = eax.split.version_id;
903 x86_pmu.version = version;
904 x86_pmu.num_events = eax.split.num_events;
905 x86_pmu.event_bits = eax.split.bit_width;
906 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
909 * Quirk: v2 perfmon does not report fixed-purpose events, so
910 * assume at least 3 events:
913 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
916 * v2 and above have a perf capabilities MSR
921 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
922 x86_pmu.intel_cap.capabilities = capabilities;
928 * Install the hw-cache-events table:
930 switch (boot_cpu_data.x86_model) {
931 case 14: /* 65 nm core solo/duo, "Yonah" */
932 pr_cont("Core events, ");
935 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
936 x86_pmu.quirks = intel_clovertown_quirks;
937 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
938 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
939 case 29: /* six-core 45 nm xeon "Dunnington" */
940 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
941 sizeof(hw_cache_event_ids));
943 intel_pmu_lbr_init_core();
945 x86_pmu.event_constraints = intel_core2_event_constraints;
946 pr_cont("Core2 events, ");
949 case 26: /* 45 nm nehalem, "Bloomfield" */
950 case 30: /* 45 nm nehalem, "Lynnfield" */
951 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
952 sizeof(hw_cache_event_ids));
954 intel_pmu_lbr_init_nhm();
956 x86_pmu.event_constraints = intel_nehalem_event_constraints;
957 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
958 pr_cont("Nehalem events, ");
962 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
963 sizeof(hw_cache_event_ids));
965 intel_pmu_lbr_init_atom();
967 x86_pmu.event_constraints = intel_gen_event_constraints;
968 pr_cont("Atom events, ");
971 case 37: /* 32 nm nehalem, "Clarkdale" */
972 case 44: /* 32 nm nehalem, "Gulftown" */
973 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
974 sizeof(hw_cache_event_ids));
976 intel_pmu_lbr_init_nhm();
978 x86_pmu.event_constraints = intel_westmere_event_constraints;
979 pr_cont("Westmere events, ");
984 * default constraints for v2 and up
986 x86_pmu.event_constraints = intel_gen_event_constraints;
987 pr_cont("generic architected perfmon, ");
992 #else /* CONFIG_CPU_SUP_INTEL */
994 static int intel_pmu_init(void)
999 #endif /* CONFIG_CPU_SUP_INTEL */