#ifdef CONFIG_CPU_SUP_INTEL

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
};

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes near impossible to get a reliable stack.
 */

#define X86_DEBUGCTL_LBR			(1 << 0)
#define X86_DEBUGCTL_FREEZE_LBRS_ON_PMI		(1 << 11)

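/*
 * Note (arithmetic illustration only): enabling ORs both bits into
 * IA32_DEBUGCTL at once, i.e. the value 0x801. Bit 0 starts branch
 * recording; bit 11 freezes the LBR stack when a PMI is raised, so the
 * PMI handler's own branches do not overwrite the sampled stack.
 */
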
static void __intel_pmu_lbr_enable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= (X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
	}
}

static void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

static void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	WARN_ON_ONCE(cpuc->enabled);

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}

	cpuc->lbr_users++;
}

static void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);

	if (cpuc->enabled && !cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable();
}

static void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	/* Walk the ring from the most recent entry (TOS) backwards. */
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from  = msr_lastbranch.from;
		cpuc->lbr_entries[i].to    = msr_lastbranch.to;
		cpuc->lbr_entries[i].flags = 0;
	}
	cpuc->lbr_stack.nr = i;
}

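/*
 * Worked example (values assumed, not from hardware): the LBR MSRs form
 * a ring of x86_pmu.lbr_nr entries indexed via the TOS pointer. With
 * lbr_nr = 4 (mask = 3) and tos = 1, the loop above visits lbr_idx
 * 1, 0, 3, 2, i.e. most recent branch first, wrapping around the ring.
 */
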
#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	/* Walk the ring from the most recent entry (TOS) backwards. */
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, flags = 0;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
			flags = !!(from & LBR_FROM_FLAG_MISPRED);
			/*
			 * Strip the mispredict bit and sign-extend bit 62
			 * into bit 63 to restore the canonical address.
			 */
			from = (u64)((((s64)from) << 1) >> 1);
		}

		cpuc->lbr_entries[i].from  = from;
		cpuc->lbr_entries[i].to    = to;
		cpuc->lbr_entries[i].flags = flags;
	}
	cpuc->lbr_stack.nr = i;
}

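/*
 * Worked example (illustrative value, not measured): with
 * LBR_FORMAT_EIP_FLAGS, a raw LBR_FROM value of 0x80007f0012345678
 * (user address 0x00007f0012345678 with LBR_FROM_FLAG_MISPRED set)
 * yields flags = 1; the shift pair drops bit 63 and arithmetic-shifts
 * bit 62 back in, recovering from = 0x00007f0012345678. A kernel
 * address such as 0xffffffff81000000 has bit 62 set, so the sign
 * extension restores bit 63 and the canonical address survives intact.
 */
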
static void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);
}

static void intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr	   = 4;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to     = 0x60;
}

static void intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr	   = 16;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x680;
	x86_pmu.lbr_to     = 0x6c0;
}

static void intel_pmu_lbr_init_atom(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to     = 0x60;
}

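/*
 * Sketch only: these init functions are expected to be called from the
 * CPU-model dispatch in intel_pmu_init() (perf_event_intel.c). The model
 * numbers below are assumptions for illustration, not part of this file:
 *
 *	switch (boot_cpu_data.x86_model) {
 *	case 15 ... 23:			Core2
 *		intel_pmu_lbr_init_core();
 *		break;
 *	case 26: case 30: case 31:	Nehalem
 *		intel_pmu_lbr_init_nhm();
 *		break;
 *	case 28:			Atom
 *		intel_pmu_lbr_init_atom();
 *		break;
 *	}
 */
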
#endif /* CONFIG_CPU_SUP_INTEL */