#ifdef CONFIG_CPU_SUP_INTEL

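/*
 * LBR record format, as advertised by the LBR_FMT field
 * (bits 5:0) of MSR_IA32_PERF_CAPABILITIES.
 */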
enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
};

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
 * otherwise it becomes nearly impossible to get a reliable stack.
 */

#define X86_DEBUGCTL_LBR			(1 << 0)
#define X86_DEBUGCTL_FREEZE_LBRS_ON_PMI		(1 << 11)

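/*
 * Enable/disable by read-modify-write of MSR_IA32_DEBUGCTLMSR:
 * bit 0 (LBR) turns branch recording on; bit 11
 * (FREEZE_LBRS_ON_PMI) makes the hardware stop recording as soon
 * as a counter overflow raises the PMI, so the captured stack is
 * not polluted by the interrupt path itself.
 */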
static void __intel_pmu_lbr_enable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= (X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(X86_DEBUGCTL_LBR | X86_DEBUGCTL_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

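/*
 * In the 32-bit format each LBR entry is one MSR holding the
 * packed from/to pair, so clearing the FROM MSR wipes the whole
 * entry; the 64-bit formats keep FROM and TO in separate MSRs.
 */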
static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
	}
}

static void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

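/*
 * The per-event enable/disable below only maintain the lbr_users
 * refcount; the actual DEBUGCTL write is deferred to the *_all()
 * helpers, which run while the PMU as a whole is being enabled or
 * disabled (hence the WARN_ON_ONCE(cpuc->enabled)).
 */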
static void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	WARN_ON_ONCE(cpuc->enabled);

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}

	cpuc->lbr_users++;
}

static void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);

	if (cpuc->enabled && !cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

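/*
 * Presumably invoked from the PMU-wide enable/disable paths of
 * the driver; only touch DEBUGCTL when at least one event is
 * actually using the LBR.
 */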
static void intel_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable();
}

static void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

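/*
 * The TOS MSR holds the index of the most recently written entry
 * of the circular LBR stack.
 */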
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}

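/*
 * Walk the circular stack from the newest entry backwards;
 * (tos - i) & mask relies on lbr_nr being a power of two. The
 * union unpacks the 32-bit from/to pair that this format stores
 * in a single 64-bit MSR.
 */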
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64	lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from  = msr_lastbranch.from;
		cpuc->lbr_entries[i].to    = msr_lastbranch.to;
		cpuc->lbr_entries[i].flags = 0;
	}
	cpuc->lbr_stack.nr = i;
}

#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)

/*
 * Due to the lack of segmentation in Linux the effective address
 * (offset) is the same as the linear address, allowing us to merge
 * the LIP and EIP LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, flags = 0;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

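		/*
		 * In the EIP_FLAGS format bit 63 of the FROM value
		 * carries the branch-misprediction flag; extract it,
		 * then sign-extend bit 62 back into bit 63 so the
		 * address is canonical again.
		 */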
		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
			flags = !!(from & LBR_FROM_FLAG_MISPRED);
			from = (u64)((((s64)from) << 1) >> 1);
		}

		cpuc->lbr_entries[i].from  = from;
		cpuc->lbr_entries[i].to    = to;
		cpuc->lbr_entries[i].flags = flags;
	}
	cpuc->lbr_stack.nr = i;
}

static void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);
}

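/*
 * Raw MSR addresses per micro-architecture: 0x01c9 is
 * MSR_LASTBRANCH_TOS on all three; the FROM/TO base addresses
 * and the stack depth differ between Core (4 deep), Atom
 * (8 deep) and Nehalem (16 deep).
 */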
static void intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to     = 0x60;
}

static void intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x680;
	x86_pmu.lbr_to     = 0x6c0;
}

static void intel_pmu_lbr_init_atom(void)
{
	x86_pmu.lbr_nr     = 8;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to     = 0x60;
}

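/*
 * Rough sketch of the expected call sequence; this file is built
 * as part of the Intel PMU driver rather than standalone, and the
 * exact call sites below are an assumption based on that driver's
 * layout, not something this file establishes:
 *
 *	intel_pmu_lbr_init_nhm();	at boot, from CPU model detection
 *	intel_pmu_lbr_enable(event);	per event, with the PMU disabled
 *	intel_pmu_lbr_enable_all();	when the whole PMU is re-enabled
 *	intel_pmu_lbr_read();		from the PMI handler; afterwards
 *					cpuc->lbr_stack holds up to lbr_nr
 *					from/to/flags entries
 */
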
#endif /* CONFIG_CPU_SUP_INTEL */