/*
 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
 * which was originally moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */

#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>

#include "mach_timer.h"

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;

int tsc_disable __cpuinitdata = 0;

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
                            "cannot disable TSC.\n");
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
        tsc_disable = 1;

        return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

static inline int check_tsc_unstable(void)
{
        return tsc_unstable;
}

void mark_tsc_unstable(void)
{
        tsc_unstable = 1;
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *      ns = cycles / (freq / ns_per_sec)
 *      ns = cycles * (ns_per_sec / freq)
 *      ns = cycles * (10^9 / (cpu_khz * 10^3))
 *      ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *      ns = cycles * (10^6 * SC / cpu_khz) / SC
 *      ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use a khz divisor instead of mhz to keep better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, let's go shopping!"
 */
static unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

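/*
 * A worked instance of the scaling above (illustrative numbers, not
 * anything this file computes): for a 2 GHz CPU, cpu_khz = 2000000, so
 *
 *      cyc2ns_scale = (1000000 << 10) / 2000000 = 512
 *
 * and one second's worth of cycles converts back to exactly one second:
 *
 *      cycles_2_ns(2000000000) = (2000000000 * 512) >> 10 = 10^9 ns
 */
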
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
        unsigned long long this_offset;

        /*
         * in the NUMA case we don't use the TSC as they are not
         * synchronized across all CPUs.
         */
#ifndef CONFIG_NUMA
        if (!cpu_khz || check_tsc_unstable())
#endif
                /* no locking but a rare wrong value is not a big deal */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}

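/*
 * On the jiffies fallback path above, resolution is one jiffy. An
 * illustrative figure (HZ is a config choice, not fixed by this file):
 * with HZ = 250 each jiffy is 1000000000 / 250 = 4000000 ns, so
 * sched_clock() advances in 4 ms steps until a usable TSC is available.
 */
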
static unsigned long calculate_cpu_khz(void)
{
        unsigned long long start, end;
        unsigned long count;
        u64 delta64;
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /* run 3 times to ensure the cache is warm */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);
        }
        /*
         * Error: ECTCNEVERSET
         * The CTC wasn't reliable: we got a hit on the very first read,
         * or the CPU was so fast/slow that the quotient wouldn't fit in
         * 32 bits.
         */
        if (count <= 1)
                goto err;

        delta64 = end - start;

        /* cpu freq too fast: */
        if (delta64 > (1ULL<<32))
                goto err;

        /* cpu freq too slow: */
        if (delta64 <= CALIBRATE_TIME_MSEC)
                goto err;

        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64, CALIBRATE_TIME_MSEC);

        local_irq_restore(flags);
        return (unsigned long)delta64;
err:
        local_irq_restore(flags);
        return 0;
}

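/*
 * The division above turns elapsed cycles into kHz because the PIT
 * window is CALIBRATE_TIME_MSEC milliseconds long (the value comes from
 * mach_timer.h). Illustrative numbers, assuming a 30 ms window and a
 * 2 GHz CPU: delta64 = 60000000 cycles elapse in the window, and
 *
 *      60000000 / 30 = 2000000 cycles/ms = 2000000 kHz = 2 GHz
 */
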
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
                cpu_data[0].loops_per_jiffy =
                        cpufreq_scale(cpu_data[0].loops_per_jiffy,
                                      cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

void __init tsc_init(void)
{
        if (!cpu_has_tsc || tsc_disable)
                return;

        cpu_khz = calculate_cpu_khz();
        tsc_khz = cpu_khz;

        if (!cpu_khz)
                return;

        printk("Detected %lu.%03lu MHz processor.\n",
               (unsigned long)cpu_khz / 1000,
               (unsigned long)cpu_khz % 1000);

        set_cyc2ns_scale(cpu_khz);
        use_tsc_delay();
}

#ifdef CONFIG_CPU_FREQ

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
        unsigned int cpu;

        for_each_online_cpu(cpu)
                cpufreq_get(cpu);

        cpufreq_delayed_issched = 0;
}

/*
 * If we notice cpufreq oddness, schedule a call to cpufreq_get(), which
 * verifies that the CPU frequency the timing core thinks the CPU is
 * running at is still correct.
 */
static inline void cpufreq_delayed_get(void)
{
        if (cpufreq_init && !cpufreq_delayed_issched) {
                cpufreq_delayed_issched = 1;
                printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
                schedule_work(&cpufreq_delayed_get_work);
        }
}

/*
 * If the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;

        if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
                write_seqlock_irq(&xtime_lock);

        if (!ref_freq) {
                if (!freq->old) {
                        ref_freq = freq->new;
                        goto end;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }

        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        cpu_data[freq->cpu].loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                              ref_freq, freq->new);

                if (cpu_khz) {
                        if (num_online_cpus() == 1)
                                cpu_khz = cpufreq_scale(cpu_khz_ref,
                                                        ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
                                set_cyc2ns_scale(cpu_khz);
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
                                mark_tsc_unstable();
                        }
                }
        }
end:
        if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
                write_sequnlock_irq(&xtime_lock);

        return 0;
}

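/*
 * A sketch of the scaling used above: cpufreq_scale(old, ref, new)
 * returns old * new / ref. Illustrative numbers (not from this file):
 * loops_per_jiffy_ref = 4000000 calibrated at ref_freq = 1000000 kHz
 * becomes
 *
 *      4000000 * 2000000 / 1000000 = 8000000
 *
 * after a switch to freq->new = 2000000 kHz, i.e. the delay-loop count
 * doubles along with the clock.
 */
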
static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        int ret;

        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
        ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
        if (!ret)
                cpufreq_init = 1;

        return ret;
}

core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;
static int tsc_update_callback(void);

static cycle_t read_tsc(void)
{
        cycle_t ret;

        rdtscll(ret);

        return ret;
}

static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .mult                   = 0, /* to be set */
        .shift                  = 22,
        .update_callback        = tsc_update_callback,
        .is_continuous          = 1,
};

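/*
 * How .mult/.shift are consumed (a sketch of the generic clocksource
 * math, with illustrative numbers): the core computes
 *
 *      ns = (cycles * mult) >> shift
 *
 * and clocksource_khz2mult() picks mult accordingly. For example, at
 * tsc_khz = 2000000 (2 GHz) with shift = 22:
 *
 *      mult = (1000000 << 22) / 2000000 = 2097152
 *      ns   = (cycles * 2097152) >> 22  = cycles / 2
 *
 * i.e. 0.5 ns per cycle, as a 2 GHz clock requires.
 */
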
static int tsc_update_callback(void)
{
        int change = 0;

        /* check to see if we should switch to the safe clocksource: */
        if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_reselect();
                change = 1;
        }

        /* only update if tsc_khz has changed: */
        if (current_tsc_khz != tsc_khz) {
                current_tsc_khz = tsc_khz;
                clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                        clocksource_tsc.shift);
                change = 1;
        }

        return change;
}

static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n", d->ident);
        mark_tsc_unstable();
        return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {
                .callback = dmi_mark_tsc_unstable,
                .ident = "IBM Thinkpad 380XD",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
                },
        },
        {}
};

#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10 seconds, in ms */
static struct timer_list verify_tsc_freq_timer;

/* XXX - Probably should add locking */
static void verify_tsc_freq(unsigned long unused)
{
        static u64 last_tsc;
        static unsigned long last_jiffies;

        u64 now_tsc, interval_tsc;
        unsigned long now_jiffies, interval_jiffies;

        if (check_tsc_unstable())
                return;

        rdtscll(now_tsc);
        now_jiffies = jiffies;

        if (!last_jiffies)
                goto out;

        interval_jiffies = now_jiffies - last_jiffies;
        interval_tsc = now_tsc - last_tsc;
        interval_tsc *= HZ;
        do_div(interval_tsc, cpu_khz*1000);

        if (interval_tsc < (interval_jiffies * 3 / 4)) {
                printk("TSC appears to be running slowly. "
                       "Marking it as unstable\n");
                mark_tsc_unstable();
                return;
        }

out:
        last_tsc = now_tsc;
        last_jiffies = now_jiffies;
        /* set us up to go off on the next interval: */
        mod_timer(&verify_tsc_freq_timer,
                  jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
}

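/*
 * The conversion in verify_tsc_freq() above turns elapsed cycles into
 * jiffies via interval_tsc * HZ / (cpu_khz * 1000), since cpu_khz * 1000
 * is cycles per second and HZ is jiffies per second. Illustrative
 * numbers: a 10 second interval at 2 GHz is 2*10^10 cycles, and with
 * HZ = 250 that works out to
 *
 *      2*10^10 * 250 / 2*10^9 = 2500 jiffies
 *
 * which is then compared against the ~2500 jiffies the jiffies counter
 * itself advanced; falling below 3/4 of that marks the TSC unstable.
 */
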
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
static __init int unsynchronized_tsc(void)
{
        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                return 0;

        /* assume multi socket systems are not synchronized: */
        return num_possible_cpus() > 1;
}

static int __init init_tsc_clocksource(void)
{
        if (cpu_has_tsc && tsc_khz && !tsc_disable) {
                /* check blacklist */
                dmi_check_system(bad_tsc_dmi_table);

                if (unsynchronized_tsc()) /* mark unstable if unsynced */
                        mark_tsc_unstable();
                current_tsc_khz = tsc_khz;
                clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                        clocksource_tsc.shift);
                /* lower the rating if we already know it's unstable: */
                if (check_tsc_unstable())
                        clocksource_tsc.rating = 0;

                init_timer(&verify_tsc_freq_timer);
                verify_tsc_freq_timer.function = verify_tsc_freq;
                verify_tsc_freq_timer.expires =
                        jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
                add_timer(&verify_tsc_freq_timer);

                return clocksource_register(&clocksource_tsc);
        }

        return 0;
}

module_init(init_tsc_clocksource);