]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Local APIC handling, local APIC timers | |
3 | * | |
4 | * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com> | |
5 | * | |
6 | * Fixes | |
7 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs; | |
8 | * thanks to Eric Gilmore | |
9 | * and Rolf G. Tews | |
10 | * for testing these extensively. | |
11 | * Maciej W. Rozycki : Various updates and fixes. | |
12 | * Mikael Pettersson : Power Management for UP-APIC. | |
13 | * Pavel Machek and | |
14 | * Mikael Pettersson : PM converted to driver model. | |
15 | */ | |
16 | ||
1da177e4 LT |
17 | #include <linux/init.h> |
18 | ||
19 | #include <linux/mm.h> | |
1da177e4 LT |
20 | #include <linux/delay.h> |
21 | #include <linux/bootmem.h> | |
22 | #include <linux/smp_lock.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/mc146818rtc.h> | |
25 | #include <linux/kernel_stat.h> | |
26 | #include <linux/sysdev.h> | |
f3705136 | 27 | #include <linux/cpu.h> |
6eb0a0fd | 28 | #include <linux/module.h> |
1da177e4 LT |
29 | |
30 | #include <asm/atomic.h> | |
31 | #include <asm/smp.h> | |
32 | #include <asm/mtrr.h> | |
33 | #include <asm/mpspec.h> | |
34 | #include <asm/desc.h> | |
35 | #include <asm/arch_hooks.h> | |
36 | #include <asm/hpet.h> | |
306e440d | 37 | #include <asm/i8253.h> |
3e4ff115 | 38 | #include <asm/nmi.h> |
1da177e4 LT |
39 | |
40 | #include <mach_apic.h> | |
382dbd07 | 41 | #include <mach_apicdef.h> |
6eb0a0fd | 42 | #include <mach_ipi.h> |
1da177e4 LT |
43 | |
44 | #include "io_ports.h" | |
45 | ||
6eb0a0fd VP |
/*
 * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
 * IPIs in place of local APIC timers
 */
static cpumask_t timer_bcast_ipi;

/*
 * Knob to control our willingness to enable the local APIC.
 * -1 = force-disable, 0 = BIOS default, +1 = force-enable.
 * __initdata: only consulted during boot-time APIC detection.
 */
static int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */

/* Force-disable the local APIC and drop its CPUID feature bit. */
static inline void lapic_disable(void)
{
        enable_local_apic = -1;
        clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
}

/* Force-enable the local APIC regardless of the BIOS default. */
static inline void lapic_enable(void)
{
        enable_local_apic = 1;
}
9635b47d | 67 | |
1da177e4 LT |
/*
 * Debug level: 0 = quiet, APIC_VERBOSE, APIC_DEBUG (see apic_printk users).
 */
int apic_verbosity;


static void apic_pm_activate(void);
75 | ||
a0b4da91 | 76 | static int modern_apic(void) |
95d769aa AK |
77 | { |
78 | unsigned int lvr, version; | |
79 | /* AMD systems use old APIC versions, so check the CPU */ | |
80 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | |
81 | boot_cpu_data.x86 >= 0xf) | |
82 | return 1; | |
83 | lvr = apic_read(APIC_LVR); | |
84 | version = GET_APIC_VERSION(lvr); | |
85 | return version >= 0x14; | |
86 | } | |
87 | ||
1da177e4 LT |
88 | /* |
89 | * 'what should we do if we get a hw irq event on an illegal vector'. | |
90 | * each architecture has to answer this themselves. | |
91 | */ | |
92 | void ack_bad_irq(unsigned int irq) | |
93 | { | |
94 | printk("unexpected IRQ trap at vector %02x\n", irq); | |
95 | /* | |
96 | * Currently unexpected vectors happen only on SMP and APIC. | |
97 | * We _must_ ack these because every local APIC has only N | |
98 | * irq slots per priority level, and a 'hanging, unacked' IRQ | |
99 | * holds up an irq slot - in excessive cases (when multiple | |
100 | * unexpected vectors occur) that might lock up the APIC | |
101 | * completely. | |
3777a959 | 102 | * But only ack when the APIC is enabled -AK |
1da177e4 | 103 | */ |
e1a8e6c9 | 104 | if (cpu_has_apic) |
3777a959 | 105 | ack_APIC_irq(); |
1da177e4 LT |
106 | } |
107 | ||
/*
 * Install interrupt gates for all locally-generated (LAPIC) vectors:
 * the per-CPU timer, the spurious/error vectors and, when configured,
 * the thermal monitor LVT.  SMP IPI vectors are installed first.
 */
void __init apic_intr_init(void)
{
#ifdef CONFIG_SMP
        smp_intr_init();
#endif
        /* self generated IPI for local APIC timer */
        set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

        /* IPI vectors for APIC spurious and error interrupts */
        set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

        /* thermal monitor LVT interrupt */
#ifdef CONFIG_X86_MCE_P4THERMAL
        set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
}
125 | ||
/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer __read_mostly = 0;

/* Set when we enabled the LAPIC ourselves via MSR_IA32_APICBASE in
 * detect_init_APIC(); checked again when disabling/shutting down. */
static int enabled_via_apicbase;
130 | ||
131 | void enable_NMI_through_LVT0 (void * dummy) | |
132 | { | |
133 | unsigned int v, ver; | |
134 | ||
135 | ver = apic_read(APIC_LVR); | |
136 | ver = GET_APIC_VERSION(ver); | |
137 | v = APIC_DM_NMI; /* unmask and set to NMI */ | |
138 | if (!APIC_INTEGRATED(ver)) /* 82489DX */ | |
139 | v |= APIC_LVT_LEVEL_TRIGGER; | |
140 | apic_write_around(APIC_LVT0, v); | |
141 | } | |
142 | ||
/*
 * Physical-mode broadcast destination ID: modern APICs accept 0xff,
 * older (pre-0x14) parts only 0xf.
 */
int get_physical_broadcast(void)
{
        return modern_apic() ? 0xff : 0xf;
}
150 | ||
151 | int get_maxlvt(void) | |
152 | { | |
153 | unsigned int v, ver, maxlvt; | |
154 | ||
155 | v = apic_read(APIC_LVR); | |
156 | ver = GET_APIC_VERSION(v); | |
157 | /* 82489DXs do not report # of LVT entries. */ | |
158 | maxlvt = APIC_INTEGRATED(ver) ? GET_APIC_MAXLVT(v) : 2; | |
159 | return maxlvt; | |
160 | } | |
161 | ||
/*
 * Mask, then zero, all LVT entries of the local APIC, leaving it in
 * a clean state (for another OS after kexec/reboot, or for our own
 * re-initialisation).  The ordering below is deliberate throughout
 * and must not be changed.
 */
void clear_local_APIC(void)
{
        int maxlvt;
        unsigned long v;

        maxlvt = get_maxlvt();

        /*
         * Masking an LVT entry can trigger a local APIC error
         * if the vector is zero. Mask LVTERR first to prevent this.
         */
        if (maxlvt >= 3) {
                v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
                apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
        }
        /*
         * Careful: we have to set masks only first to deassert
         * any level-triggered sources.
         */
        v = apic_read(APIC_LVTT);
        apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
        v = apic_read(APIC_LVT0);
        apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
        v = apic_read(APIC_LVT1);
        apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
        if (maxlvt >= 4) {
                v = apic_read(APIC_LVTPC);
                apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
        }

        /* lets not touch this if we didn't frob it */
#ifdef CONFIG_X86_MCE_P4THERMAL
        if (maxlvt >= 5) {
                v = apic_read(APIC_LVTTHMR);
                apic_write_around(APIC_LVTTHMR, v | APIC_LVT_MASKED);
        }
#endif
        /*
         * Clean APIC state for other OSs:
         */
        apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
        apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
        apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
        if (maxlvt >= 3)
                apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
        if (maxlvt >= 4)
                apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);

#ifdef CONFIG_X86_MCE_P4THERMAL
        if (maxlvt >= 5)
                apic_write_around(APIC_LVTTHMR, APIC_LVT_MASKED);
#endif
        /* Finally clear any latched error state (integrated APICs only). */
        v = GET_APIC_VERSION(apic_read(APIC_LVR));
        if (APIC_INTEGRATED(v)) {       /* !82489DX */
                if (maxlvt > 3)         /* Due to Pentium errata 3AP and 11AP. */
                        apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }
}
221 | ||
/*
 * Route interrupts through the BSP's local APIC.  On boards running
 * in PIC mode this requires switching the IMCR (Interrupt Mode
 * Configuration Register, accessed via I/O ports 0x22/0x23) so that
 * INT and NMI are delivered to the local APIC instead of directly
 * to the CPU.
 */
void __init connect_bsp_APIC(void)
{
        if (pic_mode) {
                /*
                 * Do not trust the local APIC being empty at bootup.
                 */
                clear_local_APIC();
                /*
                 * PIC mode, enable APIC mode in the IMCR, i.e.
                 * connect BSP's local APIC to INT and NMI lines.
                 */
                apic_printk(APIC_VERBOSE, "leaving PIC mode, "
                                "enabling APIC mode.\n");
                /* IMCR: select register 0x70, then write 0x01 = APIC mode */
                outb(0x70, 0x22);
                outb(0x01, 0x23);
        }
        enable_apic_mode();
}
240 | ||
/*
 * Undo connect_bsp_APIC() before handing control back to the BIOS /
 * a kexec'd kernel: either switch the IMCR back to PIC mode, or
 * reprogram LVT0/LVT1 for Virtual Wire compatibility mode.
 *
 * @virt_wire_setup: non-zero if the I/O APIC already provides the
 *      virtual wire, in which case LVT0 is simply masked.
 */
void disconnect_bsp_APIC(int virt_wire_setup)
{
        if (pic_mode) {
                /*
                 * Put the board back into PIC mode (has an effect
                 * only on certain older boards).  Note that APIC
                 * interrupts, including IPIs, won't work beyond
                 * this point!  The only exception are INIT IPIs.
                 */
                apic_printk(APIC_VERBOSE, "disabling APIC mode, "
                                "entering PIC mode.\n");
                /* IMCR: select register 0x70, then write 0x00 = PIC mode */
                outb(0x70, 0x22);
                outb(0x00, 0x23);
        }
        else {
                /* Go back to Virtual Wire compatibility mode */
                unsigned long value;

                /* For the spurious interrupt use vector F, and enable it */
                value = apic_read(APIC_SPIV);
                value &= ~APIC_VECTOR_MASK;
                value |= APIC_SPIV_APIC_ENABLED;
                value |= 0xf;
                apic_write_around(APIC_SPIV, value);

                if (!virt_wire_setup) {
                        /* For LVT0 make it edge triggered, active high, external and enabled */
                        value = apic_read(APIC_LVT0);
                        value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
                                APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
                                APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
                        /* NOTE(review): REMOTE_IRR/SEND_PENDING look like
                         * read-only status bits; setting them here is
                         * presumably harmless — confirm against the SDM. */
                        value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
                        value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
                        apic_write_around(APIC_LVT0, value);
                }
                else {
                        /* Disable LVT0 */
                        apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
                }

                /* For LVT1 make it edge triggered, active high, nmi and enabled */
                value = apic_read(APIC_LVT1);
                value &= ~(
                        APIC_MODE_MASK | APIC_SEND_PENDING |
                        APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
                        APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
                value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
                value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
                apic_write_around(APIC_LVT1, value);
        }
}
292 | ||
/*
 * Software-disable the local APIC: clear all LVT state, drop the
 * enable bit in SPIV, and — if we ourselves enabled the APIC via
 * the APICBASE MSR — clear the hardware enable bit there too.
 */
void disable_local_APIC(void)
{
        unsigned long value;

        clear_local_APIC();

        /*
         * Disable APIC (implies clearing of registers
         * for 82489DX!).
         */
        value = apic_read(APIC_SPIV);
        value &= ~APIC_SPIV_APIC_ENABLED;
        apic_write_around(APIC_SPIV, value);

        if (enabled_via_apicbase) {
                unsigned int l, h;
                rdmsr(MSR_IA32_APICBASE, l, h);
                l &= ~MSR_IA32_APICBASE_ENABLE;
                wrmsr(MSR_IA32_APICBASE, l, h);
        }
}
314 | ||
/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
 * started for no apparent reason.
 *
 * Returns 1 if the registers behave like a genuine APIC
 * (read-only version register, sane version/maxlvt values),
 * 0 otherwise.
 */
int __init verify_local_APIC(void)
{
        unsigned int reg0, reg1;

        /*
         * The version register is read-only in a real APIC.
         */
        reg0 = apic_read(APIC_LVR);
        apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
        /* Write back the inverted value; a real APIC ignores the write. */
        apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
        reg1 = apic_read(APIC_LVR);
        apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

        /*
         * The two version reads above should print the same
         * numbers. If the second one is different, then we
         * poke at a non-APIC.
         */
        if (reg1 != reg0)
                return 0;

        /*
         * Check if the version looks reasonably.
         */
        reg1 = GET_APIC_VERSION(reg0);
        if (reg1 == 0x00 || reg1 == 0xff)
                return 0;
        reg1 = get_maxlvt();
        if (reg1 < 0x02 || reg1 == 0xff)
                return 0;

        /*
         * The ID register is read/write in a real APIC.
         */
        reg0 = apic_read(APIC_ID);
        apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);

        /*
         * The next two are just to see if we have sane values.
         * They're only really relevant if we're in Virtual Wire
         * compatibility mode, but most boxes are anymore.
         */
        reg0 = apic_read(APIC_LVT0);
        apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
        reg1 = apic_read(APIC_LVT1);
        apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

        return 1;
}
369 | ||
/*
 * Synchronize APIC bus arbitration IDs by broadcasting an INIT
 * level-triggered message to all APICs.  Only meaningful on old
 * (pre-0x14) APIC-bus based systems.
 */
void __init sync_Arb_IDs(void)
{
        /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1
           And not needed on AMD */
        if (modern_apic())
                return;
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
        apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
                                | APIC_DM_INIT);
}
385 | ||
386 | extern void __error_in_apic_c (void); | |
387 | ||
388 | /* | |
389 | * An initial setup of the virtual wire mode. | |
390 | */ | |
/*
 * An initial setup of the virtual wire mode on the boot CPU's
 * local APIC: enable the APIC via SPIV, route ExtINT through LVT0
 * and NMI through LVT1.  Skipped entirely when an MP BIOS was
 * found (the I/O APIC virtual wire may already be active) or when
 * no APIC is present.
 */
void __init init_bsp_APIC(void)
{
        unsigned long value, ver;

        /*
         * Don't do the setup now if we have a SMP BIOS as the
         * through-I/O-APIC virtual wire mode might be active.
         */
        if (smp_found_config || !cpu_has_apic)
                return;

        value = apic_read(APIC_LVR);
        ver = GET_APIC_VERSION(value);

        /*
         * Do not trust the local APIC being empty at bootup.
         */
        clear_local_APIC();

        /*
         * Enable APIC.
         */
        value = apic_read(APIC_SPIV);
        value &= ~APIC_VECTOR_MASK;
        value |= APIC_SPIV_APIC_ENABLED;

        /* This bit is reserved on P4/Xeon and should be cleared */
        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
                value &= ~APIC_SPIV_FOCUS_DISABLED;
        else
                value |= APIC_SPIV_FOCUS_DISABLED;
        value |= SPURIOUS_APIC_VECTOR;
        apic_write_around(APIC_SPIV, value);

        /*
         * Set up the virtual wire mode.
         */
        apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
        value = APIC_DM_NMI;
        if (!APIC_INTEGRATED(ver))              /* 82489DX */
                value |= APIC_LVT_LEVEL_TRIGGER;
        apic_write_around(APIC_LVT1, value);
}
434 | ||
/*
 * Full per-CPU local APIC initialisation: set LDR/TPR, drain stale
 * ISR bits left by a crashed kernel, enable the APIC via SPIV,
 * program LVT0/LVT1 virtual-wire entries and the error LVT, then
 * arm the NMI watchdog and PM state.  Runs on every CPU during
 * bring-up; statement order follows Intel's recommendations and
 * must not be changed casually.
 */
void __devinit setup_local_APIC(void)
{
        unsigned long oldvalue, value, ver, maxlvt;
        int i, j;

        /* Pound the ESR really hard over the head with a big hammer - mbligh */
        if (esr_disable) {
                apic_write(APIC_ESR, 0);
                apic_write(APIC_ESR, 0);
                apic_write(APIC_ESR, 0);
                apic_write(APIC_ESR, 0);
        }

        value = apic_read(APIC_LVR);
        ver = GET_APIC_VERSION(value);

        /* Compile-time-ish sanity: spurious vector must end in 0xf. */
        if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
                __error_in_apic_c();

        /*
         * Double-check whether this APIC is really registered.
         */
        if (!apic_id_registered())
                BUG();

        /*
         * Intel recommends to set DFR, LDR and TPR before enabling
         * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
         * document number 292116).  So here it goes...
         */
        init_apic_ldr();

        /*
         * Set Task Priority to 'accept all'. We never change this
         * later on.
         */
        value = apic_read(APIC_TASKPRI);
        value &= ~APIC_TPRI_MASK;
        apic_write_around(APIC_TASKPRI, value);

        /*
         * After a crash, we no longer service the interrupts and a pending
         * interrupt from previous kernel might still have ISR bit set.
         *
         * Most probably by now CPU has serviced that pending interrupt and
         * it might not have done the ack_APIC_irq() because it thought,
         * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
         * does not clear the ISR bit and cpu thinks it has already serivced
         * the interrupt. Hence a vector might get locked. It was noticed
         * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
         */
        for (i = APIC_ISR_NR - 1; i >= 0; i--) {
                value = apic_read(APIC_ISR + i*0x10);
                for (j = 31; j >= 0; j--) {
                        if (value & (1<<j))
                                ack_APIC_irq();
                }
        }

        /*
         * Now that we are all set up, enable the APIC
         */
        value = apic_read(APIC_SPIV);
        value &= ~APIC_VECTOR_MASK;
        /*
         * Enable APIC
         */
        value |= APIC_SPIV_APIC_ENABLED;

        /*
         * Some unknown Intel IO/APIC (or APIC) errata is biting us with
         * certain networking cards. If high frequency interrupts are
         * happening on a particular IOAPIC pin, plus the IOAPIC routing
         * entry is masked/unmasked at a high rate as well then sooner or
         * later IOAPIC line gets 'stuck', no more interrupts are received
         * from the device. If focus CPU is disabled then the hang goes
         * away, oh well :-(
         *
         * [ This bug can be reproduced easily with a level-triggered
         *   PCI Ne2000 networking cards and PII/PIII processors, dual
         *   BX chipset. ]
         */
        /*
         * Actually disabling the focus CPU check just makes the hang less
         * frequent as it makes the interrupt distributon model be more
         * like LRU than MRU (the short-term load is more even across CPUs).
         * See also the comment in end_level_ioapic_irq().  --macro
         */
#if 1
        /* Enable focus processor (bit==0) */
        value &= ~APIC_SPIV_FOCUS_DISABLED;
#else
        /* Disable focus processor (bit==1) */
        value |= APIC_SPIV_FOCUS_DISABLED;
#endif
        /*
         * Set spurious IRQ vector
         */
        value |= SPURIOUS_APIC_VECTOR;
        apic_write_around(APIC_SPIV, value);

        /*
         * Set up LVT0, LVT1:
         *
         * set up through-local-APIC on the BP's LINT0. This is not
         * strictly necessery in pure symmetric-IO mode, but sometimes
         * we delegate interrupts to the 8259A.
         */
        /*
         * TODO: set up through-local-APIC from through-I/O-APIC? --macro
         */
        value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
        if (!smp_processor_id() && (pic_mode || !value)) {
                value = APIC_DM_EXTINT;
                apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
                                smp_processor_id());
        } else {
                value = APIC_DM_EXTINT | APIC_LVT_MASKED;
                apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
                                smp_processor_id());
        }
        apic_write_around(APIC_LVT0, value);

        /*
         * only the BP should see the LINT1 NMI signal, obviously.
         */
        if (!smp_processor_id())
                value = APIC_DM_NMI;
        else
                value = APIC_DM_NMI | APIC_LVT_MASKED;
        if (!APIC_INTEGRATED(ver))              /* 82489DX */
                value |= APIC_LVT_LEVEL_TRIGGER;
        apic_write_around(APIC_LVT1, value);

        if (APIC_INTEGRATED(ver) && !esr_disable) {             /* !82489DX */
                maxlvt = get_maxlvt();
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                oldvalue = apic_read(APIC_ESR);

                value = ERROR_APIC_VECTOR;      // enables sending errors
                apic_write_around(APIC_LVTERR, value);
                /*
                 * spec says clear errors after enabling vector.
                 */
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);
                value = apic_read(APIC_ESR);
                if (value != oldvalue)
                        apic_printk(APIC_VERBOSE, "ESR value before enabling "
                                "vector: 0x%08lx  after: 0x%08lx\n",
                                oldvalue, value);
        } else {
                if (esr_disable)
                        /*
                         * Something untraceble is creating bad interrupts on
                         * secondary quads ... for the moment, just leave the
                         * ESR disabled - we can't do anything useful with the
                         * errors anyway - mbligh
                         */
                        printk("Leaving ESR disabled.\n");
                else
                        printk("No ESR for 82489DX.\n");
        }

        setup_apic_nmi_watchdog(NULL);
        apic_pm_activate();
}
603 | ||
/*
 * If Linux enabled the LAPIC against the BIOS default
 * disable it down before re-entering the BIOS on shutdown.
 * Otherwise the BIOS may get confused and not power-off.
 * Additionally clear all LVT entries before disable_local_APIC
 * for the case where Linux didn't enable the LAPIC.
 *
 * Runs with interrupts disabled around the APIC accesses.
 */
void lapic_shutdown(void)
{
        unsigned long flags;

        if (!cpu_has_apic)
                return;

        local_irq_save(flags);
        clear_local_APIC();

        /* Only fully disable if we were the ones who enabled it. */
        if (enabled_via_apicbase)
                disable_local_APIC();

        local_irq_restore(flags);
}
626 | ||
627 | #ifdef CONFIG_PM | |
628 | ||
/*
 * Snapshot of all read/write local APIC registers, saved on suspend
 * and written back on resume (see lapic_suspend()/lapic_resume()).
 */
static struct {
        int active;                     /* set by apic_pm_activate() */
        /* r/w apic fields */
        unsigned int apic_id;
        unsigned int apic_taskpri;
        unsigned int apic_ldr;
        unsigned int apic_dfr;
        unsigned int apic_spiv;
        unsigned int apic_lvtt;
        unsigned int apic_lvtpc;
        unsigned int apic_lvt0;
        unsigned int apic_lvt1;
        unsigned int apic_lvterr;
        unsigned int apic_tmict;
        unsigned int apic_tdcr;
        unsigned int apic_thmr;
} apic_pm_state;
646 | ||
/*
 * Suspend callback: snapshot every writable local APIC register
 * into apic_pm_state, then software-disable the APIC with
 * interrupts off.  Always returns 0.
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
        unsigned long flags;

        if (!apic_pm_state.active)
                return 0;

        apic_pm_state.apic_id = apic_read(APIC_ID);
        apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
        apic_pm_state.apic_ldr = apic_read(APIC_LDR);
        apic_pm_state.apic_dfr = apic_read(APIC_DFR);
        apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
        apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
        apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
        apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
        apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
        apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
        apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
        apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
        /* NOTE(review): LVTTHMR is read unconditionally, even on CPUs
         * without a thermal LVT — presumably harmless; verify. */
        apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);

        local_irq_save(flags);
        disable_local_APIC();
        local_irq_restore(flags);
        return 0;
}
673 | ||
/*
 * Resume callback: re-enable the APIC via the APICBASE MSR and
 * restore all registers saved by lapic_suspend().  LVTERR is
 * masked first so the restores cannot raise APIC errors; the
 * restore order mirrors the inverse of suspend and matters.
 * Always returns 0.
 */
static int lapic_resume(struct sys_device *dev)
{
        unsigned int l, h;
        unsigned long flags;

        if (!apic_pm_state.active)
                return 0;

        local_irq_save(flags);

        /*
         * Make sure the APICBASE points to the right address
         *
         * FIXME! This will be wrong if we ever support suspend on
         * SMP! We'll need to do this as part of the CPU restore!
         */
        rdmsr(MSR_IA32_APICBASE, l, h);
        l &= ~MSR_IA32_APICBASE_BASE;
        l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
        wrmsr(MSR_IA32_APICBASE, l, h);

        /* Mask the error LVT while we rewrite everything below. */
        apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
        apic_write(APIC_ID, apic_pm_state.apic_id);
        apic_write(APIC_DFR, apic_pm_state.apic_dfr);
        apic_write(APIC_LDR, apic_pm_state.apic_ldr);
        apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
        apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
        apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
        apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
        apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
        apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
        apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
        apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
        apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
        /* Clear any error state accumulated during the restore. */
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
        apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
        local_irq_restore(flags);
        return 0;
}
716 | ||
/*
 * This device has no shutdown method - fully functioning local APICs
 * are needed on every CPU up until machine_halt/restart/poweroff.
 */

/* sysdev glue that hooks lapic_suspend/lapic_resume into the PM core. */
static struct sysdev_class lapic_sysclass = {
        set_kset_name("lapic"),
        .resume         = lapic_resume,
        .suspend        = lapic_suspend,
};

static struct sys_device device_lapic = {
        .id     = 0,
        .cls    = &lapic_sysclass,
};
732 | ||
/* Mark the lapic PM state in use so suspend/resume will save/restore it. */
static void __devinit apic_pm_activate(void)
{
        apic_pm_state.active = 1;
}
737 | ||
738 | static int __init init_lapic_sysfs(void) | |
739 | { | |
740 | int error; | |
741 | ||
742 | if (!cpu_has_apic) | |
743 | return 0; | |
744 | /* XXX: remove suspend/resume procs if !apic_pm_state.active? */ | |
745 | ||
746 | error = sysdev_class_register(&lapic_sysclass); | |
747 | if (!error) | |
748 | error = sysdev_register(&device_lapic); | |
749 | return error; | |
750 | } | |
751 | device_initcall(init_lapic_sysfs); | |
752 | ||
#else	/* CONFIG_PM */

/* No PM support configured: activation is a no-op. */
static void apic_pm_activate(void) { }

#endif	/* CONFIG_PM */
758 | ||
759 | /* | |
760 | * Detect and enable local APICs on non-SMP boards. | |
761 | * Original code written by Keir Fraser. | |
762 | */ | |
763 | ||
1da177e4 LT |
764 | static int __init apic_set_verbosity(char *str) |
765 | { | |
766 | if (strcmp("debug", str) == 0) | |
767 | apic_verbosity = APIC_DEBUG; | |
768 | else if (strcmp("verbose", str) == 0) | |
769 | apic_verbosity = APIC_VERBOSE; | |
9b41046c | 770 | return 1; |
1da177e4 LT |
771 | } |
772 | ||
773 | __setup("apic=", apic_set_verbosity); | |
774 | ||
/*
 * Detect a usable local APIC on non-SMP boards and, if the BIOS
 * disabled it in the APICBASE MSR, re-enable it (when the user
 * asked with "lapic").  On success sets the APIC feature bit,
 * records mp_lapic_addr and selects the local-APIC NMI watchdog.
 *
 * Returns 0 when an APIC was found and enabled, -1 otherwise.
 */
static int __init detect_init_APIC (void)
{
        u32 h, l, features;

        /* Disabled by kernel option? */
        if (enable_local_apic < 0)
                return -1;

        /* Only P6+/P5-with-APIC Intel and K7+/late-K6 AMD can work. */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
                    (boot_cpu_data.x86 == 15))
                        break;
                goto no_apic;
        case X86_VENDOR_INTEL:
                if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
                    (boot_cpu_data.x86 == 5 && cpu_has_apic))
                        break;
                goto no_apic;
        default:
                goto no_apic;
        }

        if (!cpu_has_apic) {
                /*
                 * Over-ride BIOS and try to enable the local
                 * APIC only if "lapic" specified.
                 */
                if (enable_local_apic <= 0) {
                        printk("Local APIC disabled by BIOS -- "
                               "you can enable it with \"lapic\"\n");
                        return -1;
                }
                /*
                 * Some BIOSes disable the local APIC in the
                 * APIC_BASE MSR. This can only be done in
                 * software for Intel P6 or later and AMD K7
                 * (Model > 1) or later.
                 */
                rdmsr(MSR_IA32_APICBASE, l, h);
                if (!(l & MSR_IA32_APICBASE_ENABLE)) {
                        printk("Local APIC disabled by BIOS -- reenabling.\n");
                        l &= ~MSR_IA32_APICBASE_BASE;
                        l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
                        wrmsr(MSR_IA32_APICBASE, l, h);
                        enabled_via_apicbase = 1;
                }
        }
        /*
         * The APIC feature bit should now be enabled
         * in `cpuid'
         */
        features = cpuid_edx(1);
        if (!(features & (1 << X86_FEATURE_APIC))) {
                printk("Could not enable APIC!\n");
                return -1;
        }
        set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
        mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

        /* The BIOS may have set up the APIC at some other address */
        rdmsr(MSR_IA32_APICBASE, l, h);
        if (l & MSR_IA32_APICBASE_ENABLE)
                mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

        if (nmi_watchdog != NMI_NONE)
                nmi_watchdog = NMI_LOCAL_APIC;

        printk("Found and enabled local APIC!\n");

        apic_pm_activate();

        return 0;

no_apic:
        printk("No local APIC present or hardware disabled\n");
        return -1;
}
853 | ||
/*
 * Establish the fixmap mappings for the local APIC and any I/O
 * APICs.  When no real APIC exists, a zeroed bootmem page is mapped
 * instead so that APIC register accesses are harmless.  Also latches
 * the BSP's physical APIC ID for default (broken-MPTABLE) setups.
 */
void __init init_apic_mappings(void)
{
        unsigned long apic_phys;

        /*
         * If no local APIC can be found then set up a fake all
         * zeroes page to simulate the local APIC and another
         * one for the IO-APIC.
         */
        if (!smp_found_config && detect_init_APIC()) {
                apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
                apic_phys = __pa(apic_phys);
        } else
                apic_phys = mp_lapic_addr;

        set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
        printk(KERN_DEBUG "mapped APIC to %08lx (%08lx)\n", APIC_BASE,
               apic_phys);

        /*
         * Fetch the APIC ID of the BSP in case we have a
         * default configuration (or the MP table is broken).
         */
        if (boot_cpu_physical_apicid == -1U)
                boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));

#ifdef CONFIG_X86_IO_APIC
        {
                unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
                int i;

                for (i = 0; i < nr_ioapics; i++) {
                        if (smp_found_config) {
                                ioapic_phys = mp_ioapics[i].mpc_apicaddr;
                                if (!ioapic_phys) {
                                        printk(KERN_ERR
                                               "WARNING: bogus zero IO-APIC "
                                               "address found in MPTABLE, "
                                               "disabling IO/APIC support!\n");
                                        smp_found_config = 0;
                                        skip_ioapic_setup = 1;
                                        goto fake_ioapic_page;
                                }
                        } else {
fake_ioapic_page:
                                /* Bogus or missing entry: map a zeroed page. */
                                ioapic_phys = (unsigned long)
                                              alloc_bootmem_pages(PAGE_SIZE);
                                ioapic_phys = __pa(ioapic_phys);
                        }
                        set_fixmap_nocache(idx, ioapic_phys);
                        printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
                               __fix_to_virt(idx), ioapic_phys);
                        idx++;
                }
        }
#endif
}
911 | ||
912 | /* | |
913 | * This part sets up the APIC 32 bit clock in LVTT1, with HZ interrupts | |
914 | * per second. We assume that the caller has already set up the local | |
915 | * APIC. | |
916 | * | |
917 | * The APIC timer is not exactly sync with the external timer chip, it | |
918 | * closely follows bus clocks. | |
919 | */ | |
920 | ||
921 | /* | |
922 | * The timer chip is already set up at HZ interrupts per second here, | |
923 | * but we do not accept timer interrupts yet. We only allow the BP | |
924 | * to calibrate. | |
925 | */ | |
0bb3184d | 926 | static unsigned int __devinit get_8254_timer_count(void) |
1da177e4 | 927 | { |
1da177e4 LT |
928 | unsigned long flags; |
929 | ||
930 | unsigned int count; | |
931 | ||
932 | spin_lock_irqsave(&i8253_lock, flags); | |
933 | ||
934 | outb_p(0x00, PIT_MODE); | |
935 | count = inb_p(PIT_CH0); | |
936 | count |= inb_p(PIT_CH0) << 8; | |
937 | ||
938 | spin_unlock_irqrestore(&i8253_lock, flags); | |
939 | ||
940 | return count; | |
941 | } | |
942 | ||
943 | /* next tick in 8254 can be caught by catching timer wraparound */ | |
0bb3184d | 944 | static void __devinit wait_8254_wraparound(void) |
1da177e4 LT |
945 | { |
946 | unsigned int curr_count, prev_count; | |
947 | ||
948 | curr_count = get_8254_timer_count(); | |
949 | do { | |
950 | prev_count = curr_count; | |
951 | curr_count = get_8254_timer_count(); | |
952 | ||
953 | /* workaround for broken Mercury/Neptune */ | |
954 | if (prev_count >= curr_count + 0x100) | |
955 | curr_count = get_8254_timer_count(); | |
956 | ||
957 | } while (prev_count >= curr_count); | |
958 | } | |
959 | ||
960 | /* | |
961 | * Default initialization for 8254 timers. If we use other timers like HPET, | |
962 | * we override this later | |
963 | */ | |
0bb3184d | 964 | void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound; |
1da177e4 LT |
965 | |
966 | /* | |
967 | * This function sets up the local APIC timer, with a timeout of | |
968 | * 'clocks' APIC bus clock. During calibration we actually call | |
969 | * this function twice on the boot CPU, once with a bogus timeout | |
970 | * value, second time for real. The other (noncalibrating) CPUs | |
971 | * call this function only once, with the real, calibrated value. | |
972 | * | |
973 | * We do reads before writes even if unnecessary, to get around the | |
974 | * P5 APIC double write bug. | |
975 | */ | |
976 | ||
977 | #define APIC_DIVISOR 16 | |
978 | ||
979 | static void __setup_APIC_LVTT(unsigned int clocks) | |
980 | { | |
981 | unsigned int lvtt_value, tmp_value, ver; | |
6eb0a0fd | 982 | int cpu = smp_processor_id(); |
1da177e4 LT |
983 | |
984 | ver = GET_APIC_VERSION(apic_read(APIC_LVR)); | |
985 | lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR; | |
986 | if (!APIC_INTEGRATED(ver)) | |
987 | lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV); | |
6eb0a0fd VP |
988 | |
989 | if (cpu_isset(cpu, timer_bcast_ipi)) | |
990 | lvtt_value |= APIC_LVT_MASKED; | |
991 | ||
1da177e4 LT |
992 | apic_write_around(APIC_LVTT, lvtt_value); |
993 | ||
994 | /* | |
995 | * Divide PICLK by 16 | |
996 | */ | |
997 | tmp_value = apic_read(APIC_TDCR); | |
998 | apic_write_around(APIC_TDCR, (tmp_value | |
999 | & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) | |
1000 | | APIC_TDR_DIV_16); | |
1001 | ||
1002 | apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR); | |
1003 | } | |
1004 | ||
0bb3184d | 1005 | static void __devinit setup_APIC_timer(unsigned int clocks) |
1da177e4 LT |
1006 | { |
1007 | unsigned long flags; | |
1008 | ||
1009 | local_irq_save(flags); | |
1010 | ||
1011 | /* | |
1012 | * Wait for IRQ0's slice: | |
1013 | */ | |
1014 | wait_timer_tick(); | |
1015 | ||
1016 | __setup_APIC_LVTT(clocks); | |
1017 | ||
1018 | local_irq_restore(flags); | |
1019 | } | |
1020 | ||
1021 | /* | |
1022 | * In this function we calibrate APIC bus clocks to the external | |
1023 | * timer. Unfortunately we cannot use jiffies and the timer irq | |
1024 | * to calibrate, since some later bootup code depends on getting | |
1025 | * the first irq? Ugh. | |
1026 | * | |
1027 | * We want to do the calibration only once since we | |
1028 | * want to have local timer irqs syncron. CPUs connected | |
1029 | * by the same APIC bus have the very same bus frequency. | |
1030 | * And we want to have irqs off anyways, no accidental | |
1031 | * APIC irq that way. | |
1032 | */ | |
1033 | ||
1034 | static int __init calibrate_APIC_clock(void) | |
1035 | { | |
1036 | unsigned long long t1 = 0, t2 = 0; | |
1037 | long tt1, tt2; | |
1038 | long result; | |
1039 | int i; | |
1040 | const int LOOPS = HZ/10; | |
1041 | ||
1042 | apic_printk(APIC_VERBOSE, "calibrating APIC timer ...\n"); | |
1043 | ||
1044 | /* | |
1045 | * Put whatever arbitrary (but long enough) timeout | |
1046 | * value into the APIC clock, we just want to get the | |
1047 | * counter running for calibration. | |
1048 | */ | |
1049 | __setup_APIC_LVTT(1000000000); | |
1050 | ||
1051 | /* | |
1052 | * The timer chip counts down to zero. Let's wait | |
1053 | * for a wraparound to start exact measurement: | |
1054 | * (the current tick might have been already half done) | |
1055 | */ | |
1056 | ||
1057 | wait_timer_tick(); | |
1058 | ||
1059 | /* | |
1060 | * We wrapped around just now. Let's start: | |
1061 | */ | |
1062 | if (cpu_has_tsc) | |
1063 | rdtscll(t1); | |
1064 | tt1 = apic_read(APIC_TMCCT); | |
1065 | ||
1066 | /* | |
1067 | * Let's wait LOOPS wraprounds: | |
1068 | */ | |
1069 | for (i = 0; i < LOOPS; i++) | |
1070 | wait_timer_tick(); | |
1071 | ||
1072 | tt2 = apic_read(APIC_TMCCT); | |
1073 | if (cpu_has_tsc) | |
1074 | rdtscll(t2); | |
1075 | ||
1076 | /* | |
1077 | * The APIC bus clock counter is 32 bits only, it | |
1078 | * might have overflown, but note that we use signed | |
1079 | * longs, thus no extra care needed. | |
1080 | * | |
1081 | * underflown to be exact, as the timer counts down ;) | |
1082 | */ | |
1083 | ||
1084 | result = (tt1-tt2)*APIC_DIVISOR/LOOPS; | |
1085 | ||
1086 | if (cpu_has_tsc) | |
1087 | apic_printk(APIC_VERBOSE, "..... CPU clock speed is " | |
1088 | "%ld.%04ld MHz.\n", | |
1089 | ((long)(t2-t1)/LOOPS)/(1000000/HZ), | |
1090 | ((long)(t2-t1)/LOOPS)%(1000000/HZ)); | |
1091 | ||
1092 | apic_printk(APIC_VERBOSE, "..... host bus clock speed is " | |
1093 | "%ld.%04ld MHz.\n", | |
1094 | result/(1000000/HZ), | |
1095 | result%(1000000/HZ)); | |
1096 | ||
1097 | return result; | |
1098 | } | |
1099 | ||
1100 | static unsigned int calibration_result; | |
1101 | ||
1102 | void __init setup_boot_APIC_clock(void) | |
1103 | { | |
f2b36db6 | 1104 | unsigned long flags; |
1da177e4 LT |
1105 | apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); |
1106 | using_apic_timer = 1; | |
1107 | ||
f2b36db6 | 1108 | local_irq_save(flags); |
1da177e4 LT |
1109 | |
1110 | calibration_result = calibrate_APIC_clock(); | |
1111 | /* | |
1112 | * Now set up the timer for real. | |
1113 | */ | |
1114 | setup_APIC_timer(calibration_result); | |
1115 | ||
f2b36db6 | 1116 | local_irq_restore(flags); |
1da177e4 LT |
1117 | } |
1118 | ||
0bb3184d | 1119 | void __devinit setup_secondary_APIC_clock(void) |
1da177e4 LT |
1120 | { |
1121 | setup_APIC_timer(calibration_result); | |
1122 | } | |
1123 | ||
6eb0a0fd | 1124 | void disable_APIC_timer(void) |
1da177e4 LT |
1125 | { |
1126 | if (using_apic_timer) { | |
1127 | unsigned long v; | |
1128 | ||
1129 | v = apic_read(APIC_LVTT); | |
704fc59e SS |
1130 | /* |
1131 | * When an illegal vector value (0-15) is written to an LVT | |
1132 | * entry and delivery mode is Fixed, the APIC may signal an | |
1133 | * illegal vector error, with out regard to whether the mask | |
1134 | * bit is set or whether an interrupt is actually seen on input. | |
1135 | * | |
1136 | * Boot sequence might call this function when the LVTT has | |
1137 | * '0' vector value. So make sure vector field is set to | |
1138 | * valid value. | |
1139 | */ | |
1140 | v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); | |
1141 | apic_write_around(APIC_LVTT, v); | |
1da177e4 LT |
1142 | } |
1143 | } | |
1144 | ||
1145 | void enable_APIC_timer(void) | |
1146 | { | |
6eb0a0fd VP |
1147 | int cpu = smp_processor_id(); |
1148 | ||
1149 | if (using_apic_timer && | |
1150 | !cpu_isset(cpu, timer_bcast_ipi)) { | |
1da177e4 LT |
1151 | unsigned long v; |
1152 | ||
1153 | v = apic_read(APIC_LVTT); | |
1154 | apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED); | |
1155 | } | |
1156 | } | |
1157 | ||
6eb0a0fd VP |
1158 | void switch_APIC_timer_to_ipi(void *cpumask) |
1159 | { | |
1160 | cpumask_t mask = *(cpumask_t *)cpumask; | |
1161 | int cpu = smp_processor_id(); | |
1162 | ||
1163 | if (cpu_isset(cpu, mask) && | |
1164 | !cpu_isset(cpu, timer_bcast_ipi)) { | |
1165 | disable_APIC_timer(); | |
1166 | cpu_set(cpu, timer_bcast_ipi); | |
1167 | } | |
1168 | } | |
1169 | EXPORT_SYMBOL(switch_APIC_timer_to_ipi); | |
1170 | ||
1171 | void switch_ipi_to_APIC_timer(void *cpumask) | |
1172 | { | |
1173 | cpumask_t mask = *(cpumask_t *)cpumask; | |
1174 | int cpu = smp_processor_id(); | |
1175 | ||
1176 | if (cpu_isset(cpu, mask) && | |
1177 | cpu_isset(cpu, timer_bcast_ipi)) { | |
1178 | cpu_clear(cpu, timer_bcast_ipi); | |
1179 | enable_APIC_timer(); | |
1180 | } | |
1181 | } | |
1182 | EXPORT_SYMBOL(switch_ipi_to_APIC_timer); | |
1183 | ||
1da177e4 LT |
1184 | #undef APIC_DIVISOR |
1185 | ||
1186 | /* | |
1187 | * Local timer interrupt handler. It does both profiling and | |
1188 | * process statistics/rescheduling. | |
1189 | * | |
1190 | * We do profiling in every local tick, statistics/rescheduling | |
1191 | * happen only every 'profiling multiplier' ticks. The default | |
1192 | * multiplier is 1 and it can be changed by writing the new multiplier | |
1193 | * value into /proc/profile. | |
1194 | */ | |
1195 | ||
7d12e780 | 1196 | inline void smp_local_timer_interrupt(void) |
1da177e4 | 1197 | { |
7d12e780 | 1198 | profile_tick(CPU_PROFILING); |
1da177e4 | 1199 | #ifdef CONFIG_SMP |
7d12e780 | 1200 | update_process_times(user_mode_vm(irq_regs)); |
1da177e4 | 1201 | #endif |
1da177e4 LT |
1202 | |
1203 | /* | |
1204 | * We take the 'long' return path, and there every subsystem | |
1205 | * grabs the apropriate locks (kernel lock/ irq lock). | |
1206 | * | |
1207 | * we might want to decouple profiling from the 'long path', | |
1208 | * and do the profiling totally in assembly. | |
1209 | * | |
1210 | * Currently this isn't too much of an issue (performance wise), | |
1211 | * we can take more than 100K local irqs per second on a 100 MHz P5. | |
1212 | */ | |
1213 | } | |
1214 | ||
1215 | /* | |
1216 | * Local APIC timer interrupt. This is the most natural way for doing | |
1217 | * local interrupts, but local timer interrupts can be emulated by | |
1218 | * broadcast interrupts too. [in case the hw doesn't support APIC timers] | |
1219 | * | |
1220 | * [ if a single-CPU system runs an SMP kernel then we call the local | |
1221 | * interrupt as well. Thus we cannot inline the local irq ... ] | |
1222 | */ | |
1223 | ||
1224 | fastcall void smp_apic_timer_interrupt(struct pt_regs *regs) | |
1225 | { | |
7d12e780 | 1226 | struct pt_regs *old_regs = set_irq_regs(regs); |
1da177e4 LT |
1227 | int cpu = smp_processor_id(); |
1228 | ||
1229 | /* | |
1230 | * the NMI deadlock-detector uses this. | |
1231 | */ | |
1232 | per_cpu(irq_stat, cpu).apic_timer_irqs++; | |
1233 | ||
1234 | /* | |
1235 | * NOTE! We'd better ACK the irq immediately, | |
1236 | * because timer handling can be slow. | |
1237 | */ | |
1238 | ack_APIC_irq(); | |
1239 | /* | |
1240 | * update_process_times() expects us to have done irq_enter(). | |
1241 | * Besides, if we don't timer interrupts ignore the global | |
1242 | * interrupt lock, which is the WrongThing (tm) to do. | |
1243 | */ | |
1244 | irq_enter(); | |
7d12e780 | 1245 | smp_local_timer_interrupt(); |
1da177e4 | 1246 | irq_exit(); |
7d12e780 | 1247 | set_irq_regs(old_regs); |
1da177e4 LT |
1248 | } |
1249 | ||
6eb0a0fd | 1250 | #ifndef CONFIG_SMP |
7d12e780 | 1251 | static void up_apic_timer_interrupt_call(void) |
6eb0a0fd VP |
1252 | { |
1253 | int cpu = smp_processor_id(); | |
1254 | ||
1255 | /* | |
1256 | * the NMI deadlock-detector uses this. | |
1257 | */ | |
1258 | per_cpu(irq_stat, cpu).apic_timer_irqs++; | |
1259 | ||
7d12e780 | 1260 | smp_local_timer_interrupt(); |
6eb0a0fd VP |
1261 | } |
1262 | #endif | |
1263 | ||
7d12e780 | 1264 | void smp_send_timer_broadcast_ipi(void) |
6eb0a0fd VP |
1265 | { |
1266 | cpumask_t mask; | |
1267 | ||
1268 | cpus_and(mask, cpu_online_map, timer_bcast_ipi); | |
1269 | if (!cpus_empty(mask)) { | |
1270 | #ifdef CONFIG_SMP | |
1271 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); | |
1272 | #else | |
1273 | /* | |
1274 | * We can directly call the apic timer interrupt handler | |
1275 | * in UP case. Minus all irq related functions | |
1276 | */ | |
7d12e780 | 1277 | up_apic_timer_interrupt_call(); |
6eb0a0fd VP |
1278 | #endif |
1279 | } | |
1280 | } | |
1281 | ||
5a07a30c VP |
1282 | int setup_profiling_timer(unsigned int multiplier) |
1283 | { | |
1284 | return -EINVAL; | |
1285 | } | |
1286 | ||
1da177e4 LT |
1287 | /* |
1288 | * This interrupt should _never_ happen with our APIC/SMP architecture | |
1289 | */ | |
1290 | fastcall void smp_spurious_interrupt(struct pt_regs *regs) | |
1291 | { | |
1292 | unsigned long v; | |
1293 | ||
1294 | irq_enter(); | |
1295 | /* | |
1296 | * Check if this really is a spurious interrupt and ACK it | |
1297 | * if it is a vectored one. Just in case... | |
1298 | * Spurious interrupts should not be ACKed. | |
1299 | */ | |
1300 | v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1)); | |
1301 | if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f))) | |
1302 | ack_APIC_irq(); | |
1303 | ||
1304 | /* see sw-dev-man vol 3, chapter 7.4.13.5 */ | |
1305 | printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n", | |
1306 | smp_processor_id()); | |
1307 | irq_exit(); | |
1308 | } | |
1309 | ||
1310 | /* | |
1311 | * This interrupt should never happen with our APIC/SMP architecture | |
1312 | */ | |
1313 | ||
1314 | fastcall void smp_error_interrupt(struct pt_regs *regs) | |
1315 | { | |
1316 | unsigned long v, v1; | |
1317 | ||
1318 | irq_enter(); | |
1319 | /* First tickle the hardware, only then report what went on. -- REW */ | |
1320 | v = apic_read(APIC_ESR); | |
1321 | apic_write(APIC_ESR, 0); | |
1322 | v1 = apic_read(APIC_ESR); | |
1323 | ack_APIC_irq(); | |
1324 | atomic_inc(&irq_err_count); | |
1325 | ||
1326 | /* Here is what the APIC error bits mean: | |
1327 | 0: Send CS error | |
1328 | 1: Receive CS error | |
1329 | 2: Send accept error | |
1330 | 3: Receive accept error | |
1331 | 4: Reserved | |
1332 | 5: Send illegal vector | |
1333 | 6: Received illegal vector | |
1334 | 7: Illegal register address | |
1335 | */ | |
1336 | printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n", | |
1337 | smp_processor_id(), v , v1); | |
1338 | irq_exit(); | |
1339 | } | |
1340 | ||
1341 | /* | |
1e4c85f9 LT |
1342 | * This initializes the IO-APIC and APIC hardware if this is |
1343 | * a UP kernel. | |
1da177e4 | 1344 | */ |
1e4c85f9 | 1345 | int __init APIC_init_uniprocessor (void) |
1da177e4 | 1346 | { |
1e4c85f9 LT |
1347 | if (enable_local_apic < 0) |
1348 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); | |
1da177e4 | 1349 | |
1e4c85f9 | 1350 | if (!smp_found_config && !cpu_has_apic) |
1da177e4 LT |
1351 | return -1; |
1352 | ||
1353 | /* | |
1e4c85f9 | 1354 | * Complain if the BIOS pretends there is one. |
1da177e4 LT |
1355 | */ |
1356 | if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | |
1357 | printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", | |
1358 | boot_cpu_physical_apicid); | |
3777a959 | 1359 | clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); |
1da177e4 LT |
1360 | return -1; |
1361 | } | |
1362 | ||
1363 | verify_local_APIC(); | |
1364 | ||
1365 | connect_bsp_APIC(); | |
1366 | ||
be0d03f1 VG |
1367 | /* |
1368 | * Hack: In case of kdump, after a crash, kernel might be booting | |
1369 | * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid | |
1370 | * might be zero if read from MP tables. Get it from LAPIC. | |
1371 | */ | |
1372 | #ifdef CONFIG_CRASH_DUMP | |
1373 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | |
1374 | #endif | |
1e4c85f9 | 1375 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); |
1da177e4 | 1376 | |
1e4c85f9 | 1377 | setup_local_APIC(); |
1da177e4 | 1378 | |
1da177e4 | 1379 | #ifdef CONFIG_X86_IO_APIC |
1e4c85f9 LT |
1380 | if (smp_found_config) |
1381 | if (!skip_ioapic_setup && nr_ioapics) | |
1382 | setup_IO_APIC(); | |
1da177e4 LT |
1383 | #endif |
1384 | setup_boot_APIC_clock(); | |
1e4c85f9 LT |
1385 | |
1386 | return 0; | |
1da177e4 | 1387 | } |
1a3f239d RR |
1388 | |
1389 | static int __init parse_lapic(char *arg) | |
1390 | { | |
1391 | lapic_enable(); | |
1392 | return 0; | |
1393 | } | |
1394 | early_param("lapic", parse_lapic); | |
1395 | ||
1396 | static int __init parse_nolapic(char *arg) | |
1397 | { | |
1398 | lapic_disable(); | |
1399 | return 0; | |
1400 | } | |
1401 | early_param("nolapic", parse_nolapic); | |
1402 |