]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #include <linux/init.h> |
2 | #include <linux/kernel.h> | |
3 | ||
4 | #include <linux/string.h> | |
5 | #include <linux/bitops.h> | |
6 | #include <linux/smp.h> | |
83ce4009 | 7 | #include <linux/sched.h> |
1da177e4 | 8 | #include <linux/thread_info.h> |
53e86b91 | 9 | #include <linux/module.h> |
8bdbd962 | 10 | #include <linux/uaccess.h> |
1da177e4 LT |
11 | |
12 | #include <asm/processor.h> | |
d72b1b4f | 13 | #include <asm/pgtable.h> |
1da177e4 | 14 | #include <asm/msr.h> |
eee3af4a | 15 | #include <asm/ds.h> |
73bdb73f | 16 | #include <asm/bugs.h> |
1f442d70 | 17 | #include <asm/cpu.h> |
1da177e4 | 18 | |
185f3b9d | 19 | #ifdef CONFIG_X86_64 |
8bdbd962 | 20 | #include <linux/topology.h> |
185f3b9d YL |
21 | #include <asm/numa_64.h> |
22 | #endif | |
23 | ||
1da177e4 LT |
24 | #include "cpu.h" |
25 | ||
26 | #ifdef CONFIG_X86_LOCAL_APIC | |
27 | #include <asm/mpspec.h> | |
28 | #include <asm/apic.h> | |
1da177e4 LT |
29 | #endif |
30 | ||
/*
 * Early Intel CPU setup: runs before the generic identification pass,
 * so only family/model/stepping and early-detect data in @c are valid.
 */
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		/*
		 * Some BIOSes limit the maximum CPUID level (for old OSes
		 * that choke on high leaves); clear the limit bit and
		 * re-read the true maximum basic leaf.
		 */
		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
		}
	}

	/*
	 * P4 (family 0xf) from model 3 and Core (family 6) from model 0xe
	 * have a TSC that ticks at a constant rate across P-states.
	 */
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

#ifdef CONFIG_X86_64
	/* 64-bit Intel parts support SYSENTER from 32-bit compat mode. */
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
		/* Let the scheduler trust the TSC as a stable clock. */
		sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif
}
113 | ||
185f3b9d | 114 | #ifdef CONFIG_X86_32 |
1da177e4 LT |
115 | /* |
116 | * Early probe support logic for ppro memory erratum #50 | |
117 | * | |
118 | * This is called before we do cpu ident work | |
119 | */ | |
65eb6b43 | 120 | |
3bc9b76b | 121 | int __cpuinit ppro_with_ram_bug(void) |
1da177e4 LT |
122 | { |
123 | /* Uses data from early_cpu_detect now */ | |
124 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | |
125 | boot_cpu_data.x86 == 6 && | |
126 | boot_cpu_data.x86_model == 1 && | |
127 | boot_cpu_data.x86_mask < 8) { | |
128 | printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); | |
129 | return 1; | |
130 | } | |
131 | return 0; | |
132 | } | |
65eb6b43 | 133 | |
4052704d YL |
134 | #ifdef CONFIG_X86_F00F_BUG |
/*
 * F0 0F bug workaround setup: remap the kernel IDT read-only at its
 * fixmap slot and point IDTR at that mapping.
 */
static void __cpuinit trap_init_f00f_bug(void)
{
	/* Map idt_table read-only at the FIX_F00F_IDT fixmap address. */
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
146 | #endif | |
147 | ||
1f442d70 YL |
148 | static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) |
149 | { | |
150 | #ifdef CONFIG_SMP | |
151 | /* calling is from identify_secondary_cpu() ? */ | |
152 | if (c->cpu_index == boot_cpu_id) | |
153 | return; | |
154 | ||
155 | /* | |
156 | * Mask B, Pentium, but not Pentium MMX | |
157 | */ | |
158 | if (c->x86 == 5 && | |
159 | c->x86_mask >= 1 && c->x86_mask <= 4 && | |
160 | c->x86_model <= 3) { | |
161 | /* | |
162 | * Remember we have B step Pentia with bugs | |
163 | */ | |
164 | WARN_ONCE(1, "WARNING: SMP operation may be unreliable" | |
165 | "with B stepping processors.\n"); | |
166 | } | |
167 | #endif | |
168 | } | |
169 | ||
/*
 * Apply 32-bit-only Intel errata workarounds and tuning.  Called from
 * init_intel() after early_init_intel(), before topology detection.
 */
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		/* Static flag: the IDT remap must only be done once. */
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	/* NUMAQ: disable TSC use — presumably unreliable there; see numaq_tsc_disable(). */
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
250 | #else | |
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	/* 64-bit build: the 32-bit-only errata do not apply; nothing to do. */
}
185f3b9d YL |
254 | #endif |
255 | ||
/*
 * Bind the current CPU to a NUMA node using the SRAT-derived
 * apicid_to_node[] mapping.  Compiled to a no-op unless both
 * CONFIG_NUMA and CONFIG_X86_64 are set.
 */
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	/* Prefer the hardware APIC id when an APIC is present. */
	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	/* No SRAT entry: fall back to the first online node. */
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	else if (!node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
277 | ||
3dd9d514 AK |
278 | /* |
279 | * find out the number of processor cores on the die | |
280 | */ | |
f69feff7 | 281 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) |
3dd9d514 | 282 | { |
f2ab4461 | 283 | unsigned int eax, ebx, ecx, edx; |
3dd9d514 AK |
284 | |
285 | if (c->cpuid_level < 4) | |
286 | return 1; | |
287 | ||
f2ab4461 ZA |
288 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ |
289 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); | |
3dd9d514 | 290 | if (eax & 0x1f) |
8bdbd962 | 291 | return (eax >> 26) + 1; |
3dd9d514 AK |
292 | else |
293 | return 1; | |
294 | } | |
295 | ||
e38e05a8 SY |
/*
 * Probe the VMX capability MSRs and set/clear the X86_FEATURE_* bits
 * for the secondary virtualisation features (TPR shadow, virtual NMI,
 * flexpriority, EPT, VPID) accordingly.
 */
static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	/* Start from a clean slate; only set what the MSRs advertise. */
	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	/*
	 * NOTE(review): OR-ing the low and high halves treats a control as
	 * present if it may be set to 1 ("allowed-1" lives in the high
	 * word) — confirm against the SDM's VMX capability MSR layout.
	 */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		/* Flexpriority requires virtual-APIC access AND TPR shadow. */
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
333 | ||
/*
 * Main Intel-specific initialisation: runs the early and workaround
 * hooks, then detects topology, caches, perf counters, debug-store and
 * VMX features, and (32-bit) fixes up legacy model names.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	/* CPUID leaf 6, ECX bit 0: APERF/MPERF feedback registers. */
	if (c->cpuid_level > 6) {
		unsigned ecx = cpuid_ecx(6);
		if (ecx & 0x01)
			set_cpu_cap(c, X86_FEATURE_APERFMPERF);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;
		/*
		 * NOTE(review): this rdmsr clobbers l2 (the cache size from
		 * init_intel_cacheinfo) with the MSR's high word, so the
		 * 32-bit Celeron naming below may see l2 == 0 — confirm
		 * whether that is intended.
		 */
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		/* MISC_ENABLE bit 11 clear => BTS usable; bit 12 clear => PEBS. */
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
		ds_init_intel(c);
	}

	/* Atom (family 6, model 29) MONITOR needs a CLFLUSH workaround. */
	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);
}
1da177e4 | 442 | |
185f3b9d | 443 | #ifdef CONFIG_X86_32 |
65eb6b43 | 444 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
1da177e4 | 445 | { |
65eb6b43 PC |
446 | /* |
447 | * Intel PIII Tualatin. This comes in two flavours. | |
1da177e4 LT |
448 | * One has 256kb of cache, the other 512. We have no way |
449 | * to determine which, so we use a boottime override | |
450 | * for the 512kb model, and assume 256 otherwise. | |
451 | */ | |
452 | if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) | |
453 | size = 256; | |
454 | return size; | |
455 | } | |
185f3b9d | 456 | #endif |
1da177e4 | 457 | |
/*
 * Intel vendor descriptor: hooks and legacy name tables consumed by the
 * generic x86 CPU identification code.
 */
static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor = "Intel",
	.c_ident = { "GenuineIntel" },
#ifdef CONFIG_X86_32
	/* Legacy family/model name tables, used on 32-bit only. */
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	/* Tualatin cache-size fixup (see intel_size_cache above). */
	.c_size_cache = intel_size_cache,
#endif
	.c_early_init = early_init_intel,
	.c_init = init_intel,
	.c_x86_vendor = X86_VENDOR_INTEL,
};

/* Register this vendor descriptor with the generic CPU setup code. */
cpu_dev_register(intel_cpu_dev);
1da177e4 | 519 |