1/*
2 * linux/arch/x86-64/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Nov 2001 Dave Jones <davej@suse.de>
7 * Forked from i386 setup code.
8 */
9
10/*
11 * This file handles the architecture-dependent parts of initialization
12 */
13
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/stddef.h>
19#include <linux/unistd.h>
20#include <linux/ptrace.h>
21#include <linux/slab.h>
22#include <linux/user.h>
23#include <linux/a.out.h>
24#include <linux/screen_info.h>
25#include <linux/ioport.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/initrd.h>
29#include <linux/highmem.h>
30#include <linux/bootmem.h>
31#include <linux/module.h>
32#include <asm/processor.h>
33#include <linux/console.h>
34#include <linux/seq_file.h>
35#include <linux/crash_dump.h>
36#include <linux/root_dev.h>
37#include <linux/pci.h>
38#include <linux/acpi.h>
39#include <linux/kallsyms.h>
40#include <linux/edd.h>
41#include <linux/mmzone.h>
42#include <linux/kexec.h>
43#include <linux/cpufreq.h>
44#include <linux/dmi.h>
45#include <linux/dma-mapping.h>
46#include <linux/ctype.h>
47
48#include <asm/mtrr.h>
49#include <asm/uaccess.h>
50#include <asm/system.h>
51#include <asm/io.h>
52#include <asm/smp.h>
53#include <asm/msr.h>
54#include <asm/desc.h>
55#include <video/edid.h>
56#include <asm/e820.h>
57#include <asm/dma.h>
58#include <asm/mpspec.h>
59#include <asm/mmu_context.h>
60#include <asm/bootsetup.h>
61#include <asm/proto.h>
62#include <asm/setup.h>
63#include <asm/mach_apic.h>
64#include <asm/numa.h>
65#include <asm/sections.h>
66#include <asm/dmi.h>
67
68/*
69 * Machine setup..
70 */
71
72struct cpuinfo_x86 boot_cpu_data __read_mostly;
73EXPORT_SYMBOL(boot_cpu_data);
74
75unsigned long mmu_cr4_features;
76
77/* Boot loader ID as an integer, for the benefit of proc_dointvec */
78int bootloader_type;
79
80unsigned long saved_video_mode;
81
82/*
83 * Early DMI memory
84 */
85int dmi_alloc_index;
86char dmi_alloc_data[DMI_MAX_DATA];
87
88/*
89 * Setup options
90 */
91struct screen_info screen_info;
92EXPORT_SYMBOL(screen_info);
93struct sys_desc_table_struct {
94 unsigned short length;
95 unsigned char table[0];
96};
97
98struct edid_info edid_info;
99EXPORT_SYMBOL_GPL(edid_info);
100
101extern int root_mountflags;
102
103char command_line[COMMAND_LINE_SIZE];
104
105struct resource standard_io_resources[] = {
106 { .name = "dma1", .start = 0x00, .end = 0x1f,
107 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
108 { .name = "pic1", .start = 0x20, .end = 0x21,
109 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
110 { .name = "timer0", .start = 0x40, .end = 0x43,
111 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
112 { .name = "timer1", .start = 0x50, .end = 0x53,
113 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
114 { .name = "keyboard", .start = 0x60, .end = 0x6f,
115 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
116 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
117 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
118 { .name = "pic2", .start = 0xa0, .end = 0xa1,
119 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
120 { .name = "dma2", .start = 0xc0, .end = 0xdf,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "fpu", .start = 0xf0, .end = 0xff,
123 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
124};
125
126#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
127
128struct resource data_resource = {
129 .name = "Kernel data",
130 .start = 0,
131 .end = 0,
132 .flags = IORESOURCE_RAM,
133};
134struct resource code_resource = {
135 .name = "Kernel code",
136 .start = 0,
137 .end = 0,
138 .flags = IORESOURCE_RAM,
139};
140
141#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
142
143static struct resource system_rom_resource = {
144 .name = "System ROM",
145 .start = 0xf0000,
146 .end = 0xfffff,
147 .flags = IORESOURCE_ROM,
148};
149
150static struct resource extension_rom_resource = {
151 .name = "Extension ROM",
152 .start = 0xe0000,
153 .end = 0xeffff,
154 .flags = IORESOURCE_ROM,
155};
156
157static struct resource adapter_rom_resources[] = {
158 { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
159 .flags = IORESOURCE_ROM },
160 { .name = "Adapter ROM", .start = 0, .end = 0,
161 .flags = IORESOURCE_ROM },
162 { .name = "Adapter ROM", .start = 0, .end = 0,
163 .flags = IORESOURCE_ROM },
164 { .name = "Adapter ROM", .start = 0, .end = 0,
165 .flags = IORESOURCE_ROM },
166 { .name = "Adapter ROM", .start = 0, .end = 0,
167 .flags = IORESOURCE_ROM },
168 { .name = "Adapter ROM", .start = 0, .end = 0,
169 .flags = IORESOURCE_ROM }
170};
171
172static struct resource video_rom_resource = {
173 .name = "Video ROM",
174 .start = 0xc0000,
175 .end = 0xc7fff,
176 .flags = IORESOURCE_ROM,
177};
178
179static struct resource video_ram_resource = {
180 .name = "Video RAM area",
181 .start = 0xa0000,
182 .end = 0xbffff,
183 .flags = IORESOURCE_RAM,
184};
185
186#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
187
188static int __init romchecksum(unsigned char *rom, unsigned long length)
189{
190 unsigned char *p, sum = 0;
191
192 for (p = rom; p < rom + length; p++)
193 sum += *p;
194 return sum == 0;
195}
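/*
 * Illustration: an expansion ROM is accepted when it begins with the 0xaa55
 * signature and all "length" bytes sum to zero modulo 256. A 32 KB option
 * ROM, for example, stores 64 in rom[2] (64 * 512 = 32768 bytes) and pads
 * its final byte so that romchecksum() sees the sum wrap to zero.
 */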
196
197static void __init probe_roms(void)
198{
199 unsigned long start, length, upper;
200 unsigned char *rom;
201 int i;
202
203 /* video rom */
204 upper = adapter_rom_resources[0].start;
205 for (start = video_rom_resource.start; start < upper; start += 2048) {
206 rom = isa_bus_to_virt(start);
207 if (!romsignature(rom))
208 continue;
209
210 video_rom_resource.start = start;
211
212 /* 0 < length <= 0x7f * 512, historically */
213 length = rom[2] * 512;
214
215 /* if checksum okay, trust length byte */
216 if (length && romchecksum(rom, length))
217 video_rom_resource.end = start + length - 1;
218
219 request_resource(&iomem_resource, &video_rom_resource);
220 break;
221 }
222
223 start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
224 if (start < upper)
225 start = upper;
226
227 /* system rom */
228 request_resource(&iomem_resource, &system_rom_resource);
229 upper = system_rom_resource.start;
230
231 /* check for extension rom (ignore length byte!) */
232 rom = isa_bus_to_virt(extension_rom_resource.start);
233 if (romsignature(rom)) {
234 length = extension_rom_resource.end - extension_rom_resource.start + 1;
235 if (romchecksum(rom, length)) {
236 request_resource(&iomem_resource, &extension_rom_resource);
237 upper = extension_rom_resource.start;
238 }
239 }
240
241 /* check for adapter roms on 2k boundaries */
242 for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper;
243 start += 2048) {
244 rom = isa_bus_to_virt(start);
245 if (!romsignature(rom))
246 continue;
247
248 /* 0 < length <= 0x7f * 512, historically */
249 length = rom[2] * 512;
250
251 /* but accept any length that fits if checksum okay */
252 if (!length || start + length > upper || !romchecksum(rom, length))
253 continue;
254
255 adapter_rom_resources[i].start = start;
256 adapter_rom_resources[i].end = start + length - 1;
257 request_resource(&iomem_resource, &adapter_rom_resources[i]);
258
259 start = adapter_rom_resources[i++].end & ~2047UL;
260 }
261}
262
263#ifdef CONFIG_PROC_VMCORE
264/* elfcorehdr= specifies the location of elf core header
265 * stored by the crashed kernel. This option will be passed
266 * by kexec loader to the capture kernel.
267 */
268static int __init setup_elfcorehdr(char *arg)
269{
270 char *end;
271 if (!arg)
272 return -EINVAL;
273 elfcorehdr_addr = memparse(arg, &end);
274 return end > arg ? 0 : -EINVAL;
275}
276early_param("elfcorehdr", setup_elfcorehdr);
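/*
 * Usage sketch (the address below is purely illustrative): the kexec-loaded
 * capture kernel is booted with e.g. "elfcorehdr=0x2000000"; memparse()
 * also accepts K/M/G suffixes, so "elfcorehdr=32M" parses to the same
 * physical address.
 */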
277#endif
278
279#ifndef CONFIG_NUMA
280static void __init
281contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
282{
283 unsigned long bootmap_size, bootmap;
284
285 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
286 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
287 if (bootmap == -1L)
288 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
289 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
290 e820_register_active_regions(0, start_pfn, end_pfn);
291 free_bootmem_with_active_regions(0, end_pfn);
292 reserve_bootmem(bootmap, bootmap_size);
293}
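/*
 * Sizing note: the bootmem allocator keeps one bit per page frame, so a
 * machine with 4 GB of RAM (roughly one million 4 KB frames) needs a
 * 128 KB bitmap; the bitmap is carved out of an e820-usable range by
 * find_e820_area() and then reserved against itself via reserve_bootmem().
 */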
294#endif
295
296#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
297struct edd edd;
298#ifdef CONFIG_EDD_MODULE
299EXPORT_SYMBOL(edd);
300#endif
301/**
302 * copy_edd() - Copy the BIOS EDD information
303 * from boot_params into a safe place.
304 *
305 */
306static inline void copy_edd(void)
307{
308 memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
309 memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
310 edd.mbr_signature_nr = EDD_MBR_SIG_NR;
311 edd.edd_info_nr = EDD_NR;
312}
313#else
314static inline void copy_edd(void)
315{
316}
317#endif
318
319#define EBDA_ADDR_POINTER 0x40E
320
321unsigned __initdata ebda_addr;
322unsigned __initdata ebda_size;
323
324static void discover_ebda(void)
325{
326 /*
327 * there is a real-mode segmented pointer pointing to the
328 * 4K EBDA area at 0x40E
329 */
330 ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
331 ebda_addr <<= 4;
332
333 ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
334
335 /* Round EBDA up to pages */
336 if (ebda_size == 0)
337 ebda_size = 1;
338 ebda_size <<= 10;
339 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
340 if (ebda_size > 64*1024)
341 ebda_size = 64*1024;
342}
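/*
 * Worked example: if the BIOS stored 0x9fc0 at 0x40E, the EBDA starts at
 * 0x9fc00 (segment << 4). The value read from that address is the EBDA
 * size in KiB; it is rounded up to whole pages and clamped to 64K before
 * setup_arch() reserves the region.
 */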
343
344void __init setup_arch(char **cmdline_p)
345{
346 printk(KERN_INFO "Command line: %s\n", saved_command_line);
347
348 ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
349 screen_info = SCREEN_INFO;
350 edid_info = EDID_INFO;
351 saved_video_mode = SAVED_VIDEO_MODE;
352 bootloader_type = LOADER_TYPE;
353
354#ifdef CONFIG_BLK_DEV_RAM
355 rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
356 rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
357 rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
358#endif
359 setup_memory_region();
360 copy_edd();
361
362 if (!MOUNT_ROOT_RDONLY)
363 root_mountflags &= ~MS_RDONLY;
364 init_mm.start_code = (unsigned long) &_text;
365 init_mm.end_code = (unsigned long) &_etext;
366 init_mm.end_data = (unsigned long) &_edata;
367 init_mm.brk = (unsigned long) &_end;
368
369 code_resource.start = virt_to_phys(&_text);
370 code_resource.end = virt_to_phys(&_etext)-1;
371 data_resource.start = virt_to_phys(&_etext);
372 data_resource.end = virt_to_phys(&_edata)-1;
373
374 early_identify_cpu(&boot_cpu_data);
375
376 strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
377 *cmdline_p = command_line;
378
379 parse_early_param();
380
381 finish_e820_parsing();
382
383 e820_register_active_regions(0, 0, -1UL);
384 /*
385 * partially used pages are not usable - thus
386 * we are rounding upwards:
387 */
388 end_pfn = e820_end_of_ram();
389 num_physpages = end_pfn;
390
391 check_efer();
392
393 discover_ebda();
394
395 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
396
397 dmi_scan_machine();
398
399 zap_low_mappings(0);
400
401#ifdef CONFIG_ACPI
402 /*
403 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
404 * Call this early for SRAT node setup.
405 */
406 acpi_boot_table_init();
407#endif
408
409 /* How many end-of-memory variables you have, grandma! */
410 max_low_pfn = end_pfn;
411 max_pfn = end_pfn;
412 high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
413
414 /* Remove active ranges so rediscovery with NUMA-awareness happens */
415 remove_all_active_ranges();
416
417#ifdef CONFIG_ACPI_NUMA
418 /*
419 * Parse SRAT to discover nodes.
420 */
421 acpi_numa_init();
422#endif
423
424#ifdef CONFIG_NUMA
425 numa_initmem_init(0, end_pfn);
426#else
427 contig_initmem_init(0, end_pfn);
428#endif
429
430 /* Reserve direct mapping */
431 reserve_bootmem_generic(table_start << PAGE_SHIFT,
432 (table_end - table_start) << PAGE_SHIFT);
433
434 /* reserve kernel */
435 reserve_bootmem_generic(__pa_symbol(&_text),
436 __pa_symbol(&_end) - __pa_symbol(&_text));
437
438 /*
439 * reserve physical page 0 - it's a special BIOS page on many boxes,
440 * enabling clean reboots, SMP operation, laptop functions.
441 */
442 reserve_bootmem_generic(0, PAGE_SIZE);
443
444 /* reserve ebda region */
445 if (ebda_addr)
446 reserve_bootmem_generic(ebda_addr, ebda_size);
447
448#ifdef CONFIG_SMP
449 /*
450 * But first pinch a few for the stack/trampoline stuff
451 * FIXME: Don't need the extra page at 4K, but need to fix
452 * trampoline before removing it. (see the GDT stuff)
453 */
454 reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
455
456 /* Reserve SMP trampoline */
457 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
458#endif
459
460#ifdef CONFIG_ACPI_SLEEP
461 /*
462 * Reserve low memory region for sleep support.
463 */
464 acpi_reserve_bootmem();
465#endif
466 /*
467 * Find and reserve possible boot-time SMP configuration:
468 */
469 find_smp_config();
470#ifdef CONFIG_BLK_DEV_INITRD
471 if (LOADER_TYPE && INITRD_START) {
472 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
473 reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
474 initrd_start =
475 INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
476 initrd_end = initrd_start+INITRD_SIZE;
477 }
478 else {
479 printk(KERN_ERR "initrd extends beyond end of memory "
480 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
481 (unsigned long)(INITRD_START + INITRD_SIZE),
482 (unsigned long)(end_pfn << PAGE_SHIFT));
483 initrd_start = 0;
484 }
485 }
486#endif
487#ifdef CONFIG_KEXEC
488 if (crashk_res.start != crashk_res.end) {
489 reserve_bootmem_generic(crashk_res.start,
490 crashk_res.end - crashk_res.start + 1);
491 }
492#endif
493
494 paging_init();
495
496#ifdef CONFIG_PCI
497 early_quirks();
498#endif
499
500 /*
501 * set this early, so we don't allocate cpu0
502 * if the MADT list doesn't list the BSP first
503 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
504 */
505 cpu_set(0, cpu_present_map);
506#ifdef CONFIG_ACPI
507 /*
508 * Read APIC and some other early information from ACPI tables.
509 */
510 acpi_boot_init();
511#endif
512
513 init_cpu_to_node();
514
515 /*
516 * get boot-time SMP configuration:
517 */
518 if (smp_found_config)
519 get_smp_config();
520 init_apic_mappings();
521
522 /*
523 * Request address space for all standard RAM and ROM resources
524 * and also for regions reported as reserved by the e820.
525 */
526 probe_roms();
527 e820_reserve_resources();
528 e820_mark_nosave_regions();
529
530 request_resource(&iomem_resource, &video_ram_resource);
531
532 {
533 unsigned i;
534 /* request I/O space for devices used on all i[345]86 PCs */
535 for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
536 request_resource(&ioport_resource, &standard_io_resources[i]);
537 }
538
539 e820_setup_gap();
540
541#ifdef CONFIG_VT
542#if defined(CONFIG_VGA_CONSOLE)
543 conswitchp = &vga_con;
544#elif defined(CONFIG_DUMMY_CONSOLE)
545 conswitchp = &dummy_con;
546#endif
547#endif
548}
549
550static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
551{
552 unsigned int *v;
553
554 if (c->extended_cpuid_level < 0x80000004)
555 return 0;
556
557 v = (unsigned int *) c->x86_model_id;
558 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
559 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
560 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
561 c->x86_model_id[48] = 0;
562 return 1;
563}
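/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the 48-byte
 * processor brand string in EAX..EDX (e.g. "AMD Opteron(tm) Processor 250");
 * the store to x86_model_id[48] guarantees NUL termination.
 */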
564
565
566static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
567{
568 unsigned int n, dummy, eax, ebx, ecx, edx;
569
570 n = c->extended_cpuid_level;
571
572 if (n >= 0x80000005) {
573 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
574 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
575 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
576 c->x86_cache_size=(ecx>>24)+(edx>>24);
577 /* On K8 L1 TLB is inclusive, so don't count it */
578 c->x86_tlbsize = 0;
579 }
580
581 if (n >= 0x80000006) {
582 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
583 ecx = cpuid_ecx(0x80000006);
584 c->x86_cache_size = ecx >> 16;
585 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
586
587 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
588 c->x86_cache_size, ecx & 0xFF);
589 }
590
591 if (n >= 0x80000007)
592 cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
593 if (n >= 0x80000008) {
594 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
595 c->x86_virt_bits = (eax >> 8) & 0xff;
596 c->x86_phys_bits = eax & 0xff;
597 }
598}
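/*
 * Decode example: with CPUID 0x80000005 returning EDX>>24 == 64 and
 * EDX&0xff == 64, the printk above reports a 64K L1 instruction cache with
 * 64 bytes/line; for leaf 0x80000006, ECX>>16 is the L2 size in KB and
 * ECX&0xff its line size.
 */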
599
600#ifdef CONFIG_NUMA
601static int nearby_node(int apicid)
602{
603 int i;
604 for (i = apicid - 1; i >= 0; i--) {
605 int node = apicid_to_node[i];
606 if (node != NUMA_NO_NODE && node_online(node))
607 return node;
608 }
609 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
610 int node = apicid_to_node[i];
611 if (node != NUMA_NO_NODE && node_online(node))
612 return node;
613 }
614 return first_node(node_online_map); /* Shouldn't happen */
615}
616#endif
617
618/*
619 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
620 * Assumes number of cores is a power of two.
621 */
622static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
623{
624#ifdef CONFIG_SMP
625 unsigned bits;
626#ifdef CONFIG_NUMA
627 int cpu = smp_processor_id();
628 int node = 0;
629 unsigned apicid = hard_smp_processor_id();
630#endif
631 unsigned ecx = cpuid_ecx(0x80000008);
632
633 c->x86_max_cores = (ecx & 0xff) + 1;
634
635 /* CPU telling us the core id bits shift? */
636 bits = (ecx >> 12) & 0xF;
637
638 /* Otherwise recompute */
639 if (bits == 0) {
640 while ((1 << bits) < c->x86_max_cores)
641 bits++;
642 }
643
644 /* Low order bits define the core id (index of core in socket) */
645 c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
646 /* Convert the APIC ID into the socket ID */
647 c->phys_proc_id = phys_pkg_id(bits);
648
649#ifdef CONFIG_NUMA
650 node = c->phys_proc_id;
651 if (apicid_to_node[apicid] != NUMA_NO_NODE)
652 node = apicid_to_node[apicid];
653 if (!node_online(node)) {
654 /* Two possibilities here:
655 - The CPU is missing memory and no node was created.
656 In that case try picking one from a nearby CPU
657 - The APIC IDs differ from the HyperTransport node IDs
658 which the K8 northbridge parsing fills in.
659 Assume they are all increased by a constant offset,
660 but in the same order as the HT nodeids.
661 If that doesn't result in a usable node fall back to the
662 path for the previous case. */
663 int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
664 if (ht_nodeid >= 0 &&
665 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
666 node = apicid_to_node[ht_nodeid];
667 /* Pick a nearby node */
668 if (!node_online(node))
669 node = nearby_node(apicid);
670 }
671 numa_set_node(cpu, node);
672
673 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
674#endif
675#endif
676}
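/*
 * Worked example for a dual-core Opteron: CPUID 0x80000008 ECX yields
 * x86_max_cores = 2 and bits = 1, and phys_pkg_id() here shifts the APIC
 * id right by "bits"; an initial APIC id of 3 therefore decodes to
 * cpu_core_id = 3 & 1 = 1 and phys_proc_id (the socket) = 3 >> 1 = 1.
 */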
677
678static void __cpuinit init_amd(struct cpuinfo_x86 *c)
679{
680 unsigned level;
681
682#ifdef CONFIG_SMP
683 unsigned long value;
684
685 /*
686 * Disable TLB flush filter by setting HWCR.FFDIS on K8
687 * bit 6 of msr C001_0015
688 *
689 * Errata 63 for SH-B3 steppings
690 * Errata 122 for all steppings (F+ have it disabled by default)
691 */
692 if (c->x86 == 15) {
693 rdmsrl(MSR_K8_HWCR, value);
694 value |= 1 << 6;
695 wrmsrl(MSR_K8_HWCR, value);
696 }
697#endif
698
699 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
700 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
701 clear_bit(0*32+31, &c->x86_capability);
702
703 /* On C+ stepping K8 rep microcode works well for copy/memset */
704 level = cpuid_eax(1);
705 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
706 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
707
708 /* Enable workaround for FXSAVE leak */
709 if (c->x86 >= 6)
710 set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
711
712 level = get_model_name(c);
713 if (!level) {
714 switch (c->x86) {
715 case 15:
716 /* Should distinguish Models here, but this is only
717 a fallback anyway. */
718 strcpy(c->x86_model_id, "Hammer");
719 break;
720 }
721 }
722 display_cacheinfo(c);
723
724 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
725 if (c->x86_power & (1<<8))
726 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
727
728 /* Multi core CPU? */
729 if (c->extended_cpuid_level >= 0x80000008)
730 amd_detect_cmp(c);
731
732 /* Fix cpuid4 emulation for more */
733 num_cache_leaves = 3;
734
735 /* When there is only one core no need to synchronize RDTSC */
736 if (num_possible_cpus() == 1)
737 set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
738 else
739 clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
740}
741
742static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
743{
744#ifdef CONFIG_SMP
745 u32 eax, ebx, ecx, edx;
746 int index_msb, core_bits;
747
748 cpuid(1, &eax, &ebx, &ecx, &edx);
749
750
751 if (!cpu_has(c, X86_FEATURE_HT))
752 return;
753 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
754 goto out;
755
756 smp_num_siblings = (ebx & 0xff0000) >> 16;
757
758 if (smp_num_siblings == 1) {
759 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
94605eff
SS
760 } else if (smp_num_siblings > 1 ) {
761
1da177e4
LT
762 if (smp_num_siblings > NR_CPUS) {
763 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
764 smp_num_siblings = 1;
765 return;
766 }
767
768 index_msb = get_count_order(smp_num_siblings);
769 c->phys_proc_id = phys_pkg_id(index_msb);
770
771 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
772
773 index_msb = get_count_order(smp_num_siblings) ;
774
775 core_bits = get_count_order(c->x86_max_cores);
776
777 c->cpu_core_id = phys_pkg_id(index_msb) &
778 ((1 << core_bits) - 1);
779 }
780out:
781 if ((c->x86_max_cores * smp_num_siblings) > 1) {
782 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
783 printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
784 }
785
786#endif
787}
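/*
 * Example: a dual-core, HyperThreaded package reports 4 logical CPUs in
 * EBX[23:16] of CPUID leaf 1. With x86_max_cores = 2 that leaves
 * smp_num_siblings = 2 threads per core and core_bits = 1, so the core id
 * is taken from APIC id bit 1 and the physical id from the bits above it.
 */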
788
789/*
790 * find out the number of processor cores on the die
791 */
792static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
793{
794 unsigned int eax, t;
795
796 if (c->cpuid_level < 4)
797 return 1;
798
799 cpuid_count(4, 0, &eax, &t, &t, &t);
800
801 if (eax & 0x1f)
802 return ((eax >> 26) + 1);
803 else
804 return 1;
805}
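/*
 * CPUID leaf 4 (deterministic cache parameters), sub-leaf 0: bits 31:26 of
 * EAX hold "cores per physical package - 1", so EAX >> 26 == 1 denotes a
 * dual-core die and the function returns 2. A zero cache-type field
 * (EAX & 0x1f) means leaf 4 reports nothing and a single core is assumed.
 */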
806
807static void srat_detect_node(void)
808{
809#ifdef CONFIG_NUMA
810 unsigned node;
811 int cpu = smp_processor_id();
812 int apicid = hard_smp_processor_id();
813
814 /* Don't do the funky fallback heuristics the AMD version employs
815 for now. */
816 node = apicid_to_node[apicid];
817 if (node == NUMA_NO_NODE)
818 node = first_node(node_online_map);
819 numa_set_node(cpu, node);
820
821 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
822#endif
823}
824
825static void __cpuinit init_intel(struct cpuinfo_x86 *c)
826{
827 /* Cache sizes */
828 unsigned n;
829
830 init_intel_cacheinfo(c);
831 if (c->cpuid_level > 9 ) {
832 unsigned eax = cpuid_eax(10);
833 /* Check for version and the number of counters */
834 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
835 set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
836 }
837
838 if (cpu_has_ds) {
839 unsigned int l1, l2;
840 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
841 if (!(l1 & (1<<12)))
842 set_bit(X86_FEATURE_PEBS, c->x86_capability);
843 }
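/*
 * MSR_IA32_MISC_ENABLE bit 12 reads as 1 when PEBS is unavailable, so a
 * clear bit means the CPU supports Precise Event Based Sampling and the
 * X86_FEATURE_PEBS flag is set for it.
 */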
844
845 n = c->extended_cpuid_level;
846 if (n >= 0x80000008) {
847 unsigned eax = cpuid_eax(0x80000008);
848 c->x86_virt_bits = (eax >> 8) & 0xff;
849 c->x86_phys_bits = eax & 0xff;
850 /* CPUID workaround for Intel 0F34 CPU */
851 if (c->x86_vendor == X86_VENDOR_INTEL &&
852 c->x86 == 0xF && c->x86_model == 0x3 &&
853 c->x86_mask == 0x4)
854 c->x86_phys_bits = 36;
855 }
856
857 if (c->x86 == 15)
858 c->x86_cache_alignment = c->x86_clflush_size * 2;
859 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
860 (c->x86 == 0x6 && c->x86_model >= 0x0e))
861 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
862 if (c->x86 == 6)
863 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
864 set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
865 c->x86_max_cores = intel_num_cpu_cores(c);
866
867 srat_detect_node();
868}
869
870static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
871{
872 char *v = c->x86_vendor_id;
873
874 if (!strcmp(v, "AuthenticAMD"))
875 c->x86_vendor = X86_VENDOR_AMD;
876 else if (!strcmp(v, "GenuineIntel"))
877 c->x86_vendor = X86_VENDOR_INTEL;
878 else
879 c->x86_vendor = X86_VENDOR_UNKNOWN;
880}
881
882struct cpu_model_info {
883 int vendor;
884 int family;
885 char *model_names[16];
886};
887
888/* Do some early cpuid on the boot CPU to get the parameters that are
889 needed before check_bugs. Everything advanced is in identify_cpu
890 below. */
891void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
892{
893 u32 tfms;
894
895 c->loops_per_jiffy = loops_per_jiffy;
896 c->x86_cache_size = -1;
897 c->x86_vendor = X86_VENDOR_UNKNOWN;
898 c->x86_model = c->x86_mask = 0; /* So far unknown... */
899 c->x86_vendor_id[0] = '\0'; /* Unset */
900 c->x86_model_id[0] = '\0'; /* Unset */
901 c->x86_clflush_size = 64;
902 c->x86_cache_alignment = c->x86_clflush_size;
903 c->x86_max_cores = 1;
904 c->extended_cpuid_level = 0;
905 memset(&c->x86_capability, 0, sizeof c->x86_capability);
906
907 /* Get vendor name */
908 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
909 (unsigned int *)&c->x86_vendor_id[0],
910 (unsigned int *)&c->x86_vendor_id[8],
911 (unsigned int *)&c->x86_vendor_id[4]);
912
913 get_cpu_vendor(c);
914
915 /* Initialize the standard set of capabilities */
916 /* Note that the vendor-specific code below might override */
917
918 /* Intel-defined flags: level 0x00000001 */
919 if (c->cpuid_level >= 0x00000001) {
920 __u32 misc;
921 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
922 &c->x86_capability[0]);
923 c->x86 = (tfms >> 8) & 0xf;
924 c->x86_model = (tfms >> 4) & 0xf;
925 c->x86_mask = tfms & 0xf;
926 if (c->x86 == 0xf)
927 c->x86 += (tfms >> 20) & 0xff;
928 if (c->x86 >= 0x6)
929 c->x86_model += ((tfms >> 16) & 0xF) << 4;
930 if (c->x86_capability[0] & (1<<19))
931 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
932 } else {
933 /* Have CPUID level 0 only - unheard of */
934 c->x86 = 4;
935 }
936
937#ifdef CONFIG_SMP
938 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
939#endif
940}
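/*
 * Decode example: tfms = 0x00000f43 yields family 0xf, model 4, stepping 3
 * (the extended family and model fields in bits 27:20 and 19:16 are only
 * folded in for family 0xf and family >= 6 respectively). If CLFLUSH is
 * advertised (EDX bit 19), MISC bits 15:8 give the flush line size in
 * 8-byte units, e.g. 8 -> 64 bytes.
 */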
941
942/*
943 * This does the hard work of actually picking apart the CPU stuff...
944 */
945void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
946{
947 int i;
948 u32 xlvl;
949
950 early_identify_cpu(c);
951
952 /* AMD-defined flags: level 0x80000001 */
953 xlvl = cpuid_eax(0x80000000);
954 c->extended_cpuid_level = xlvl;
955 if ((xlvl & 0xffff0000) == 0x80000000) {
956 if (xlvl >= 0x80000001) {
957 c->x86_capability[1] = cpuid_edx(0x80000001);
958 c->x86_capability[6] = cpuid_ecx(0x80000001);
959 }
960 if (xlvl >= 0x80000004)
961 get_model_name(c); /* Default name */
962 }
963
964 /* Transmeta-defined flags: level 0x80860001 */
965 xlvl = cpuid_eax(0x80860000);
966 if ((xlvl & 0xffff0000) == 0x80860000) {
967 /* Don't set x86_cpuid_level here for now, to avoid confusion. */
968 if (xlvl >= 0x80860001)
969 c->x86_capability[2] = cpuid_edx(0x80860001);
970 }
971
972 c->apicid = phys_pkg_id(0);
973
974 /*
975 * Vendor-specific initialization. In this section we
976 * canonicalize the feature flags, meaning if there are
977 * features a certain CPU supports which CPUID doesn't
978 * tell us, CPUID claiming incorrect flags, or other bugs,
979 * we handle them here.
980 *
981 * At the end of this section, c->x86_capability better
982 * indicate the features this CPU genuinely supports!
983 */
984 switch (c->x86_vendor) {
985 case X86_VENDOR_AMD:
986 init_amd(c);
987 break;
988
989 case X86_VENDOR_INTEL:
990 init_intel(c);
991 break;
992
993 case X86_VENDOR_UNKNOWN:
994 default:
995 display_cacheinfo(c);
996 break;
997 }
998
999 select_idle_routine(c);
1000 detect_ht(c);
1001
1002 /*
1003 * On SMP, boot_cpu_data holds the common feature set between
1004 * all CPUs; so make sure that we indicate which features are
1005 * common between the CPUs. The first time this routine gets
1006 * executed, c == &boot_cpu_data.
1007 */
1008 if (c != &boot_cpu_data) {
1009 /* AND the already accumulated flags with these */
1010 for (i = 0 ; i < NCAPINTS ; i++)
1011 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1012 }
1013
1014#ifdef CONFIG_X86_MCE
1015 mcheck_init(c);
1016#endif
1017 if (c == &boot_cpu_data)
1018 mtrr_bp_init();
1019 else
1020 mtrr_ap_init();
1021#ifdef CONFIG_NUMA
1022 numa_add_cpu(smp_processor_id());
1023#endif
1024}
1025
1026
1027void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1028{
1029 if (c->x86_model_id[0])
1030 printk("%s", c->x86_model_id);
1031
1032 if (c->x86_mask || c->cpuid_level >= 0)
1033 printk(" stepping %02x\n", c->x86_mask);
1034 else
1035 printk("\n");
1036}
1037
1038/*
1039 * Get CPU information for use by the procfs.
1040 */
1041
1042static int show_cpuinfo(struct seq_file *m, void *v)
1043{
1044 struct cpuinfo_x86 *c = v;
1045
1046 /*
1047 * These flag bits must match the definitions in <asm/cpufeature.h>.
1048 * NULL means this bit is undefined or reserved; either way it doesn't
1049 * have meaning as far as Linux is concerned. Note that it's important
1050 * to realize there is a difference between this table and CPUID -- if
1051 * applications want to get the raw CPUID data, they should access
1052 * /dev/cpu/<cpu_nr>/cpuid instead.
1053 */
1054 static char *x86_cap_flags[] = {
1055 /* Intel-defined */
1056 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1057 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1058 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1059 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
1060
1061 /* AMD-defined */
1062 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1064 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1065 NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
1066
1067 /* Transmeta-defined */
1068 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1069 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1070 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1071 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1072
1073 /* Other (Linux-defined) */
1074 "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
1075 "constant_tsc", NULL, NULL,
1076 "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1077 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1078 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1079
1080 /* Intel-defined (#2) */
1081 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
1082 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
1083 NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1084 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1085
1086 /* VIA/Cyrix/Centaur-defined */
1087 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
1088 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1089 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1090 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1091
1092 /* AMD-defined (#2) */
1093 "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
1094 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1095 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1096 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1097 };
1098 static char *x86_power_flags[] = {
1099 "ts", /* temperature sensor */
1100 "fid", /* frequency id control */
1101 "vid", /* voltage id control */
1102 "ttp", /* thermal trip */
1103 "tm",
1104 "stc",
1105 NULL,
1106 /* nothing */ /* constant_tsc - moved to flags */
1107 };
1108
1109
1110#ifdef CONFIG_SMP
1111 if (!cpu_online(c-cpu_data))
1112 return 0;
1113#endif
1114
1115 seq_printf(m,"processor\t: %u\n"
1116 "vendor_id\t: %s\n"
1117 "cpu family\t: %d\n"
1118 "model\t\t: %d\n"
1119 "model name\t: %s\n",
1120 (unsigned)(c-cpu_data),
1121 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1122 c->x86,
1123 (int)c->x86_model,
1124 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1125
1126 if (c->x86_mask || c->cpuid_level >= 0)
1127 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1128 else
1129 seq_printf(m, "stepping\t: unknown\n");
1130
1131 if (cpu_has(c,X86_FEATURE_TSC)) {
1132 unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
1133 if (!freq)
1134 freq = cpu_khz;
1135 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
1136 freq / 1000, (freq % 1000));
1137 }
1138
1139 /* Cache size */
1140 if (c->x86_cache_size >= 0)
1141 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
1142
1143#ifdef CONFIG_SMP
1144 if (smp_num_siblings * c->x86_max_cores > 1) {
1145 int cpu = c - cpu_data;
1146 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
1147 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
1148 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
1149 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
1150 }
1151#endif
1152
1153 seq_printf(m,
1154 "fpu\t\t: yes\n"
1155 "fpu_exception\t: yes\n"
1156 "cpuid level\t: %d\n"
1157 "wp\t\t: yes\n"
1158 "flags\t\t:",
1159 c->cpuid_level);
1160
1161 {
1162 int i;
1163 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1164 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1165 seq_printf(m, " %s", x86_cap_flags[i]);
1166 }
1167
1168 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1169 c->loops_per_jiffy/(500000/HZ),
1170 (c->loops_per_jiffy/(5000/HZ)) % 100);
1171
1172 if (c->x86_tlbsize > 0)
1173 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1174 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1175 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1176
1177 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1178 c->x86_phys_bits, c->x86_virt_bits);
1179
1180 seq_printf(m, "power management:");
1181 {
1182 unsigned i;
1183 for (i = 0; i < 32; i++)
1184 if (c->x86_power & (1 << i)) {
1185 if (i < ARRAY_SIZE(x86_power_flags) &&
1186 x86_power_flags[i])
1187 seq_printf(m, "%s%s",
1188 x86_power_flags[i][0]?" ":"",
1189 x86_power_flags[i]);
1190 else
1191 seq_printf(m, " [%d]", i);
1192 }
1193 }
1194
1195 seq_printf(m, "\n\n");
1196
1197 return 0;
1198}
1199
1200static void *c_start(struct seq_file *m, loff_t *pos)
1201{
1202 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
1203}
1204
1205static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1206{
1207 ++*pos;
1208 return c_start(m, pos);
1209}
1210
1211static void c_stop(struct seq_file *m, void *v)
1212{
1213}
1214
1215struct seq_operations cpuinfo_op = {
1216 .start =c_start,
1217 .next = c_next,
1218 .stop = c_stop,
1219 .show = show_cpuinfo,
1220};
1221
1222#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
1223#include <linux/platform_device.h>
1224static __init int add_pcspkr(void)
1225{
1226 struct platform_device *pd;
1227 int ret;
1228
1229 pd = platform_device_alloc("pcspkr", -1);
1230 if (!pd)
1231 return -ENOMEM;
1232
1233 ret = platform_device_add(pd);
1234 if (ret)
1235 platform_device_put(pd);
1236
1237 return ret;
1238}
1239device_initcall(add_pcspkr);
1240#endif