[net-next-2.6.git] arch/arm/kernel/setup.c
ARM: Convert platform reservations to use LMB rather than bootmem
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#include "compat.h"
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
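/*
 * Endianness probe: on a little-endian kernel the first byte of 'l' reads
 * back as 'l', on a big-endian kernel as 'b'.  ENDIANNESS is appended to
 * the utsname machine string and to elf_platform in setup_processor().
 */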
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

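/*
 * Decode the architecture version from the main ID register.  Older cores
 * encode the architecture directly in the ID register; cores using the new
 * CPUID scheme (architecture field 0xf) are told apart by reading MMFR0
 * and checking the VMSA/PMSA support fields for ARMv6 versus ARMv7.
 */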
int cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) == 0x00000003 ||
                    (mmfr0 & 0x000000f0) == 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

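/*
 * Work out the cache model from the cache type register so that the
 * cache_is_*() and icache_is_*() helpers report the correct VIVT/VIPT
 * behaviour.  Pre-ARMv6 cores are assumed to have VIVT caches.
 */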
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if ((cachetype & (3 << 14)) == 1 << 14)
                                cacheid |= CACHEID_ASID_TAGGED;
                } else if (cachetype & (1 << 23))
                        cacheid = CACHEID_VIPT_ALIASING;
                else
                        cacheid = CACHEID_VIPT_NONALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        cacheid_init();
        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
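        /*
         * The asm below switches through IRQ, abort and undefined modes
         * in turn (with interrupts masked), points each mode's sp at its
         * slot in 'stk', and finally returns to SVC mode.
         */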
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}

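/*
 * Record one bank of physical memory in meminfo, page-aligning its start
 * and size.  Used both for "mem=size@start" on the command line and for
 * each ATAG_MEM tag passed by the boot loader.
 */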
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at %#lx\n", start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;

        /*
         * Reject this bank if its size rounds down to zero after
         * the alignment above.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
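 * e.g. "mem=64M@0x20000000"; if "@start" is omitted, start defaults to
 * PHYS_OFFSET.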
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
        struct resource *res;
        int i;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_data);
        kernel_data.end     = virt_to_phys(_end - 1);

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = mi->bank[i].start;
                res->end   = mi->bank[i].start + mi->bank[i].size - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines can never have the lp0, lp1 or lp2 parallel
         * port regions, so only reserve them where the machine
         * descriptor asks for it.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}

/*
 * Default tag list, used when the boot loader supplies no usable tags:
 * an ATAG_CORE tag plus a single MEM_SIZE bank at PHYS_OFFSET.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE, PHYS_OFFSET },
        { 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (init_machine)
                init_machine();
        return 0;
}
arch_initcall(customize_machine);

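/*
 * setup_arch() is the ARM entry point for boot-time initialisation: it
 * identifies the CPU and machine, locates and parses the ATAG list, builds
 * the kernel command line, sets up memblock and paging, and records the
 * machine descriptor's irq/timer/init hooks for later use.
 */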
void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        unwind_init();

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

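        /* Let the platform's fixup hook adjust the tags, command line or meminfo. */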
        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
        smp_init_cpus();
#endif

        cpu_init();
        tcm_init();

        /*
         * Set up various architecture-specific pointers
         */
        init_arch_irq = mdesc->init_irq;
        system_timer = mdesc->timer;
        init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();
}


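/* Register each possible CPU with the sysfs CPU subsystem. */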
static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

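/* Feature strings for /proc/cpuinfo, indexed by HWCAP_* bit position. */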
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};