/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#include "compat.h"
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

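/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes; the exception entry code only needs a few words here
 * before it switches over to the SVC-mode kernel stack.
 */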
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
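/*
 * ENDIANNESS evaluates to the least significant byte of endian_test.l,
 * i.e. 'l' on a little-endian CPU (overlaying c[0]) and 'b' on a
 * big-endian one (overlaying c[3]); it is appended to the machine and
 * ELF platform names in setup_processor().
 */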

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

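/*
 * Work out the CPU architecture from the main ID register: pre-CPUID
 * cores are matched on fixed ID patterns, while cores using the new
 * CPUID scheme (architecture field == 0xf) are classified via the
 * MMFR0 feature register.
 */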
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

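/*
 * Decode the cache type register into the global 'cacheid' flags
 * (VIVT, VIPT aliasing/non-aliasing, ASID-tagged I-cache) that the
 * cache maintenance and mm code consult at run time.
 */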
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

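/*
 * Identify the CPU from its ID register and latch the matching
 * proc_info_list entry: CPU name, hwcaps and, on multi-CPU-type kernels,
 * the per-type processor/TLB/user/cache function tables.
 */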
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
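	/*
	 * Switch briefly into IRQ, ABT and UND mode to point each mode's
	 * banked stack pointer at its slot in 'stk', then drop back to
	 * SVC mode.
	 */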
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0 || bank->node >= MAX_NUMNODES)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
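/* e.g. "mem=64M@0xc0000000" registers a single 64MB bank at that physical base. */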

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_data);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * Default tag list, used when the boot loader does not supply a valid
 * one: a single memory bank of MEM_SIZE starting at PHYS_OFFSET.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserve memory for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter.  The reserved memory is used by a dump
 * capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

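	/*
	 * If the machine fixup handler already filled in meminfo, squash any
	 * ATAG_MEM entries so they cannot override it, then record and parse
	 * the remaining tags.
	 */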
	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	arch_nr_irqs = mdesc->nr_irqs;
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

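/* Create the /proc/cpu directory; other ARM code populates it. */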
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

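/*
 * Keep this list in HWCAP_* bit order: c_show() prints hwcap_str[i]
 * when bit i is set in elf_hwcap.
 */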
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};

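/* seq_file back end for /proc/cpuinfo (see cpuinfo_op below). */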
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};