/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>

#include <asm/a.out.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
DEFINE_PER_CPU(long, __pgtable_quicklist_size);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

#define MIN_PGT_PAGES			25UL
#define MAX_PGT_FREES_PER_PASS		16L
#define PGT_FRACTION_OF_NODE_MEM	16

static inline long
max_pgt_pages(void)
{
	u64 node_free_pages, max_pgt_pages;

#ifndef CONFIG_NUMA
	node_free_pages = nr_free_pages();
#else
	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
#endif
	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
	return max_pgt_pages;
}

static inline long
min_pages_to_free(void)
{
	long pages_to_free;

	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
	return pages_to_free;
}

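/*
 * Illustrative sizing (hypothetical numbers, assuming 16KB pages): a node
 * with 1GB free has 65536 free pages, so max_pgt_pages() allows
 * 65536/16 = 4096 cached page-table pages.  A quicklist that has grown
 * to 4140 pages would then be trimmed in three passes of at most
 * MAX_PGT_FREES_PER_PASS (16) pages each.
 */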
void
check_pgt_cache(void)
{
	long pages_to_free;

	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
		return;

	preempt_disable();
	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
		while (pages_to_free--) {
			free_page((unsigned long)pgtable_quicklist_alloc());
		}
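		/*
		 * Yield between passes: the quicklist and its size are
		 * per-CPU, so preemption must be re-disabled before they
		 * are examined again (possibly on a different CPU).
		 */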
		preempt_enable();
		preempt_disable();
	}
	preempt_enable();
}

void
lazy_mmu_prot_update (pte_t pte)
{
	unsigned long addr;
	struct page *page;
	unsigned long order;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	if (PageCompound(page)) {
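		/*
		 * For compound pages, the allocator stashed the order in
		 * page[1].lru.prev, so the whole huge page can be flushed
		 * with a single call.
		 */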
		order = (unsigned long) (page[1].lru.prev);
		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
	}
	else
		flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
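	/*
	 * The init section was marked reserved at boot; clearing
	 * PG_reserved and resetting the count to one lets free_page()
	 * hand each page back to the page allocator.
	 */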
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

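	/*
	 * Worked example (illustrative numbers only): with 16KB pages,
	 * PAGE_SHIFT is 14, so mapped_space_bits = 3*(14-3) + 14 = 47.
	 * A CPU implementing 51 virtual-address bits then gives
	 * vmlpt_bits = 51 - 14 + 3 = 40, placing the VMLPT in the top
	 * 2^40 bytes of each 2^61-byte region.
	 */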
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

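	/*
	 * Walk the kernel page table over the vmem_map range, skipping
	 * unmapped stretches a whole PGDIR/PUD/PMD at a time and stepping
	 * page-by-page only once a populated pmd is found.
	 */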
	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

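/*
 * Build kernel page tables covering the vmem_map entries for [start, end),
 * allocating the backing pages from node-local bootmem; intended to be
 * called for each usable memory range (e.g. via efi_memmap_walk()).
 */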
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
	return 0;
}

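/*
 * With a virtual mem_map, only the ranges reported by efi_memmap_walk()
 * have backing struct pages, so initialization is clipped to those;
 * without one, the generic zone initializer covers the whole span.
 */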
void
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

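/*
 * A pfn is valid only if the portion of vmem_map backing its struct page
 * is actually mapped: probe the first byte and, when the entry straddles
 * a page boundary, the last byte as well.
 */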
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
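/*
 * Tell the generic zone setup which pfn ranges are usable.  Node 0 is
 * assumed here; this callback appears to serve the contiguous (non-NUMA)
 * mem_map path.
 */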
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */

static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

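	/*
	 * The page-table quicklists hand out single pages for every level,
	 * so each level's table must be exactly one page.
	 */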
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

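/*
 * Hot-added memory is always onlined into the node's ZONE_NORMAL here;
 * ia64 has no highmem to worry about.
 */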
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(zone, start_pfn, nr_pages);

	if (ret)
		printk(KERN_ERR "%s: __add_pages() failed: ret=%d\n",
		       __FUNCTION__, ret);

	return ret;
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif