/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in the non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}
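
/*
 * Note for readers: in the two-level (non-PAE) configuration the pud
 * and pmd levels are folded, so the pud_offset()/pmd_offset() calls in
 * the #else branch are compile-time no-ops that pass the pgd entry
 * through unchanged; hence the function comment above.
 */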

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without rechecking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
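
/*
 * For reference: with folded (two-level) paging, PMD_SIZE equals
 * PGDIR_SIZE (4MB), so the inner loop runs once per pgd entry; with
 * PAE it is 2MB, with PTRS_PER_PMD == 512. Since both loops test
 * vaddr != end, callers must pass an end address reachable in whole
 * PMD_SIZE steps from start.
 */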

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				/* advance address with pfn, so each pte is checked
				   against the page it actually maps */
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}
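
/*
 * With PSE, one pmd entry maps PTRS_PER_PTE small pages as a single
 * large page: 1024 * 4K = 4MB on two-level paging, 512 * 4K = 2MB with
 * PAE. address2 above is the last byte covered by the large page, so a
 * large page is mapped executable whenever it overlaps kernel text.
 */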

static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640K->1MB area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
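
/*
 * Note the rounding above: each region's start is rounded up to a page
 * boundary and its end rounded down, so partial pages at the edges of
 * an EFI or e820 region are deliberately not reported as RAM.
 */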

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
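
/*
 * kmap_pte caches the pte backing the first fixmap kmap slot; the
 * atomic kmap code can then index from this cached pte instead of
 * walking the page tables on every kmap_atomic() call.
 */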

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
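
/*
 * pkmap_page_table is the single page table that backs the persistent
 * kmap window; kmap()/kunmap_high() create and tear down highmem
 * mappings by writing entries into it.
 */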

static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node, and that all memory added dynamically
 * and onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

static void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#else
	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}

#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}
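
/*
 * Note the asymmetry above: in the PAE case each zapped entry is left
 * present but pointing at empty_zero_page (__pa(empty_zero_page) | 1),
 * mirroring how pagetable_init() seeds the PAE top-level entries,
 * whereas the two-level case can simply write a zero pgd entry.
 */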

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else
		return -EINVAL;

	return 0;
}
early_param("noexec", noexec_setup);

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
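
/*
 * For reference: v[3] above is EDX of CPUID leaf 0x80000001, whose
 * bit 20 advertises NX (Execute Disable) support; setting the NX bit
 * in the EFER MSR then enables the feature, making _PAGE_NX a usable
 * pte bit via __supported_pte_mask.
 */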

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	pte_update_defer(&init_mm, vaddr, pte);
	__flush_tlb_all();
out:
	return ret;
}
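
/*
 * The NX bit is bit 63 of a 64-bit PAE pte; pte_high holds bits 32-63,
 * so the code above toggles bit (_PAGE_BIT_NX - 32) == 31 of pte_high.
 */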

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START > VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif
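
/*
 * Hot-added memory always lands in ZONE_HIGHMEM here, matching the
 * single-node, highmem-only assumption documented above online_page();
 * remove_memory() is simply unsupported on i386.
 */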

struct kmem_cache *pgd_cache;
struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}
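
/*
 * PTRS_PER_PMD > 1 only holds on PAE builds (512 entries per pmd); on
 * two-level builds the pmd is folded (PTRS_PER_PMD == 1), so no pmd
 * cache is created and the pgd cache gets a destructor instead.
 */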

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}
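
/*
 * How the test works: flag starts at 1 (the "2" (1) input constraint).
 * The write at label 1 targets the read-only FIX_WP_TEST page. If the
 * CPU honours WP in supervisor mode, the write faults and the
 * __ex_table entry (1b -> 2b) resumes execution past the xorl, leaving
 * flag == 1; if WP is broken, the write succeeds and xorl clears flag.
 */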

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %uk\n",
			(__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
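
/*
 * The POISON_FREE_INITMEM memset fills freed init pages with a poison
 * pattern (see <linux/poison.h>), so stale pointers into __init code
 * or data fail loudly instead of silently reading reused memory.
 */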

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif