/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>

/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

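/*
 * Illustrative note (added for clarity, not in the original source): the
 * early_param() hooks above turn "gbpages" and "nogbpages" into kernel
 * command-line switches. Booting with "nogbpages" forces the direct mapping
 * down to 2M (or 4k) pages even on CPUs that support 1G pages, while
 * init_gbpages() below clears direct_gbpages again when the CPU lacks
 * gbpages support.
 */
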
/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

int after_bootmem;

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata;

/*
 * noexec=on|off
 * Control non-executable mappings for 64-bit processes.
 *
 * on	Enable (default)
 * off	Disable
 */
static int __init nonx_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", nonx_setup);

void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx)
		__supported_pte_mask &= ~_PAGE_NX;
}

int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). That is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

void
set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
				pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) && pte_val(new_pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void
set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t *)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

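/*
 * Usage sketch (a hypothetical caller, modelled on the fixmap code): a
 * fixed virtual slot is resolved to its address and wired up through
 * set_pte_vaddr(). 'example_set_fixmap' is an illustrative name, not a
 * function in this file:
 *
 *	static void example_set_fixmap(enum fixed_addresses idx,
 *				       unsigned long phys, pgprot_t flags)
 *	{
 *		unsigned long address = __fix_to_virt(idx);
 *
 *		if (idx >= __end_of_fixed_addresses) {
 *			BUG();
 *			return;
 *		}
 *		set_pte_vaddr(address, pfn_pte(phys >> PAGE_SHIFT, flags));
 *	}
 */
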
/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
						pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

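/*
 * Usage sketch (hypothetical values): platform setup code can map a large
 * MMIO window through these helpers, e.g. uncached for a device aperture.
 * Both arguments must be 2M aligned, as the BUG_ON() above enforces:
 *
 *	init_extra_mapping_uc(0xf8000000UL, 8UL * PMD_SIZE);
 */
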
/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _end.  _end is
 * rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;
	pmd_t *last_pmd = pmd + PTRS_PER_PMD;

	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static __ref void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys  = pfn * PAGE_SIZE;
	return adr;
}

static __ref void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

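/*
 * Typical pairing, mirroring the phys_*_init() callers below: the new page
 * table page is filled through the temporary mapping, unmapped, and then
 * installed by its physical address:
 *
 *	unsigned long pte_phys;
 *	pte_t *pte = alloc_low_page(&pte_phys);
 *	... fill in the new page table page through 'pte' ...
 *	unmap_low_page(pte);
 *	pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
 */
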
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
{
	unsigned pages = 0;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

		if (addr >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PTE; i++, pte++)
					set_pte(pte, __pte(0));
			}
			break;
		}

		if (pte_val(*pte))
			continue;

		if (0)
			printk(" pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
		pages++;
	}
	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	return phys_pte_init(pte, address, end);
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	unsigned long start = address;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long pte_phys;
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				last_map_addr = phys_pte_update(pmd, address,
								end);
				spin_unlock(&init_mm.page_table_lock);
			}
			/* Count entries we're using from level2_ident_pgt */
			if (start == 0)
				pages++;
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
			continue;
		}

		pte = alloc_low_page(&pte_phys);
		last_map_addr = phys_pte_init(pte, address, end);
		unmap_low_page(pte);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
		unsigned long page_size_mask)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long last_map_addr;

	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
	__flush_tlb_all();
	return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem &&
				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud))
				last_map_addr = phys_pmd_update(pud, addr, end,
							 page_size_mask);
			continue;
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
		unmap_low_page(pmd);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();
	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
		unsigned long page_size_mask)
{
	pud_t *pud;

	pud = (pud_t *)pgd_page_vaddr(*pgd);

	return phys_pud_init(pud, addr, end, page_size_mask);
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
	if (direct_gbpages) {
		unsigned long extra;
		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (cpu_has_pse) {
		unsigned long extra;
		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
}

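/*
 * Worked example (illustrative arithmetic): for end = 4G with gbpages off
 * and PSE on, puds = 4 and pmds = 2048, so tables = 4k (one PUD page) +
 * 16k (four PMD pages) = 20k; ptes = 0 because 4G is already 2M aligned,
 * so everything below 'end' can be covered by large pages.
 */
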
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
}

static unsigned long __init kernel_physical_mapping_init(unsigned long start,
						unsigned long end,
						unsigned long page_size_mask)
{

	unsigned long next, last_map_addr = end;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		next = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (next > end)
			next = end;

		if (pgd_val(*pgd)) {
			last_map_addr = phys_pud_update(pgd, __pa(start),
						 __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page(&pud_phys);
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						 page_size_mask);
		unmap_low_page(pud);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, __va(pud_phys));
		spin_unlock(&init_mm.page_table_lock);
	}

	return last_map_addr;
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#define NR_RANGE_MR 5

static int save_mr(struct map_range *mr, int nr_range,
		   unsigned long start_pfn, unsigned long end_pfn,
		   unsigned long page_size_mask)
{

	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long last_map_addr = 0;
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;

	printk(KERN_INFO "init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		init_gbpages();

	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;

	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head, if 'start' is not big-page aligned */
	start_pfn = start >> PAGE_SHIFT;
	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* big page (2M) range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* big page (1G) range */
	start_pfn = end_pfn;
	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask &
			 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));

	/* tail that is not big page (1G) aligned */
	start_pfn = end_pfn;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
			page_size_mask & (1<<PG_LEVEL_2M));

	/* tail that is not big page (2M) aligned */
	start_pfn = end_pfn;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			 (nr_range - 1 - i) * sizeof (struct map_range));
		mr[i].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	if (!after_bootmem)
		find_early_table_space(end);

	for (i = 0; i < nr_range; i++)
		last_map_addr = kernel_physical_mapping_init(
					mr[i].start, mr[i].end,
					mr[i].page_size_mask);

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();

	if (!after_bootmem && table_end > table_start)
		reserve_early(table_start << PAGE_SHIFT,
				 table_end << PAGE_SHIFT, "PGTABLE");

	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
			 last_map_addr, end);

	if (!after_bootmem)
		early_memtest(start, end);

	return last_map_addr >> PAGE_SHIFT;
}

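/*
 * Worked example (illustrative): for start = 0 and end = 0x41000000
 * (1G + 16M) with both 1G and 2M pages available, the head and tail
 * splits come out empty and the resulting ranges print as:
 *
 *	 0000000000 - 0040000000 page 1G
 *	 0040000000 - 0041000000 page 2M
 */
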
#ifndef CONFIG_NUMA
void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 0, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	memory_present(0, 0, max_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	last_mapped_pfn = init_memory_mapping(start, start + size-1);
	if (last_mapped_pfn > max_pfn_mapped)
		max_pfn_mapped = last_mapped_pfn;

	ret = __add_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);	/* warn only on failure, not unconditionally */

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

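/*
 * Illustrative call path: arch_add_memory() is reached from the generic
 * add_memory() in mm/memory_hotplug.c, e.g. when an ACPI memory device
 * is hot-added:
 *
 *	add_memory(nid, start, size);	->  arch_add_memory(nid, start, size)
 */
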
#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	/* pages 0-256 cover the low megabyte (256 pages of 4k) */
	if (pagenr <= 256)
		return 1;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

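/*
 * Illustrative caller (a sketch of the STRICT_DEVMEM check in
 * drivers/char/mem.c; names approximate): each page of a /dev/mem access
 * is vetted with devmem_is_allowed():
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *		if (!devmem_is_allowed(pfn))
 *			return 0;
 */
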
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = max_pfn - totalram_pages -
					absent_pages_in_range(0, max_pfn);
	after_bootmem = 1;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	cpa_init();
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (addr >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	start = rodata_start;
#endif

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif
}

#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
#ifdef CONFIG_NUMA
	int nid, next_nid;
	int ret;
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= max_pfn) {
		/*
		 * This can happen with kdump kernels when accessing
		 * firmware tables:
		 */
		if (pfn < max_pfn_mapped)
			return -EFAULT;

		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n",
				phys, len);
		return -EFAULT;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	nid = phys_to_nid(phys);
	next_nid = phys_to_nid(phys + len - 1);
	if (nid == next_nid)
		ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags);
	else
		ret = reserve_bootmem(phys, len, flags);

	if (ret != 0)
		return ret;

#else
	reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif

	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}

	return 0;
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

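/*
 * Usage sketch (modelled on the /proc/kcore read path in fs/proc/kcore.c):
 * kernel virtual addresses are validated before being copied out, since the
 * vmalloc and module areas may contain holes:
 *
 *	if (kern_addr_valid(start)) {
 *		if (copy_to_user(buffer, (char *)start, tsz))
 *			return -EFAULT;
 *	}
 */
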
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives:
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}

	}
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif