/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */
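
/* Print a per-node summary of total/reserved/shared/swap-cached pages. */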
void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%ld pages of RAM\n", total);
	printk(KERN_INFO "%ld reserved pages\n", reserved);
	printk(KERN_INFO "%ld pages shared\n", shared);
	printk(KERN_INFO "%ld pages swap cached\n", cached);
}

int after_bootmem;
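
/*
 * Allocate one zeroed, page-aligned page for page table use: from the
 * buddy allocator once it is running, from bootmem before that.
 */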
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
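
/*
 * Install a kernel mapping of one page at vaddr -> phys, allocating the
 * intermediate page table levels as needed.
 */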
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR "PGD FIXMAP MISSING, it should be set up in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk(KERN_ERR "Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}

unsigned long __initdata table_start, table_end;
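
/*
 * Allocate a zeroed page for early page tables.  Before bootmem is up,
 * this takes the next free page at table_end and maps it with
 * early_ioremap(); afterwards it falls back to get_zeroed_page().  The
 * physical address is returned through *phys.
 */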
static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}

/*
 * Temporarily map a physical range with 2MB pages, by stealing unused
 * slots in the kernel-text PMD (level2_kernel_pgt).  Must run before
 * zap_low_mappings.
 */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk(KERN_ERR "early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}

/* Tear down an early_ioremap() mapping, to avoid virtual aliases later. */
__init void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}
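
/*
 * Set up 2MB kernel direct mappings in one PMD page, covering
 * [address, end).  Already-present entries are left untouched; at boot
 * time, entries beyond 'end' are cleared.
 */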
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}
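
/*
 * Fill one PUD page with 1GB-slot mappings covering [addr, end): slots
 * with no e820-mapped memory are cleared at boot, already-populated slots
 * are updated in place, and the rest get a freshly allocated PMD page.
 */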
static void __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
}
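
/*
 * Compute the worst-case amount of memory the direct-mapping tables can
 * need up to 'end' (one pud_t per 1GB slot plus one pmd_t per 2MB slot,
 * rounded up to pages) and reserve a matching block in the e820 map.
 */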
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		     end, table_start << PAGE_SHIFT,
		     (table_start << PAGE_SHIFT) + tables);
}

/*
 * Set up the direct mapping of physical memory at PAGE_OFFSET.  This runs
 * before bootmem is initialized and gets its pages directly from physical
 * memory; to access them, they are temporarily mapped.
 */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif

/*
 * Unmap a kernel mapping if it exists.  This is useful to avoid prefetches
 * from the CPU leading to inconsistent cache lines.  address and size must
 * be aligned to 2MB boundaries.  Does nothing when the mapping doesn't
 * exist.
 */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (!(pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
			       "clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}

/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size - 1);

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk(KERN_ERR "%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory hotadd without sparsemem. The mem_maps have been allocated in
 * advance, so just online the pages here.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
			 kcore_modules, kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
			absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}
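
/*
 * Free a range of init memory back to the page allocator, poisoning it
 * with POISON_FREE_INITMEM first to catch late users.
 */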
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

		ClearPageReserved(page);
		init_page_count(page);
		memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		__free_page(page);
		totalram_pages++;
	}
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size;

#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() > 1)
		start = PFN_ALIGN(__va(__pa_symbol(&_etext)));
#endif
	size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start;
	change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO);

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       size >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who the culprit is.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", __pa(start), __pa(end));
}
#endif
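
/*
 * Reserve a physical range in the bootmem allocator (on the right node
 * under NUMA) and account any ZONE_DMA part of it in dma_reserve.
 */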
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;

	if (pfn >= end_pfn) {
		/*
		 * This can happen with kdump kernels when accessing firmware
		 * tables.
		 */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
		       phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys + len <= MAX_DMA_PFN * PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}
LT
649}
650
651int kern_addr_valid(unsigned long addr)
652{
653 unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
654 pgd_t *pgd;
655 pud_t *pud;
656 pmd_t *pmd;
657 pte_t *pte;
658
659 if (above != 0 && above != -1UL)
660 return 0;
661
662 pgd = pgd_offset_k(addr);
663 if (pgd_none(*pgd))
664 return 0;
665
666 pud = pud_offset(pgd, addr);
667 if (pud_none(*pud))
668 return 0;
669
670 pmd = pmd_offset(pud, addr);
671 if (pmd_none(*pmd))
672 return 0;
673 if (pmd_large(*pmd))
674 return pfn_valid(pmd_pfn(*pmd));
675
676 pte = pte_offset_kernel(pmd, addr);
677 if (pte_none(*pte))
678 return 0;
679 return pfn_valid(pte_pfn(*pte));
680}

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{
		.ctl_name	= 99,
		.procname	= "exception-trace",
		.data		= &exception_trace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table debug_root_table2[] = {
	{
		.ctl_name	= CTL_DEBUG,
		.procname	= "debug",
		.mode		= 0555,
		.child		= debug_table2
	},
	{}
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/*
 * A pseudo VMA to allow ptrace access to the vsyscall page.  This only
 * covers the 64-bit vsyscall page now; 32-bit has a real VMA and does not
 * need special handling anymore.
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable task/vma: typically from interrupt
 * context.  It is less reliable than using a task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}