1 | /* | |
2 | * arch/sh/kernel/vsyscall/vsyscall.c | |
3 | * | |
4 | * Copyright (C) 2006 Paul Mundt | |
5 | * | |
6 | * vDSO randomization | |
7 | * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar | |
8 | * | |
9 | * This file is subject to the terms and conditions of the GNU General Public | |
10 | * License. See the file "COPYING" in the main directory of this archive | |
11 | * for more details. | |
12 | */ | |
13 | #include <linux/mm.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/init.h> | |
16 | #include <linux/gfp.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/elf.h> | |
19 | #include <linux/sched.h> | |
20 | #include <linux/err.h> | |
21 | ||
22 | /* | |
23 | * Should the kernel map a VDSO page into processes and pass its | |
24 | * address down to glibc upon exec()? | |
25 | */ | |
/* On by default; overridable at boot time via the "vdso=" parameter below. */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);
28 | ||
29 | static int __init vdso_setup(char *s) | |
30 | { | |
31 | vdso_enabled = simple_strtoul(s, NULL, 0); | |
32 | return 1; | |
33 | } | |
34 | __setup("vdso=", vdso_setup); | |
35 | ||
36 | /* | |
37 | * These symbols are defined by vsyscall.o to mark the bounds | |
38 | * of the ELF DSO images included therein. | |
39 | */ | |
40 | extern const char vsyscall_trapa_start, vsyscall_trapa_end; | |
/* The single page backing the vDSO; populated once in vsyscall_init(). */
static struct page *syscall_pages[1];
42 | ||
43 | int __init vsyscall_init(void) | |
44 | { | |
45 | void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); | |
46 | syscall_pages[0] = virt_to_page(syscall_page); | |
47 | ||
48 | /* | |
49 | * XXX: Map this page to a fixmap entry if we get around | |
50 | * to adding the page to ELF core dumps | |
51 | */ | |
52 | ||
53 | memcpy(syscall_page, | |
54 | &vsyscall_trapa_start, | |
55 | &vsyscall_trapa_end - &vsyscall_trapa_start); | |
56 | ||
57 | return 0; | |
58 | } | |
59 | ||
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	/* Serialize against concurrent modifiers of this mm's address space. */
	down_write(&mm->mmap_sem);

	/* Let the kernel choose any free page-sized slot for the vDSO. */
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/*
	 * Install the shared vDSO page read-only/executable;
	 * VM_ALWAYSDUMP makes the mapping appear in core dumps.
	 */
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
				      VM_ALWAYSDUMP,
				      syscall_pages);
	if (unlikely(ret))
		goto up_fail;

	/*
	 * Record the vDSO base in the mm context — presumably consumed by
	 * the ELF loader / arch_vma_name() below; confirm against callers.
	 */
	current->mm->context.vdso = (void *)addr;

up_fail:
	/* Single unlock site for both the success and the failure paths. */
	up_write(&mm->mmap_sem);
	return ret;
}
88 | ||
89 | const char *arch_vma_name(struct vm_area_struct *vma) | |
90 | { | |
91 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) | |
92 | return "[vdso]"; | |
93 | ||
94 | return NULL; | |
95 | } | |
96 | ||
/*
 * SH has no gate VMA — the vDSO is installed as an ordinary special
 * mapping instead — so there is never a gate area to report.
 */
struct vm_area_struct *get_gate_vma(struct task_struct *task)
{
	return NULL;
}
101 | ||
/* No gate area exists on SH, so no address can ever fall inside one. */
int in_gate_area(struct task_struct *task, unsigned long address)
{
	return 0;
}
106 | ||
/* Task-less variant: likewise, no address is ever in a gate area on SH. */
int in_gate_area_no_task(unsigned long address)
{
	return 0;
}