/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>

extern void die (char *, struct pt_regs *, long);

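/*
 * Give kprobes first claim on kernel-mode faults: a probed instruction
 * that touches user memory can fault while kprobes is single-stepping
 * it.  kprobe_running() relies on per-CPU state (smp_processor_id()),
 * hence the preempt_disable()/preempt_enable() pair around the check.
 * User-mode faults can never belong to a kprobe, so they skip this
 * path entirely.
 */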
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobes_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

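/*
 * Main fault handler.  ADDRESS is the faulting virtual address, ISR is
 * the interruption status register, whose r/w/x bits (IA64_ISR_R_BIT
 * and friends, tested below) encode the kind of access that faulted,
 * and REGS is the interrupted register state.
 */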
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not
	 * take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may
	 * already hold the mmap_sem (the pfn_valid macro is called during
	 * mmap).  There are no vmas for region 5 addresses anyway, so skip
	 * taking the semaphore and go directly to the exception handling
	 * code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

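	/*
	 * Aside: REGION_NUMBER() is just the top three bits of the
	 * virtual address, so region 5 spans 0xa000000000000000 upward;
	 * it is the kernel's mapped segment and has no vmas behind it.
	 */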
	/*
	 * Let kprobes handle faults taken on user-space access
	 * instructions first.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns vma such that address < vma->vm_end or NULL */
	if (address < vma->vm_start)
		goto check_expansion;

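	/*
	 * prev_vma matters because the ia64 register backing store
	 * grows upward: check_expansion below may have to extend the
	 * vma that ends just before the faulting address.
	 */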
  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

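	/*
	 * A faulting read is legal on any readable or writable mapping,
	 * so it is checked on its own.  Write and execute intent are
	 * folded into `mask': e.g. a store sets ISR.w, giving
	 * mask == VM_WRITE, and a single AND against vma->vm_flags
	 * verifies the permission.
	 */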
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
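	/*
	 * VM_FAULT_MAJOR means the fault needed I/O (e.g. reading the
	 * page back in); anything else was satisfied from memory and
	 * counts as a minor fault.
	 */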
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return;

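	/*
	 * The address missed every vma.  Two growable cases remain: an
	 * ordinary stack vma growing down toward the address, or the
	 * register backing store (prev_vma, marked VM_GROWSUP) growing
	 * up to meet it.
	 */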
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault;
		 * set the "ed" bit in the psr to ensure forward progress.
		 * (The target register will get a NaT for ld.s; lfetch will
		 * be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

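	/*
	 * Kernel-mode faults end up below.  Speculative loads are
	 * retried with psr.ed first (they may legitimately touch
	 * unmapped addresses), then the exception-fixup tables get a
	 * chance, and only after that do we oops.
	 */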
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault;
		 * set the "ed" bit in the psr to ensure forward progress.
		 * (The target register will get a NaT for ld.s; lfetch will
		 * be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if
	 * the address is valid, due to the VHPT walker inserting a
	 * non-present translation that became stale.  If that happens,
	 * the non-present fault handler has already purged the stale
	 * translation, which fixed the problem.  So, check whether the
	 * translation is valid now, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

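	/*
	 * ia64_done_with_exception() above consults the exception-fixup
	 * tables, so a fault raised by a tagged user accessor (the
	 * copy_{to,from}_user family) resumes at its fixup handler
	 * instead of oopsing.
	 */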
	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}
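/*
 * Note on the OOM path above: init is never killed here; it yields the
 * CPU, reacquires mmap_sem, and retries the fault.  Other user-mode
 * processes are killed outright, while kernel-mode faults fall back to
 * the no_context path.
 */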