/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mmiotrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>
#include <asm/traps.h>

/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
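/*
 * Worked examples, derived from the bit definitions above: error_code 0x2
 * (PF_WRITE) is a kernel write to a not-present page; error_code 0x6
 * (PF_WRITE|PF_USER) is a user-mode write to a not-present page; 0xd
 * (PF_RSVD|PF_USER|PF_PROT) would be a user access that tripped a
 * reserved page-table bit.
 */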

static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
#ifdef CONFIG_MMIOTRACE
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
#endif
	return 0;
}

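/*
 * Give any registered kprobes fault handler a chance to claim the fault
 * first; returns nonzero if a kprobe consumed it and no further handling
 * is needed.
 */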
static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}

/*
 * X86_32
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
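	/* An x86 instruction is at most 15 bytes long: */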
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
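			/*
			 * (0x0F 0x0D is the 3DNow! prefetch/prefetchw pair;
			 * 0x0F 0x18 encodes the SSE prefetchnta/t0/t1/t2
			 * hints.)
			 */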
			scan_more = 0;

			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				   (opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

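/*
 * Fill in a siginfo for the faulting address and force-deliver the signal
 * to the given task.
 */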
static void force_sig_info_fault(int si_signo, int si_code,
				 unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

#ifdef CONFIG_X86_64
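/* Returns nonzero if the word at kernel address p cannot be read safely. */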
static int bad_address(void *p)
{
	unsigned long dummy;
	return probe_kernel_address((unsigned long *)p, dummy);
}
#endif

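/*
 * Dump the page-table walk for 'address' to the console, as a debugging
 * aid for the oops output around it.
 */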
static void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							 & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							 & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", (int)(sizeof(page) * 2), (u64)page);
	}

	printk("\n");
#else /* CONFIG_X86_64 */
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd))
		goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd))
		goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
#endif
}

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}
#endif

#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * Work around K8 erratum #93 and buggy BIOSes.
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 * Many BIOSes that weren't tested properly miss this.
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 * Note that we only handle kernel faults here.
 * Does nothing on X86_32.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int warned;
	if (address != regs->ip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps to
 * illegal addresses >4GB. We catch this in the page fault handler because
 * these addresses are not reachable. Just detect this case and return.
 * Any code segment in the LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
	    (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
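	/*
	 * (The F00F workaround points the IDT descriptor at a read-only
	 * alias, so the buggy locked access faults here instead of locking
	 * up the CPU; nr recovers which IDT vector was being fetched, and
	 * vector 6 is the invalid-opcode trap the instruction deserves.)
	 */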
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
			    unsigned long address)
{
#ifdef CONFIG_X86_32
	if (!oops_may_print())
		return;
#endif

#ifdef CONFIG_X86_PAE
	if (error_code & PF_INSTR) {
		unsigned int level;
		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(KERN_CRIT "kernel tried to execute "
				"NX-protected page - exploit attempt? "
				"(uid: %d)\n", current_uid());
	}
#endif

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);
	dump_pagetable(address);
}

#ifdef CONFIG_X86_64
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	int sig = SIGKILL;
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Bad pagetable", regs, error_code))
		sig = 0;
	oops_end(flags, regs, sig);
}
#endif

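/*
 * Returns nonzero if the pte already grants the access that just faulted;
 * in that case the fault can only have come from a stale TLB entry.
 */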
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;
	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry. This allows
 * us to lazily refresh the TLB when increasing the permissions of a
 * kernel page (RO -> RW or NX -> X). Doing it eagerly is very
 * expensive since that implies doing a full cross-processor TLB
 * flush, even if no stale TLB entries exist on other processors.
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
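/*
 * Example: CPU 0 upgrades a kernel page from RO to RW (e.g. via
 * set_memory_rw()) without a cross-CPU TLB flush; CPU 1 then writes to
 * that page through its stale RO TLB entry and faults. We see the
 * up-to-date, writable pte here and simply return.
 */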
static int spurious_fault(unsigned long address,
			  unsigned long error_code)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	return spurious_fault_check(error_code, pte);
}

/*
 * X86_32
 * Handle a fault on the vmalloc or module mapping area
 *
 * X86_64
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush.
	 */
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */
	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);
	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	return 0;
#endif
}

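/* When set, faults that kill a user task get a ratelimited log line below. */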
int show_unhandled_signals = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;
#ifdef CONFIG_X86_64
	unsigned long flags;
	int sig;
#endif

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	if (notify_page_fault(regs))
		return;
	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0) and that it was not a
	 * protection or reserved-bit fault
	 * ((error_code & (PF_PROT|PF_RSVD)) == 0).
	 */
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;

		/* Can handle a stale RO->RW TLB */
		if (spurious_fault(address, error_code))
			return;

		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	/*
	 * It's safe to allow IRQs after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();

#ifdef CONFIG_X86_64
	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);
#endif

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exception tables.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:	/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:	/* read, present */
		goto bad_area;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
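	/*
	 * (A write to a present page, error_code 3, takes the "default:"
	 * path above and is treated as a write: typically a copy-on-write
	 * fault, which handle_mm_fault() below resolves by copying the
	 * page.)
	 */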

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#ifdef CONFIG_X86_32
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (v8086_mode(regs)) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
#endif
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (is_errata100(regs, address))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
			"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address,
			(void *) regs->ip, (void *) regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

	if (is_f00f_bug(regs, address))
		return;

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * X86_32
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch(), fixup_exception() would
	 * have handled it.
	 *
	 * X86_64
	 * Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
#ifdef CONFIG_X86_32
	bust_spinlocks(1);
#else
	flags = oops_begin();
#endif

	show_fault_oops(regs, error_code, address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;

#ifdef CONFIG_X86_32
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
#else
	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;
	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);
	oops_end(flags, regs, sig);
#endif

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;
#ifdef CONFIG_X86_32
	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;
#endif
	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

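/*
 * Sync the vmalloc-area mappings of every page directory on pgd_list with
 * the reference kernel page table (init_mm.pgd), so those areas no longer
 * need to fault in through vmalloc_fault().
 */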
void vmalloc_sync_all(void)
{
	unsigned long address;

#ifdef CONFIG_X86_32
	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page),
					      address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
#else /* CONFIG_X86_64 */
	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;
		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
#endif
}