/*
 * linux/arch/x86-64/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>

/* Page fault error code bits */
#define PF_PROT		(1<<0)	/* set: protection fault, clear: no page found */
#define PF_WRITE	(1<<1)	/* set: fault was a write, clear: a read */
#define PF_USER		(1<<2)	/* set: fault happened in user mode */
#define PF_RSVD		(1<<3)	/* set: reserved bit set in a page table entry */
#define PF_INSTR	(1<<4)	/* set: fault was an instruction fetch */

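/*
 * Added note: these mirror the hardware error code the CPU pushes for
 * vector 14. For example, a user-mode write to a present read-only page
 * arrives with error_code == (PF_PROT|PF_WRITE|PF_USER) == 0x7, while a
 * user-mode read of an unmapped address arrives with just PF_USER == 0x4.
 */
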
static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(register_page_fault_notifier);

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);

static inline int notify_page_fault(struct pt_regs *regs, long err)
{
	struct die_args args = {
		.regs = regs,
		.str = "page fault",
		.err = err,
		.trapnr = 14,
		.signr = SIGSEGV
	};
	return atomic_notifier_call_chain(&notify_page_fault_chain,
					  DIE_PAGE_FAULT, &args);
}

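/*
 * Added commentary: vmalloc_sync_all() is called at registration time so
 * that a notifier whose code or data lives in a not-yet-synced vmalloc
 * mapping cannot itself raise a vmalloc fault while the chain is walked.
 *
 * Minimal usage sketch (hypothetical client code, not part of this file):
 *
 *	static int my_pf_notify(struct notifier_block *nb, unsigned long val,
 *				void *data)
 *	{
 *		struct die_args *args = data;
 *
 *		if (val == DIE_PAGE_FAULT)
 *			printk(KERN_DEBUG "fault, err %lx\n", args->err);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_pf_nb = {
 *		.notifier_call = my_pf_notify,
 *	};
 *
 *	register_page_fault_notifier(&my_pf_nb);
 */
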
/* Sometimes the CPU reports invalid exceptions on prefetch.
   Check that here and ignore it.
   Opcode checker based on code by Richard Brunner */
static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
				unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

	/* If it was an exec fault, ignore. */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86
			   prefixes. In long mode, the CPU will signal
			   invalid opcode if some of these prefixes are
			   present so we will never get here anyway */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x40:
			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes.
			   Need to figure out under what instruction mode the
			   instruction was issued... */
			/* Could check the LDT for lm, but for now it's good
			   enough to assume that long mode only uses well known
			   segments or kernel. */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
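
/*
 * Added note: the sequences accepted above are AMD 3DNow! PREFETCH/PREFETCHW
 * (0x0F 0x0D) and SSE PREFETCHNTA/T0/T1/T2 (0x0F 0x18, hint selected by the
 * ModRM reg field); e.g. the bytes 0x0F 0x18 0x06 decode as
 * prefetchnta (%rsi). A spurious fault reported on such an instruction is
 * ignored here instead of being escalated to a signal or oops.
 */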

static int bad_address(void *p)
{
	unsigned long dummy;
	return probe_kernel_address((unsigned long *)p, dummy);
}

void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud)) goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}
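
/*
 * Added note: a successful walk prints all four levels on one line, e.g.
 * "PGD 6a2067 PUD 6a3067 PMD 1d2067 PTE 8000000001d3025" (illustrative
 * values only); the walk stops at the first non-present entry, and "BAD"
 * means the page-table entry itself could not be read.
 */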

static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";

/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64bit RIP register on C stepping K8.
   Many BIOSes that didn't get tested properly miss this.
   The OS sees this as a page fault with the upper 32bits of RIP cleared.
   Try to work around it here.
   Note we only handle faults in kernel here. */

static int is_errata93(struct pt_regs *regs, unsigned long address)
{
	static int warned;
	if (address != regs->rip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->rip = address;
		return 1;
	}
	return 0;
}
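
/*
 * Added note: is_errata93() assumes the truncated %rip was really a kernel
 * text address whose upper 32 bits were lost, so it ORs 0xffffffff00000000
 * back in; if the repaired address lands inside kernel or module text, the
 * fault is patched up by resuming execution there.
 */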

static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	__die("Bad pagetable", regs, error_code);
	oops_end(flags);
	do_exit(SIGKILL);
}

/*
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Copy kernel mappings over when needed. This can also
	   happen within a race in page table update. In the latter
	   case just flush. */

	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/* Below here mismatches are bugs because these lower tables
	   are shared */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);
	/* Don't use pte_page here, because the mappings can point
	   outside mem_map, and the NUMA hash lookup cannot handle
	   that. */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	return 0;
}
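
/*
 * Added note: only the top-level PGD entry is ever copied from init_mm; the
 * PUD/PMD/PTE pages hanging off it are shared by reference, which is why a
 * mismatch below the PGD level is a BUG rather than something to repair.
 * Returning -1 means "not a lazily-synced vmalloc mapping" and lets the
 * caller fall through to the normal bad-address handling.
 */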

static int page_fault_trace;
int show_unhandled_signals = 1;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	const struct exception_table_entry *fixup;
	int write, fault;
	unsigned long flags;
	siginfo_t info;

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	info.si_code = SEGV_MAPERR;


	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE64)) {
		/*
		 * Don't check for the module range here: its PML4
		 * is always initialized because it's shared with the main
		 * kernel text. Only vmalloc may need PML4 syncups.
		 */
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    ((address >= VMALLOC_START && address < VMALLOC_END))) {
			if (vmalloc_fault(address) >= 0)
				return;
		}
		if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
		return;

	if (likely(regs->eflags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(page_fault_trace))
		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
		       regs->rip, regs->rsp, regs->cs, regs->ss, address, error_code);

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs))
		error_code |= PF_USER;

 again:
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->rip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/* Allow userspace just enough access below the stack pointer
		 * to let the 'enter' instruction work.
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
			goto bad_area;
	}
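
	/*
	 * Added note: 65536 + 32 * sizeof(unsigned long) covers the largest
	 * displacement the 'enter' instruction can generate (a 16-bit frame
	 * size plus up to 31 pushed frame pointers), so a write that far
	 * below %rsp can still be a legitimate stack access rather than a
	 * wild pointer.
	 */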
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:	/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:	/* read, present */
		goto bad_area;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
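
	/*
	 * Added note: the default label catches PF_PROT|PF_WRITE (a write to
	 * a present page, i.e. a COW or read-only violation) and deliberately
	 * falls through to the same VM_WRITE check as a write to a missing
	 * page; only the read-of-a-present-page case is unconditionally bad.
	 */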

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {

		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		if (is_prefetch(regs, address, error_code))
			return;

		/* Work around K8 erratum #100: K8 in compat mode
		   occasionally jumps to illegal addresses >4GB.  We
		   catch this here in the page fault handler because
		   these addresses are not reachable.  Just detect this
		   case and return.  Any code segment in LDT is
		   compatibility mode. */
		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
		    (address >> 32))
			return;
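
		/*
		 * Added note: bit 2 of %cs is the selector's Table Indicator;
		 * when set, the code segment comes from the LDT, which (per
		 * the comment above) is assumed to hold only 32-bit
		 * compatibility-mode segments for the erratum #100 check.
		 */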

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
					tsk->comm, tsk->pid, address, regs->rip,
					regs->rsp, error_code);
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return;
	}
524 /*
525 * Hall of shame of CPU/BIOS bugs.
526 */
527
528 if (is_prefetch(regs, address, error_code))
529 return;
530
531 if (is_errata93(regs, address))
532 return;
533
534/*
535 * Oops. The kernel tried to access some bad page. We'll have to
536 * terminate things with extreme prejudice.
537 */
538
1209140c 539 flags = oops_begin();
1da177e4
LT
540
541 if (address < PAGE_SIZE)
542 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
543 else
544 printk(KERN_ALERT "Unable to handle kernel paging request");
545 printk(" at %016lx RIP: \n" KERN_ALERT,address);
546 printk_address(regs->rip);
1da177e4 547 dump_pagetable(address);
6e3f3617
JB
548 tsk->thread.cr2 = address;
549 tsk->thread.trap_no = 14;
550 tsk->thread.error_code = error_code;
1da177e4
LT
551 __die("Oops", regs, error_code);
552 /* Executive summary in case the body of the oops scrolled away */
553 printk(KERN_EMERG "CR2: %016lx\n", address);
1209140c 554 oops_end(flags);
1da177e4
LT
555 do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		goto again;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

void vmalloc_sync_all(void)
{
	/* Note that races in the updates of insync and start aren't
	   problematic:
	   insync can only get set bits added, and updates to start are only
	   improving performance (without affecting correctness if undone). */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			struct page *page;

			if (pgd_none(*pgd_ref))
				continue;
			spin_lock(&pgd_lock);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock(&pgd_lock);
			set_bit(pgd_index(address), insync);
		}
		if (address == start)
			start = address + PGDIR_SIZE;
	}
	/* Check that there is no need to do the same for the modules area. */
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
}
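
/*
 * Added note: vmalloc_sync_all() is the eager counterpart of the lazy
 * copying done in vmalloc_fault() -- it pushes every populated vmalloc PGD
 * entry from init_mm into all process page tables on pgd_list up front.
 * The insync bitmap records PGD slots that have already been propagated so
 * later calls can skip them, and 'start' ratchets forward past slots that
 * were synced on every pass.
 */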

static int __init enable_pagefaulttrace(char *str)
{
	page_fault_trace = 1;
	return 1;
}
__setup("pagefaulttrace", enable_pagefaulttrace);
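
/*
 * Added note: booting with "pagefaulttrace" on the kernel command line sets
 * page_fault_trace, making do_page_fault() above printk one line per fault
 * with rip/rsp/cs/ss, the faulting address, and the raw error code.
 */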