/*
 *  Copyright (C) 2000-2010  Axis Communications AB
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/uaccess.h>
extern int find_fixup_code(struct pt_regs *);
extern void die_if_kernel(const char *, struct pt_regs *, long);
/* debug of low-level TLB reload */
#define D(x)

/* debug of higher-level faults */
#define DPG(x)
/* current active page directory */

DEFINE_PER_CPU(pgd_t *, current_pgd);
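/* Reserved page holding the signal-return trampolines when the stack is not
 * executable; see the corresponding check in do_page_fault() below. */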
unsigned long cris_signal_return_page;
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Notice that the address we're given is aligned to the page the fault
 * occurred in, since we only get the PFN in R_MMU_CAUSE not the complete
 * faulting address.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
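/*
 * Note: 'protection' below carries error_code bit 0 and 'writeaccess'
 * extends bit 1 (2 denotes an instruction fetch); the two are recombined
 * as (writeaccess << 1) | protection for die_if_kernel().
 */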
asmlinkage void
do_page_fault(unsigned long address, struct pt_regs *regs,
	      int protection, int writeaccess)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;
	int fault;
	D(printk(KERN_DEBUG
		 "Page fault for %lX on %X at %lX, prot %d write %d\n",
		 address, smp_processor_id(), instruction_pointer(regs),
		 protection, writeaccess));

	tsk = current;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error (error_code & 1).
	 */

	if (address >= VMALLOC_START &&
	    !protection &&
	    !user_mode(regs))
		goto vmalloc_fault;
	/* When stack execution is not allowed we store the signal
	 * trampolines in the reserved cris_signal_return_page.
	 * Handle this in the exact same way as vmalloc (we know
	 * that the mapping is there and is valid so no need to
	 * call handle_mm_fault).
	 */
	if (cris_signal_return_page &&
	    address == cris_signal_return_page &&
	    !protection && user_mode(regs))
		goto vmalloc_fault;
	/* we can and should enable interrupts at this point */
	local_irq_enable();

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;
	/*
	 * If we're in an interrupt or "atomic" operation or have no
	 * user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

 good_area:
	info.si_code = SEGV_ACCERR;
	/* first do some preliminary protection checks */
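	/* writeaccess: 2 means an instruction fetch, 1 a data write,
	 * anything else a data read. */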
	if (writeaccess == 2) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (writeaccess == 1) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(mm, vma, address,
				(writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
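	/* handle_mm_fault() reports failures (OOM, SIGBUS) and major/minor
	 * fault status through the VM_FAULT_* bits tested below. */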
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */

 bad_area:
	up_read(&mm->mmap_sem);
 bad_area_nosemaphore:
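	/* Also entered directly, without mmap_sem held, from the vmalloc
	 * fault path below. */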
	DPG(show_registers(regs));
	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		printk(KERN_NOTICE "%s (pid %d) segfaults for page "
		       "address %08lx at pc %08lx\n",
		       tsk->comm, tsk->pid, address, instruction_pointer(regs));
		return;
	}

 no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	if (find_fixup_code(regs))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
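	/* oops_in_progress guards against recursing into this path if the
	 * oops printout itself faults. */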
	if (!oops_in_progress) {
		oops_in_progress = 1;
		if ((unsigned long) (address) < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference\n");
		else
			printk(KERN_ALERT "Unable to handle kernel access"
			       " at virtual address %08lx\n", address);

		die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
		oops_in_progress = 0;
	}

	do_exit(SIGKILL);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

 out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;
 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
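		/* pgd is this CPU's active page directory, pgd_k the kernel
		 * reference table in init_mm. */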
		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
		pgd_k = init_mm.pgd + offset;
		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */
		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);
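		/* The kernel mapping is now visible in the active page
		 * directory, so the faulting access can be retried. */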
		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}
/* Find fixup code. */
int
find_fixup_code(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	/* in case of delay slot fault (v32) */
	unsigned long ip = (instruction_pointer(regs) & ~0x1);
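	/* (v32 uses bit 0 of the saved PC to mark a fault in a delay slot,
	 * so it must be cleared before the exception-table lookup.) */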
	fixup = search_exception_tables(ip);
	if (fixup != 0) {
		/* Adjust the instruction pointer in the stackframe. */
		instruction_pointer(regs) = fixup->fixup;
		arch_fixup(regs);
		return 1;
	}

	return 0;
}