/* arch/avr32/mm/fault.c */
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>

#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        struct die_args args = {
                .regs = regs,
                .trapnr = trap,
        };
        return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        return NOTIFY_DONE;
}
#endif

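/*
 * Illustrative example (not part of the original file, kept under #if 0 so
 * it changes nothing): a minimal sketch of how a consumer such as kprobes
 * might hook the notifier chain above when CONFIG_KPROBES is enabled.  Only
 * register_page_fault_notifier(), unregister_page_fault_notifier(),
 * struct die_args and DIE_PAGE_FAULT come from the code in this file; the
 * handler and init/exit names are assumptions made for the example.
 */
#if 0
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>

/* Called for every fault reported through notify_page_fault() above. */
static int example_pf_handler(struct notifier_block *self,
                              unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_PAGE_FAULT)
                printk(KERN_DEBUG "page fault: ecr=%d pc=%08lx\n",
                       args->trapnr, args->regs->pc);

        return NOTIFY_DONE;     /* let normal fault handling continue */
}

static struct notifier_block example_pf_nb = {
        .notifier_call = example_pf_handler,
};

static int __init example_pf_init(void)
{
        return register_page_fault_notifier(&example_pf_nb);
}

static void __exit example_pf_exit(void)
{
        unregister_page_fault_notifier(&example_pf_nb);
}

module_init(example_pf_init);
module_exit(example_pf_exit);
#endif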
int exception_trace = 1;

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 *   6:  Protection fault (instruction access)
 *   15: Protection fault (read access)
 *   16: Protection fault (write access)
 *   20: Page not found (instruction access)
 *   24: Page not found (read access)
 *   28: Page not found (write access)
 */
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        const struct exception_table_entry *fixup;
        unsigned long address;
        unsigned long page;
        int writeaccess;
        long signr;
        int code;

        if (notify_page_fault(DIE_PAGE_FAULT, regs,
                              ecr, SIGSEGV) == NOTIFY_STOP)
                return;

        address = sysreg_read(TLBEAR);

        tsk = current;
        mm = tsk->mm;

        signr = SIGSEGV;
        code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user context, we must
         * not take the fault...
         */
        if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
                goto no_context;

        local_irq_enable();

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so we
         * can handle it...
         */
good_area:
        code = SEGV_ACCERR;
        writeaccess = 0;

        switch (ecr) {
        case ECR_PROTECTION_X:
        case ECR_TLB_MISS_X:
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
                break;
        case ECR_PROTECTION_R:
        case ECR_TLB_MISS_R:
                if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
                        goto bad_area;
                break;
        case ECR_PROTECTION_W:
        case ECR_TLB_MISS_W:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                writeaccess = 1;
                break;
        default:
                panic("Unhandled case %lu in do_page_fault!", ecr);
        }

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
survive:
        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;
        case VM_FAULT_OOM:
                goto out_of_memory;
        default:
                BUG();
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory
         * map. Fix it, but check if it's kernel or user first...
         */
bad_area:
        up_read(&mm->mmap_sem);

        if (user_mode(regs)) {
                if (exception_trace)
                        printk("%s%s[%d]: segfault at %08lx pc %08lx "
                               "sp %08lx ecr %lu\n",
                               is_init(tsk) ? KERN_EMERG : KERN_INFO,
                               tsk->comm, tsk->pid, address, regs->pc,
                               regs->sp, ecr);
                _exception(SIGSEGV, regs, code, address);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have
         * to terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT
                       "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);

        page = sysreg_read(PTBR);
        printk(KERN_ALERT "ptbr = %08lx", page);
        if (page) {
                /*
                 * Walk the two-level page table by hand: the top ten
                 * address bits index the pgd, the next ten the pte.
                 */
                page = ((unsigned long *)page)[address >> 22];
                printk(" pgd = %08lx", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
                        printk(" pte = %08lx", page);
                }
        }
        printk("\n");
        die("Kernel access of bad area", regs, signr);
        return;

        /*
         * We ran out of memory, or some other thing happened to us
         * that made us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: Killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        signr = SIGBUS;
        code = BUS_ADRERR;
        if (!user_mode(regs))
                goto no_context;

        if (exception_trace)
                printk("%s%s[%d]: bus error at %08lx pc %08lx "
                       "sp %08lx ecr %lu\n",
                       is_init(tsk) ? KERN_EMERG : KERN_INFO,
                       tsk->comm, tsk->pid, address, regs->pc,
                       regs->sp, ecr);

        _exception(SIGBUS, regs, BUS_ADRERR, address);
}

asmlinkage void do_bus_error(unsigned long addr, int write_access,
                             struct pt_regs *regs)
{
        printk(KERN_ALERT
               "Bus error at physical address 0x%08lx (%s access)\n",
               addr, write_access ? "write" : "read");
        printk(KERN_INFO "DTLB dump:\n");
        dump_dtlb();
        die("Bus Error", regs, SIGKILL);
}

/*
 * This functionality is currently not possible to implement because
 * we're using segmentation to ensure a fixed mapping of the kernel
 * virtual address space.
 *
 * It would be possible to implement this, but it would require us to
 * disable segmentation at startup and load the kernel mappings into
 * the TLB like any other pages. There will be lots of trickery to
 * avoid recursive invocation of the TLB miss handler, though...
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{

}
EXPORT_SYMBOL(kernel_map_pages);
#endif