/*
 * linux/arch/arm/mm/fault-armv.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

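/*
 * Memory type used for PTEs that must not be cached because they alias
 * one another.  Bufferable by default; check_writebuffer_bugs() demotes
 * it to fully uncached if the write buffer mishandles aliases.
 */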
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

/*
 * We take the easy way out of the cache aliasing problem: we make the
 * aliased PTEs uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
        pte_t *ptep)
{
        pte_t entry = *ptep;
        int ret;

        /*
         * If this page is present, it's actually being shared.
         */
        ret = pte_present(entry);

        /*
         * If this page isn't present, or is already set up to
         * fault (i.e., is old), we can safely ignore any issues.
         */
        if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
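                /*
                 * Write back and invalidate any cached data for the page,
                 * remap it with the shared (uncacheable) memory type, and
                 * zap the now-stale TLB entry.
                 */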
                unsigned long pfn = pte_pfn(entry);
                flush_cache_page(vma, address, pfn);
                outer_flush_range((pfn << PAGE_SHIFT),
                                  (pfn << PAGE_SHIFT) + PAGE_SIZE);
                pte_val(entry) &= ~L_PTE_MT_MASK;
                pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_page(vma, address);
        }

        return ret;
}

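/*
 * Walk the page tables of @vma's mm down to the PTE covering @address
 * and, if one is mapped there, hand it to do_adjust_pte().  Returns
 * non-zero when a present PTE was found.
 */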
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none_or_clear_bad(pgd))
                return 0;

        pmd = pmd_offset(pgd, address);
        if (pmd_none_or_clear_bad(pmd))
                return 0;

        pte = pte_offset_map(pmd, address);

        ret = do_adjust_pte(vma, address, pte);

        pte_unmap(pte);

        return ret;
}

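/*
 * Hunt down every other shared user-space mapping of the page at @addr
 * that lives in the same mm, and make any present PTEs for it
 * uncacheable.  If such aliases exist, the faulting mapping is adjusted
 * too; otherwise flushing the cache for this page alone is enough.
 */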
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        struct prio_tree_iter iter;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        flush_dcache_mmap_lock(mapping);
        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
        }
        flush_dcache_mmap_unlock(mapping);
        if (aliases)
                adjust_pte(vma, addr);
        else
                flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct address_space *mapping;
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping(page);
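        /*
         * On uniprocessor systems, carry out any D-cache flush that
         * flush_dcache_page() deferred by setting PG_dcache_dirty.
         */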
#ifndef CONFIG_SMP
        if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
                __flush_dcache_page(mapping, page);
#endif
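        /*
         * A VIVT D-cache can hold aliases of the same page at different
         * user addresses, so those mappings must be made coherent.  With
         * a VIPT cache only the I-cache may be stale, and only
         * executable mappings need it flushed.
         */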
        if (mapping) {
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, pfn);
                else if (vma->vm_flags & VM_EXEC)
                        __flush_icache_all();
        }
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

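        /*
         * p1 and p2 are two virtual aliases of the same physical page.
         * Write 1 through one alias and 0 through the other; if the
         * write buffer handles the aliases correctly, the read-back
         * through the first alias sees the later store and val ends up
         * 0.  A non-zero return therefore signals an aliasing problem.
         */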
        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();
        return val != zero;
}

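/*
 * Map one freshly allocated page at two different virtual addresses
 * with the bufferable memory type and run check_writebuffer() on the
 * pair.  If the test fails, or the mappings cannot even be set up,
 * fall back to mapping shared pages fully uncached rather than merely
 * bufferable.
 */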
void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        printk(KERN_INFO "CPU: Testing write buffer coherency: ");

        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
                                        L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page";
        }

        if (v) {
                printk("failed, %s\n", reason);
                shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                printk("ok\n");
        }
}