#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Establish a new pte, but only set the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this. We return whether the PTE actually changed, which
 * in turn instructs the caller to do things like update_mmu_cache.
 * This used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			\
	}								\
	__changed;							\
})
#endif
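
/*
 * Illustrative sketch (not part of this header): a fault handler would
 * typically use ptep_set_access_flags() roughly as below. The names
 * "entry" and "write_access", and the surrounding pte-lock context,
 * are assumptions made for the example:
 *
 *	pte_t entry = pte_mkyoung(*ptep);
 *	if (write_access)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_access))
 *		update_mmu_cache(vma, address, ptep);
 */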

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
	__pte;								\
})
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif
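
/*
 * Illustrative sketch (not part of this header): a teardown path that
 * finds a pte which is already not present can clear it with this
 * cheaper variant instead of a flushing one. The loop shape and the
 * "ptent" name are assumptions for the example:
 *
 *	if (unlikely(!pte_present(ptent))) {
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 *		continue;
 *	}
 */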

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page, mapped)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page)	(0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, the rounded-up __boundary may wrap to 0 at the
 * top of the address space; comparing __boundary - 1 against
 * (end) - 1 keeps the test correct in that case.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif
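
/*
 * Illustrative sketch (not part of this header): these helpers drive
 * the usual page-table walking loop, shown here at the pmd level
 * ("next", "addr", and "end" are assumptions for the example):
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		unsigned long next = pmd_addr_end(addr, end);
 *		... process the ptes covered by [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */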

/*
 * When walking page tables, we usually want to skip any p?d_none
 * entries and any p?d_bad entries, reporting the error before
 * resetting the entry to none. Do the tests inline, but report and
 * clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
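
/*
 * Illustrative sketch (not part of this header): a walker combines
 * these tests with the addr_end helpers above to skip empty or
 * corrupted entries, e.g. at the pud level:
 *
 *	pud = pud_offset(pgd, addr);
 *	do {
 *		next = pud_addr_end(addr, end);
 *		if (pud_none_or_clear_bad(pud))
 *			continue;
 *		... descend into the pmds under this pud ...
 *	} while (pud++, addr = next, addr != end);
 */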

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must
 * actually be committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
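
/*
 * Illustrative sketch (not part of this header): changing protections
 * on a pte with the transaction interface, under the pte lock, in the
 * style of an mprotect-type update ("newprot" is an assumption for
 * the example):
 *
 *	pte_t ptent = ptep_modify_prot_start(mm, addr, ptep);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, ptent);
 */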
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
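
/*
 * Illustrative sketch (not part of this header): batching a run of pte
 * updates under the page table lock (the loop bounds are assumptions
 * for the example):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
 *	arch_leave_lazy_mmu_mode();
 *
 * Each pte is read once and written once here; re-reading a pte that
 * was modified earlier in the same lazy section would be exactly the
 * kind of read hazard described above.
 */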

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests. By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exit should always be
 * paired. This is for the sanity of maintaining and reasoning about
 * the kernel code. In this case, the exit (end of the context switch)
 * is in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif
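
/*
 * Illustrative sketch (not part of this header): the generic context
 * switch path invokes this hook just before switching, roughly:
 *
 *	arch_start_context_switch(prev);
 *	switch_to(prev, next, prev);
 *
 * with the matching end of the batched region handled inside the
 * architecture's own switch code.
 */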

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being
 * established for the physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				    unsigned long pfn, unsigned long size)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * It can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
				   unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			     unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			    unsigned long size);
#endif
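
/*
 * Illustrative sketch (not part of this header): a pfnmap setup path
 * would pair these roughly as follows (the error handling shape is an
 * assumption for the example):
 *
 *	err = track_pfn_vma_new(vma, &prot, pfn, size);
 *	if (err)
 *		return err;
 *	... install the mapping with the possibly-updated prot ...
 *
 * and on teardown:
 *
 *	untrack_pfn_vma(vma, pfn, size);
 */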

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */