/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif

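/*
 * Worked example (illustrative, assuming high_memory = 0xc8000000, e.g.
 * 128MB of RAM at the usual 0xc0000000 PAGE_OFFSET): VMALLOC_START is
 * (0xc8000000 + 0x00800000) & ~0x007fffff = 0xc8800000, leaving the
 * intended 8MB hole between the end of lowmem and the vmalloc area.
 */
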
/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

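/*
 * For illustration: with PGDIR_SHIFT == 21, each of the 2048 pgd entries
 * covers 1 << 21 = 2MB of virtual space (2048 * 2MB = the full 4GB), and
 * each 2MB region is in turn mapped by one 512-entry "PTE" table of 4kB
 * pages (512 * 4kB = 2MB).
 */
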
#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the Linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
#define L_PTE_DIRTY		(1 << 6)
#define L_PTE_WRITE		(1 << 7)
#define L_PTE_USER		(1 << 8)
#define L_PTE_EXEC		(1 << 9)
#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs' cacheable and bufferable bits:   XXCB
 */
#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(0x09 << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
#define L_PTE_MT_MASK		(0x0f << 2)

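/*
 * Reading the encoding (illustrative): the low two bits of the four-bit
 * field are the classic C and B bits, so e.g. L_PTE_MT_WRITEBACK (0011)
 * is "cacheable, bufferable" on a pre-ARMv6 CPU, while the upper XX bits
 * select the extended memory types on later architectures.
 */
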
#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
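
/*
 * For example, a PROT_READ|PROT_WRITE private mapping indexes __P011,
 * which is __PAGE_COPY (no L_PTE_WRITE): the first store faults so the
 * kernel can perform copy-on-write.  The shared equivalent indexes
 * __S011 == __PAGE_SHARED, which does carry L_PTE_WRITE.
 */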
#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte, KM_PTE0)
#define pte_unmap_nested(pte)		__pte_unmap(pte, KM_PTE1)

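/*
 * Illustrative note: with CONFIG_HIGHPTE the pte page may live in
 * highmem and must be mapped with kmap_atomic() first; the
 * "+ PTRS_PER_PTE" below then steps over the two hardware tables at
 * the start of the page to reach the Linux entries that follow them
 * (see the layout diagram above).
 */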
#ifndef CONFIG_HIGHPTE
#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
#define __pte_unmap(pte,km)	do { } while (0)
#else
#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
#endif

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)

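/*
 * Illustrative note: PTE_EXT_NG marks a hardware pte as non-global, so
 * user mappings (addr < TASK_SIZE) get TLB entries tagged with the
 * current ASID, while kernel and vector-page entries stay global.
 */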
#define set_pte_at(mm,addr,ptep,pteval) do { \
	set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
 } while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
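
/*
 * For example, PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE) expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{ pte_val(pte) &= ~L_PTE_WRITE; return pte; }
 */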

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
#endif

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

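	/*
	 * Mask off the low bits to find the start of the two hardware
	 * pte tables, then step over them (2 x 1024 bytes) to reach the
	 * Linux versions stored immediately after them (see the layout
	 * diagram above).
	 */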
	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

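/*
 * Illustrative sketch (not part of this header): a typical walk from an
 * mm to the Linux pte for a user address looks roughly like
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */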
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
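
/*
 * For illustration: only the EXEC/WRITE/USER protection bits are taken
 * from the new pgprot; the memory type, PRESENT, YOUNG and DIRTY bits
 * of the original pte are preserved across (e.g.) an mprotect() call.
 */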

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
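
/*
 * Worked example (illustrative): __swp_entry(2, 0x1234) gives
 * (2 << 3) | (0x1234 << 9) = 0x00246810; __swp_type() recovers
 * (0x00246810 >> 3) & 0x3f = 2 and __swp_offset() recovers
 * 0x00246810 >> 9 = 0x1234.
 */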

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
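
/*
 * Worked example (illustrative): pgoff_to_pte(0x1000) gives
 * (0x1000 << 3) | L_PTE_FILE = 0x00008004, and pte_to_pgoff() recovers
 * 0x00008004 >> 3 = 0x1000.  With the three low bits reserved for the
 * PRESENT/FILE encoding, 32 - 3 = 29 bits remain for the file offset,
 * hence PTE_FILE_MAX_BITS.
 */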

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */