/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/fs.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
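
/*
 * Example: booting with "cachepolicy=writethrough" on the kernel
 * command line selects the write-through entry above.  As the check
 * above shows, anything other than writeback is only honoured on
 * pre-ARMv6 CPUs.
 */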

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
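
/*
 * Note: "ecc=on" makes build_mem_type_table() below OR PMD_PROTECTION
 * (the L1 "P" bit) into the kernel memory mappings; it only has an
 * effect on CPUs that implement that bit.
 */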

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif
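
/*
 * adjust_cr() above updates the two cached copies of the control
 * register and the live register together, with interrupts disabled,
 * so the three never disagree.  CR_A is masked out because alignment
 * checking is managed separately (see noalign_setup() above).
 */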

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

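/*
 * Example (hypothetical caller): look up the section attributes used
 * for write-combined device mappings.
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE_WC);
 *	if (mt)
 *		printk(KERN_INFO "WC prot_sect: %08x\n", mt->prot_sect);
 */
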
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

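/*
 * Note: build_mem_type_table() must run before the first call to
 * create_mapping() below, since create_mapping() consumes the
 * mem_types[] attributes fixed up here; paging_init() calls it first
 * for that reason.
 */
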
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

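/*
 * The helper above is used when userspace mmap()s /dev/mem: addresses
 * backed by RAM keep their attributes (or become write-combined when
 * the file was opened with O_SYNC), while non-RAM physical addresses
 * are always mapped uncached.
 */
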
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

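/*
 * Note: early_pte_alloc() below allocates 2 * PTRS_PER_PTE entries per
 * L2 table because each table exists in two versions sharing one page:
 * the hardware copy and the Linux copy carrying the extra status bits
 * (young, dirty) that the hardware format lacks.
 */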
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

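/*
 * The mapping helpers nest as follows: create_mapping() below walks the
 * PGD entries, alloc_init_section() fills L1 section entries whenever
 * the region is section-aligned, and alloc_init_pte() above falls back
 * to individual L2 page table entries otherwise.
 */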
static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

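/*
 * Example (hypothetical machine support code): statically map 1MB of
 * device registers at physical 0x10000000 to a fixed virtual address.
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 */
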
static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

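/*
 * Example: booting with "vmalloc=256M" reserves 256MB of address space
 * for vmalloc by lowering vmalloc_min; the value is clamped to the
 * range [16MB, VMALLOC_END - (PAGE_OFFSET + 32MB)] by the checks above.
 */
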
phys_addr_t lowmem_end_addr;

static void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) > vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area, greatly simplifying things later.
		 */
		if (__va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
			       "to -%.8lx (vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1,
			       bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		} else if (is_smp() && tlb_ops_need_broadcast()) {
			/*
			 * kmap_high needs to occasionally flush TLB entries,
			 * however, if the TLB entries need to be broadcast
			 * we may deadlock:
			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
			 *  flush_tlb_kernel_range->smp_call_function_many
			 *  (must not be called with irqs off)
			 */
			reason = "without hardware TLB ops broadcasting";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static inline void map_memory_bank(struct membank *bank)
{
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
}

static void __init map_lowmem(void)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* Map all the lowmem memory banks. */
	for (i = 0; i < mi->nr_banks; i++) {
		struct membank *bank = &mi->bank[i];

		if (!bank->highmem)
			map_memory_bank(bank);
	}
}

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

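/*
 * meminfo_cmp() orders banks by ascending start pfn; paging_init()
 * below sorts meminfo with it before doing anything else, so that
 * meminfo.bank[0] is always the lowest bank (prepare_page_table()
 * relies on this when it keeps the first bank's mappings).
 */
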
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need access to user-mode page tables here.  For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	pgd = current->active_mm->pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}