/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split : 1;
	int		curpage;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU with stale large TLB
 * entries to change the page attributes in parallel while another CPU is
 * splitting a large page entry and changing the attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	unsigned long flags;

	/* Protect against CPA */
	spin_lock_irqsave(&pgd_lock, flags);
	direct_pages_count[level] += pages;
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void split_page_count(int level)
{
	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:  %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:  %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:  %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:  %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

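/*
 * Illustrative /proc/meminfo output (example values only):
 *
 *	DirectMap4k:      12288 kB
 *	DirectMap2M:    4081664 kB
 *
 * The shifts above convert page counts into kB: a 4k page is 4 kB (<< 2),
 * a 2M page is 2048 kB (<< 11), a 4M page is 4096 kB (<< 12) and a 1G
 * page is 1048576 kB (<< 20).
 */
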
#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
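
/*
 * Usage sketch (illustrative only): after filling in a descriptor that a
 * non-snooping agent will read, push it out of the CPU caches first:
 *
 *	memcpy(desc, &template, sizeof(*desc));
 *	clflush_cache_range(desc, sizeof(*desc));
 */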

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long *addr;

	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_range, NULL, 1);

	if (!cache)
		return;

	/* 4M threshold */
	if (numpages >= 1024) {
		if (boot_cpu_data.x86_model >= 4)
			wbinvd();
		return;
	}
	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr++) {
		pte_t *pte = lookup_address(*addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) *addr, PAGE_SIZE);
	}
}

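/*
 * Illustrative arithmetic for the threshold above: 1024 4k pages are 4MB.
 * Beyond that, clflush'ing every cacheline of every page is likely slower
 * than a full cache writeback + invalidate, hence the wbinvd() shortcut
 * in cpa_flush_array().
 */
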
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext since that is gone later on. On
	 * 64bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
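
/*
 * Illustrative example: a request to make a .rodata page writable, e.g.
 * set_memory_rw() on an address whose pfn falls inside .rodata, arrives
 * here with _PAGE_RW in the forbidden mask, so the RW bit is silently
 * stripped and the page stays read-only.
 */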

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);
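
/*
 * Usage sketch (illustrative): check whether an address is currently
 * backed by a large page in the kernel mapping:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && pte_present(*pte) && level == PG_LEVEL_2M)
 *		pr_info("addr %lx sits in a 2M mapping\n", addr);
 */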

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	if (cpa->force_split)
		return 1;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
#ifdef CONFIG_X86_64
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
#endif
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flags |= CPA_FLUSHTLB;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}
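
/*
 * Worked example (illustrative): for a 2M page mapped at 0x40000000, a
 * request covering all 512 4k pages starting at 0x40000000 is aligned and
 * covers the whole large page, so it can be done in place. A request for
 * 16 pages starting at 0x40010000 covers only part of the large page, so
 * do_split stays 1 (assuming the new pgprot really differs) and the
 * caller splits the mapping.
 */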

static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	if (!debug_pagealloc)
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc)
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	if (address >= (unsigned long)__va(0) &&
		address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);

#ifdef CONFIG_X86_64
	if (address >= (unsigned long)__va(1UL<<32) &&
		address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);
#endif

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. (See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.)
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base)
		__free_page(base);
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	/*
	 * Ignore all non-primary paths.
	 */
	if (!primary)
		return 0;

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
		     "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
		     *cpa->vaddr);

		return -EFAULT;
	}
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

	if (cpa->flags & CPA_ARRAY)
		address = cpa->vaddr[cpa->curpage];
	else
		address = *cpa->vaddr;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (!pte_val(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;
		/*
		 * Do we really change anything?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and the CPA_FLUSHTLB flag have been
	 * updated in try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err) {
		/*
		 * Do a global flush tlb after splitting the large page
		 * and before we do the actual change page attribute in the PTE.
		 *
		 * Without this, we violate the TLB application note, which says
		 * "The TLBs may contain both ordinary and large-page
		 *  translations for a 4-KByte range of linear addresses. This
		 *  may occur if software modifies the paging structures so that
		 *  the page size used for the address range changes. If the two
		 *  translations differ with respect to page frame or attributes
		 *  (e.g., permissions), processor behavior is undefined and may
		 *  be implementation-specific."
		 *
		 * We do this global tlb flush inside the cpa_lock, so that we
		 * don't allow any other cpu, with stale tlb entries, to change
		 * in parallel the page attributes of a page that also falls
		 * into the just-split large page entry.
		 */
		flush_tlb_all();
		goto repeat;
	}

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;
	unsigned long temp_cpa_vaddr, vaddr;

	if (cpa->pfn >= max_pfn_mapped)
		return 0;

#ifdef CONFIG_X86_64
	if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
		return 0;
#endif
	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (cpa->flags & CPA_ARRAY)
		vaddr = cpa->vaddr[cpa->curpage];
	else
		vaddr = *cpa->vaddr;

	if (!(within(vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

		alias_cpa = *cpa;
		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
		alias_cpa.vaddr = &temp_cpa_vaddr;
		alias_cpa.flags &= ~CPA_ARRAY;

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

#ifdef CONFIG_X86_64
	if (ret)
		return ret;
	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(vaddr, (unsigned long) _text, (unsigned long) _end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
	alias_cpa.vaddr = &temp_cpa_vaddr;
	alias_cpa.flags &= ~CPA_ARRAY;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);
#endif
	return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;
		/* for array changes, we can't use large page */
		if (cpa->flags & CPA_ARRAY)
			cpa->numpages = 1;

		if (!debug_pagealloc)
			spin_lock(&cpa_lock);
		ret = __change_page_attr(cpa, checkalias);
		if (!debug_pagealloc)
			spin_unlock(&cpa_lock);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		if (cpa->flags & CPA_ARRAY)
			cpa->curpage++;
		else
			*cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int array)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check whether we are asked to change an unsupported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (!array) {
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
	} else {
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	}

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

	vm_unmap_aliases();

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flags = 0;
	cpa.curpage = 0;
	cpa.force_split = force_split;

	if (array)
		cpa.flags |= CPA_ARRAY;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!(cpa.flags & CPA_FLUSHTLB))
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it, and in
	 * the error case, we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush) {
		if (cpa.flags & CPA_ARRAY)
			cpa_flush_array(addr, numpages, cache);
		else
			cpa_flush_range(*addr, numpages, cache);
	} else
		cpa_flush_all(cache);

out:
	return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
				       pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
		array);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
					 pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
		array);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	return change_page_attr_set(&addr, numpages,
				    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			    _PAGE_CACHE_UC_MINUS, NULL))
		return -EINVAL;

	return _set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_uc);
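
/*
 * Usage sketch (illustrative, error handling elided): a driver that wants
 * an uncached buffer in the kernel direct mapping:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, order);
 *
 *	if (set_memory_uc(buf, 1 << order))
 *		goto fail;
 *	...device and CPU now see the buffer uncached...
 *	set_memory_wb(buf, 1 << order);
 *	free_pages(buf, order);
 */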

int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
	unsigned long start;
	unsigned long end;
	int i;
	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	for (i = 0; i < addrinarray; i++) {
		start = __pa(addr[i]);
		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
			if (end != __pa(addr[i + 1]))
				break;
			i++;
		}
		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
			goto out;
	}

	return change_page_attr_set(addr, addrinarray,
				    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
out:
	for (i = 0; i < addrinarray; i++) {
		unsigned long tmp = __pa(addr[i]);

		if (tmp == start)
			break;
		for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
			if (end != __pa(addr[i + 1]))
				break;
			i++;
		}
		free_memtype(tmp, end);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(set_memory_array_uc);

int _set_memory_wc(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    __pgprot(_PAGE_CACHE_WC), 0);
}

int set_memory_wc(unsigned long addr, int numpages)
{
	if (!pat_enabled)
		return set_memory_uc(addr, numpages);

	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			    _PAGE_CACHE_WC, NULL))
		return -EINVAL;

	return _set_memory_wc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

	return _set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++) {
		unsigned long start = __pa(addr[i]);
		unsigned long end;

		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
			if (end != __pa(addr[i + 1]))
				break;
			i++;
		}
		free_memtype(start, end);
	}
	return change_page_attr_clear(addr, addrinarray,
				      __pgprot(_PAGE_CACHE_MASK), 1);
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(0), 1, 0);
}
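
/*
 * Note: set_memory_4k() changes no protection bits; it passes
 * force_split = 1 purely so the range ends up backed by 4k mappings
 * (for callers that must not have the range covered by a large page).
 */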

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = 0};

	/*
	 * No alias checking needed for setting the present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = 0};

	/*
	 * No alias checking needed for clearing the present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages for identity mappings are not used at boot time
	 * and hence no memory allocations during large page split.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock, so flush only the current cpu:
	 */
	__flush_tlb_all();
}
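
/*
 * Illustrative note: with CONFIG_DEBUG_PAGEALLOC the page allocator
 * calls kernel_map_pages(page, 1 << order, 0) when pages are freed, so
 * stale accesses through the direct mapping fault immediately, and maps
 * them back with enable == 1 on allocation.
 */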

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif