/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flushtlb;
};

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}

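/*
 * Illustrative use (hypothetical caller): flush the cachelines of one
 * page before its caching attributes are changed:
 *
 *	clflush_cache_range(page_address(page), PAGE_SIZE);
 *
 * cpa_flush_range() below uses exactly this per-page pattern.
 */
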
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

#define HIGH_MAP_START	__START_KERNEL_map
#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)

/*
 * Converts a virtual address to an x86-64 highmap address:
 */
static unsigned long virt_to_highmap(void *address)
{
#ifdef CONFIG_X86_64
	return __pa((unsigned long)address) + HIGH_MAP_START - phys_base;
#else
	return (unsigned long)address;
#endif
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;
	/*
	 * Do the same for the x86-64 high kernel mapping:
	 */
	if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
		pgprot_val(forbidden) |= _PAGE_NX;

	/* The .rodata section needs to be read-only: */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
	/*
	 * Do the same for the x86-64 high kernel mapping:
	 */
	if (within(address, virt_to_highmap(__start_rodata),
		   virt_to_highmap(__end_rodata)))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

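/*
 * Illustrative effect of the checks above: a request to make .rodata
 * writable has _PAGE_RW masked out again, and a request to make kernel
 * text non-executable has _PAGE_NX masked out - the forbidden bits
 * always win over the caller's pgprot.
 */
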
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

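/*
 * Typical use (as in cpa_flush_range() above): look the entry up and
 * check both the pointer and the reported level before using it:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT) && level == PG_LEVEL_4K)
 *		...
 */
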
/*
 * Set the new pmd in all the pgds we know about. On 32-bit, the kernel
 * pmds may not be shared between the per-process pgds
 * (!SHARED_KERNEL_PMD); in that case every pgd on pgd_list carries its
 * own copy of the kernel mappings at this level and must be updated
 * as well:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

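/*
 * try_preserve_large_page - check whether the requested attribute
 * change can be applied without splitting the large (2M/1G) mapping.
 *
 * Returns 1 when the large page must be split, 0 when it could be
 * preserved (possibly updated in place; cpa->numpages and
 * cpa->flushtlb are adjusted accordingly), or a negative error code.
 */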
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
#ifdef CONFIG_X86_64
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
#endif
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
	new_prot = static_protections(new_prot, address);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE) {
		pgprot_t chk_prot = static_protections(new_prot, addr);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}

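/*
 * A pool of preallocated single pages: split_large_page() takes its
 * pagetable page from here, so a required split cannot fail with
 * -ENOMEM in contexts where allocation is impossible. pool_low,
 * pool_used and pool_failed are statistics only.
 */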
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed, pool_refill;

static void cpa_fill_pool(void)
{
	struct page *p;
	gfp_t gfp = GFP_KERNEL;

	/* Do not allocate from interrupt context: */
	if (in_irq() || irqs_disabled())
		return;
	/*
	 * Check unlocked. It does not matter when we have one more
	 * page in the pool. The bit lock avoids recursive pool
	 * allocations:
	 */
	if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
		return;

#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * We could do:
	 *	gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	 * but this fails on !PREEMPT kernels
	 */
	gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
#endif

	while (pool_pages < pool_size) {
		p = alloc_pages(gfp, 0);
		if (!p) {
			pool_failed++;
			break;
		}
		spin_lock_irq(&pgd_lock);
		list_add(&p->lru, &page_pool);
		pool_pages++;
		spin_unlock_irq(&pgd_lock);
	}
	clear_bit_unlock(0, &pool_refill);
}

#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16

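/*
 * Worked example for the sizing below, assuming 4 KiB pages
 * (PAGE_SHIFT == 12, so SHIFT_MB == 8): with 4 GiB of RAM
 * si.totalram is ~1048576 pages, >> SHIFT_MB gives 4096 MiB,
 * (4096 + ROUND_MB_GB) >> SHIFT_MB_GB rounds that to 4 GiB, and the
 * pool is sized to 4 * POOL_PAGES_PER_GB = 64 pages.
 */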
void __init cpa_init(void)
{
	struct sysinfo si;
	unsigned long gb;

	si_meminfo(&si);
	/*
	 * Calculate the number of pool pages:
	 *
	 * Convert totalram (nr of pages) to MiB and round to the next
	 * GiB. Shift MiB to GiB and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
	gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
	pool_size = POOL_PAGES_PER_GB * gb;
	pool_low = pool_size;

	cpa_fill_pool();
	printk(KERN_DEBUG
	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
	       pool_pages, pool_size);
}

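/*
 * split_large_page - replace one large (2M/1G) mapping by a table of
 * small entries covering the same range with the same protections.
 *
 * Returns 0 on success (also when another CPU raced us and did the
 * split already) and -ENOMEM when the page pool is empty.
 */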
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	/*
	 * Get a page from the pool. The pool list is protected by the
	 * pgd_lock, which we have to take anyway for the split
	 * operation:
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (list_empty(&page_pool)) {
		spin_unlock_irqrestore(&pgd_lock, flags);
		return -ENOMEM;
	}

	base = list_first_entry(&page_pool, struct page, lru);
	list_del(&base->lru);
	pool_pages--;

	if (pool_pages < pool_low)
		pool_low = pool_pages;

	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see Section 4.13.2 of the Intel 64 and
	 * IA-32 Architectures Software Developer's Manual).
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base) {
		list_add(&base->lru, &page_pool);
		pool_pages++;
	} else
		pool_used++;
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}

static int __change_page_attr(unsigned long address, struct cpa_data *cpa)
{
	int do_split, err;
	unsigned int level;
	struct page *kpte_page;
	pte_t *kpte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	if (level == PG_LEVEL_4K) {
		pte_t new_pte, old_pte = *kpte;
		pgprot_t new_prot = pte_pgprot(old_pte);

		if (!pte_val(old_pte)) {
			printk(KERN_WARNING "CPA: called for zero pte. "
			       "vaddr = %lx cpa->vaddr = %lx\n", address,
			       cpa->vaddr);
			WARN_ON(1);
			return -EINVAL;
		}

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));

		/*
		 * Do we really change anything?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flushtlb have been updated
	 * in try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err) {
		cpa->flushtlb = 1;
		goto repeat;
	}

	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @cpa: cpa_data descriptor; cpa->vaddr is the virtual address in the
 *	linear mapping, cpa->mask_set/cpa->mask_clr the attributes to
 *	set/clear
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(struct cpa_data *cpa)
{
	int err;
	unsigned long address = cpa->vaddr;

#ifdef CONFIG_X86_64
	unsigned long phys_addr = __pa(address);

	/*
	 * If we are inside the high mapped kernel range, then we
	 * fixup the low mapping first. __va() returns the virtual
	 * address in the linear mapping:
	 */
	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
		address = (unsigned long) __va(phys_addr);
#endif

	err = __change_page_attr(address, cpa);
	if (err)
		return err;

#ifdef CONFIG_X86_64
	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
		/*
		 * Calc the high mapping address. See __phys_addr()
		 * for the non obvious details.
		 *
		 * Note that NX and other required permissions are
		 * checked in static_protections().
		 */
		address = phys_addr + HIGH_MAP_START - phys_base;

		/*
		 * Our high aliases are imprecise, because we check
		 * everything between 0 and KERNEL_TEXT_SIZE, so do
		 * not propagate lookup failures back to users:
		 */
		__change_page_attr(address, cpa);
	}
#endif
	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;
		ret = change_page_attr_addr(cpa);
		if (ret)
			return ret;

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

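/*
 * Does the attribute mask touch any of the caching bits (PAT, PWT,
 * PCD)? If so, the caches have to be flushed after the change:
 */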
static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	struct cpa_data cpa;
	int ret, cache;

	/*
	 * Check, if we are requested to change a not supported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
		return 0;

	/* Ensure we are PAGE_SIZE aligned: */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		/*
		 * People should not be passing in unaligned addresses:
		 */
		WARN_ON_ONCE(1);
	}

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;

	ret = __change_page_attr_set_clr(&cpa);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

out:
	cpa_fill_pool();
	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

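/*
 * The set_memory_* and set_pages_* functions below are the public
 * interface. Illustrative use (hypothetical driver code): make a
 * buffer uncached before handing it to a device and restore
 * write-back caching afterwards:
 *
 *	set_memory_uc((unsigned long)buf, nr_pages);
 *	...
 *	set_memory_wb((unsigned long)buf, nr_pages);
 */
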
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

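/*
 * __set_pages_p/__set_pages_np map and unmap pages by toggling
 * _PAGE_PRESENT and _PAGE_RW, so that accesses to freed pages fault
 * immediately. They call __change_page_attr_set_clr() directly and
 * leave the TLB flushing to kernel_map_pages() below.
 */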
static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa);
}

static int __set_pages_np(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

	return __change_page_attr_set_clr(&cpa);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock, so flush only the current cpu:
	 */
	__flush_tlb_all();

	/*
	 * Try to refill the page pool here. We can do this only after
	 * the tlb flush.
	 */
	cpa_fill_pool();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif