/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

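/* Is @addr within the half-open interval [@start, @end)? */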
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}

static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

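/*
 * Run __cpa_flush_all() on every CPU via IPI. Must be called with
 * interrupts enabled, otherwise on_each_cpu() could deadlock:
 */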
static void cpa_flush_all(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages)
{
	unsigned long addr;
	unsigned int i;
	int level;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && pte_present(*pte))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * Does not cover __inittext, since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

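/*
 * Look up the kernel page-table entry of @address and report the level
 * of the mapping (4k, 2M or none) via @level. Returns a pointer into
 * the live page tables, or NULL if there is no mapping:
 */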
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}

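/*
 * Set the new pte in init_mm and, on 32-bit kernels that do not share
 * the kernel PMD, propagate the change to every pgd on the pgd_list:
 */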
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

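/*
 * Split a large kernel mapping into 4k pages, so that one page in the
 * range can get different protection bits. The new page table inherits
 * the protections of the large page it replaces:
 */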
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	unsigned int i;
	int level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	pgprot_val(ref_prot) &= ~_PAGE_NX;
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

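/*
 * Apply @mask_set/@mask_clr to the protection bits of a single kernel
 * page, splitting a large page first if necessary:
 */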
static int
__change_page_attr(unsigned long address, unsigned long pfn,
		   pgprot_t mask_set, pgprot_t mask_clr)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	if (level == PG_LEVEL_4K) {
		pgprot_t new_prot = pte_pgprot(*kpte);
		pte_t new_pte, old_pte = *kpte;

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		new_prot = static_protections(new_prot, address);

		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));

		set_pte_atomic(kpte, new_pte);
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address:	Virtual address in linear mapping.
 * @mask_set:	Attribute bits to set (PAGE_*)
 * @mask_clr:	Attribute bits to clear (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int
change_page_attr_addr(unsigned long address, pgprot_t mask_set,
		      pgprot_t mask_clr)
{
	unsigned long pfn;
	int err;

#ifdef CONFIG_X86_64
	/*
	 * If we are called on the high kernel text mapping, operate
	 * on the direct-mapping alias of the same physical page:
	 */
	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE)
		address = (unsigned long)__va(__pa((void *)address));
#endif

	pfn = __pa(address) >> PAGE_SHIFT;

	err = __change_page_attr(address, pfn, mask_set, mask_clr);
	if (err)
		return err;

#ifdef CONFIG_X86_64
	/*
	 * Handle the kernel mapping too, which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;

		addr2 = __pa(address) + __START_KERNEL_map - phys_base;
		/* Make sure the kernel mappings stay executable */
		pgprot_val(mask_clr) |= _PAGE_NX;
		/*
		 * Our high aliases are imprecise, so do not propagate
		 * failures back to users:
		 */
		__change_page_attr(addr2, pfn, mask_set, mask_clr);
	}
#endif

	return err;
}

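/*
 * Apply the set/clear masks to @numpages pages starting at @addr,
 * stopping at the first page that fails:
 */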
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	unsigned int i;
	int ret;

	for (i = 0; i < numpages; i++, addr += PAGE_SIZE) {
		ret = change_page_attr_addr(addr, mask_set, mask_clr);
		if (ret)
			return ret;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support clflush, or in
	 * the error case, we fall back to cpa_flush_all() (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		cpa_flush_all();

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

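/*
 * A hypothetical usage sketch from a driver: mark a lowmem buffer "buf"
 * spanning "nrpages" pages uncacheable while a device writes into it,
 * then restore write-back caching ("buf" and "nrpages" are made up for
 * illustration):
 *
 *	set_memory_uc((unsigned long)buf, nrpages);
 *	... let the device write into buf ...
 *	set_memory_wb((unsigned long)buf, nrpages);
 */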
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

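/*
 * The set_pages_* variants operate on a struct page instead of a
 * virtual address; they simply apply the corresponding set_memory_*
 * call to the page's address in the linear mapping:
 */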
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

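/*
 * Raw variants without the cache/TLB flushing, only used by the
 * debugging and self-test code below:
 */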
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

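/*
 * For CONFIG_DEBUG_PAGEALLOC: unmap pages from the linear mapping when
 * they are freed, so that use-after-free accesses fault immediately:
 */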
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock, so flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif