/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif
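
/*
 * Note (illustrative addition, not in the original file): passing "nopat"
 * on the kernel command line calls pat_disable() via the early_param()
 * hook above, so pat_init() below returns before touching the IA32_PAT MSR.
 */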

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB          _PAGE_CACHE_WB
	 *      001 WC          _PAGE_CACHE_WC
	 *      010 UC-         _PAGE_CACHE_UC_MINUS
	 *      011 UC          _PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
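	/*
	 * Worked example (illustrative): with PAT_WB = 6, PAT_WC = 1,
	 * PAT_UC_MINUS = 7 and PAT_UC = 0, the expression above evaluates
	 * to 0x0007010600070106 -- one byte per PAT slot, slot 0 in the
	 * low byte, with slots 4-7 mirroring slots 0-3.
	 */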

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should be a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
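
/*
 * Illustrative example (not from the original file): two drivers may each
 * reserve the same 4K range as UC-, yielding two overlapping list entries
 * that effectively reference-count the range. A later WB request for that
 * range either fails in chk_conflict() below or, if the caller supplied a
 * new_type pointer, is downgraded to the already-tracked UC- type.
 */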

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The PAT and MTRR encodings do not use the same numeric values.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}
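
/*
 * Example (illustrative): a WB request for a range the MTRRs mark as UC
 * (say, an MMIO hole) comes back as UC-, so a /dev/mem mmap of that hole
 * will not be cached; a WB request over true write-back RAM stays WB.
 */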

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

static struct memtype *cached_entry;
static u64 cached_start;

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/..)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
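
/*
 * Return-value summary (illustrative): 1 means the whole range is RAM,
 * 0 means none of it is (including anything in the low ISA space), and
 * -1 flags a mixed range, which callers treat as an error.
 */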

/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is ok, because only one driver will be owning the page and
 * doing set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non WB. In future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * UC and WC mapping.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || PageNonWB(page))
			goto out;

		SetPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		ClearPageNonWB(page);
	}

	return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || !PageNonWB(page))
			goto out;

		ClearPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		SetPageNonWB(page);
	}
	return -EINVAL;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants
 * to inherit the memory type from mtrr (if WB), existing PAT, defaulting
 * to UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return reserve_ram_pages_type(start, end, req_type,
					      new_type);
	else if (is_range_ram < 0)
		return -EINVAL;

	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end = end;
	new->type = actual_type;

	spin_lock(&memtype_lock);

	if (cached_entry && start >= cached_start)
		entry = cached_entry;
	else
		entry = list_entry(&memtype_list, struct memtype, nd);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			cached_entry = list_entry(where, struct memtype, nd);
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
				cached_entry = list_entry(where,
							struct memtype, nd);
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				cached_entry = list_entry(entry->nd.prev,
							struct memtype, nd);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	cached_start = start;

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
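
/*
 * Usage sketch (illustrative, not part of this file): a typical non-RAM
 * caller pairs reserve_memtype() with free_memtype() around the lifetime
 * of its mapping, and honors the possibly-downgraded type returned in
 * new_type:
 *
 *	unsigned long want = _PAGE_CACHE_WC, got;
 *
 *	if (reserve_memtype(paddr, paddr + size, want, &got))
 *		return -EBUSY;	// range tracked with a conflicting type
 *	// ... map paddr using the 'got' attribute ...
 *	free_memtype(paddr, paddr + size);	// on teardown
 */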

int free_memtype(u64 start, u64 end)
{
	struct memtype *entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return free_ram_pages_type(start, end);
	else if (is_range_ram < 0)
		return -EINVAL;

	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			if (cached_entry == entry || cached_start == start)
				cached_entry = NULL;

			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
		       current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}


pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
			       "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
			       current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (!pat_enabled || base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only, and after a successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
	 * behavior with RAM pages by returning success.
	 */
	if (is_ram != 0)
		return 0;

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (!pat_enabled)
		return 0;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
		      unsigned long pfn, unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (!pat_enabled)
		return 0;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
		     unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (!pat_enabled)
		return;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}
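
/*
 * Lifecycle summary (illustrative): for a remap_pfn_range() vma, mm calls
 * track_pfn_vma_new() at mmap time, track_pfn_vma_copy() when the vma is
 * duplicated across fork(), and untrack_pfn_vma() at unmap time, so each
 * successful reserve_pfn_range() is balanced by a free_pfn_range().
 */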

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
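
/*
 * Usage note (illustrative): drivers mapping prefetchable MMIO such as
 * framebuffers can apply pgprot_writecombine() to vma->vm_page_prot before
 * remap_pfn_range(); when PAT is disabled this degrades safely to an
 * uncached mapping via pgprot_noncached().
 */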

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
		   print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
			    NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);
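
/*
 * Usage note (illustrative): with debugfs mounted at its usual location,
 * the tracked reservations can be dumped with:
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	uncached-minus @ 0xd0000000-0xd0001000
 *
 * (sample output format per memtype_seq_show() above; the address range
 * shown is hypothetical)
 */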

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */