x86, PAT: Consolidate code in pat_x_mtrr_type() and reserve_memtype()
[net-next-2.6.git] / arch/x86/mm/pat.c

/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(const char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
        (void)reason;
}
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        if (!cpu_has_pat) {
                if (!boot_pat_state) {
                        pat_disable("PAT not supported by CPU.");
                        return;
                } else {
                        /*
                         * If this happens we are on a secondary CPU, but
                         * switched to PAT on the boot CPU. We have no way to
                         * undo PAT.
                         */
                        printk(KERN_ERR "PAT enabled, "
                               "but not supported by secondary CPU\n");
                        BUG();
                }
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

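/*
 * Editor's illustration, not in the original file: with PAT_WB=6,
 * PAT_WC=1, PAT_UC_MINUS=7 and PAT_UC=0 packed one byte per slot, the
 * value programmed into MSR_IA32_CR_PAT above works out to a constant.
 * A minimal sketch of the arithmetic:
 */
#if 0
        u64 pat = 0x06ULL               /* slot 0: WB  */
                | (0x01ULL << 8)        /* slot 1: WC  */
                | (0x07ULL << 16)       /* slot 2: UC- */
                | (0x00ULL << 24)       /* slot 3: UC  */
                | (0x06ULL << 32)       /* slot 4: WB  */
                | (0x01ULL << 40)       /* slot 5: WC  */
                | (0x07ULL << 48)       /* slot 6: UC- */
                | (0x00ULL << 56);      /* slot 7: UC  */
        /* pat == 0x0007010600070106ULL */
#endif
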
static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list, because the number of mappings
 * is expected to be relatively small. If this should be a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type encodings used by PAT and by MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_UC_MINUS;

                return _PAGE_CACHE_WB;
        }

        return req_type;
}

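/*
 * Editor's sketch, not in the original file: for a hypothetical BIOS
 * region that the MTRRs mark with a non-WB type, a WB request gets
 * demoted to UC_MINUS, while non-WB requests pass through untouched.
 */
#if 0
        /* assumes the MTRRs cover 0xf0000-0x100000 with a non-WB type */
        unsigned long type;

        type = pat_x_mtrr_type(0xf0000, 0x100000, _PAGE_CACHE_WB);
        /* type == _PAGE_CACHE_UC_MINUS */

        type = pat_x_mtrr_type(0xf0000, 0x100000, _PAGE_CACHE_WC);
        /* type == _PAGE_CACHE_WC, returned unchanged */
#endif
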
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

static struct memtype *cached_entry;
static u64 cached_start;

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                /*
                 * For legacy reasons, physical address range in the legacy ISA
                 * region is tracked as non-RAM. This will allow users of
                 * /dev/mem to map portions of legacy ISA region, even when
                 * some of those portions are listed (or not even listed) with
                 * different e820 types (RAM/reserved/...)
                 */
                if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
                    page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

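/*
 * Editor's note, not in the original file: the return convention is
 * 1 when the whole range is RAM, 0 when it is entirely non-RAM, and -1
 * when the range mixes the two. A minimal sketch of how callers branch:
 */
#if 0
        int is_range_ram = pat_pagerange_is_ram(start, end);

        if (is_range_ram == 1)
                return reserve_ram_pages_type(start, end, req_type, new_type);
        else if (is_range_ram < 0)
                return -EINVAL;         /* mixed ranges are rejected */
        /* otherwise fall through to the memtype list */
#endif
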
/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is ok, because only one driver will be owning the page and
 * doing set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non WB. In future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * UC and WC mapping.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || PageNonWB(page))
                        goto out;

                SetPageNonWB(page);
        }
        return 0;

out:
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                ClearPageNonWB(page);
        }

        return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || !PageNonWB(page))
                        goto out;

                ClearPageNonWB(page);
        }
        return 0;

out:
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                SetPageNonWB(page);
        }
        return -EINVAL;
}

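/*
 * Editor's sketch, not in the original file: the single-owner flow the
 * comment above describes, for a driver that owns an unmapped RAM range.
 */
#if 0
        /* mark the range non-WB; fails if any page is mapped or already
         * marked by another reserve */
        if (reserve_ram_pages_type(start, end, _PAGE_CACHE_UC_MINUS, NULL))
                return -EINVAL;

        /* ... use the memory with the non-WB attribute ... */

        free_ram_pages_type(start, end);        /* back to WB */
#endif
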
/*
 * req_type typically has one of the:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants
 * to inherit the memory type from mtrr (if WB), existing PAT, defaulting
 * to UC_MINUS.
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return reserve_ram_pages_type(start, end, req_type,
                                              new_type);
        else if (is_range_ram < 0)
                return -EINVAL;

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end = end;
        new->type = actual_type;

        spin_lock(&memtype_lock);

        if (cached_entry && start >= cached_start)
                entry = cached_entry;
        else
                entry = list_entry(&memtype_list, struct memtype, nd);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        cached_entry = list_entry(where, struct memtype, nd);
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                                cached_entry = list_entry(where,
                                                        struct memtype, nd);
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                cached_entry = list_entry(entry->nd.prev,
                                                        struct memtype, nd);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        cached_start = start;

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

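/*
 * Editor's usage sketch, not in the original file: how an ioremap-style
 * caller might pair reserve_memtype()/free_memtype() for a hypothetical
 * MMIO window at mmio_base/mmio_size.
 */
#if 0
        unsigned long got;

        if (reserve_memtype(mmio_base, mmio_base + mmio_size,
                            _PAGE_CACHE_UC_MINUS, &got))
                return NULL;            /* a conflicting alias exists */

        /* ... map and use the region with the 'got' attribute ... */

        free_memtype(mmio_base, mmio_base + mmio_size);
#endif
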
int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return free_ram_pages_type(start, end);
        else if (is_range_ram < 0)
                return -EINVAL;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        if (cached_entry == entry || cached_start == start)
                                cached_entry = NULL;

                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                       current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                               current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = -1;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        /*
         * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
         *
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != -1) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size,
                                         _PAGE_CACHE_WB, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                       "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                       current->comm, current->pid,
                       cattr_name(flags),
                       offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

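/*
 * Editor's illustration, not in the original file: the user-space side
 * that exercises the path above. Opening /dev/mem with O_SYNC takes the
 * UC_MINUS branch; without it, the WB-with-inherit branch is used.
 * (len and phys_addr are hypothetical.)
 */
#if 0
        int fd = open("/dev/mem", O_RDWR | O_SYNC);
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, phys_addr);
#endif
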
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                       "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                       current->comm, current->pid,
                       cattr_name(want_flags),
                       addr, (unsigned long long)(addr + size),
                       cattr_name(flags));
        }
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
        unsigned long id_sz;

        if (!pat_enabled || base >= __pa(high_memory))
                return 0;

        id_sz = (__pa(high_memory) < base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
                printk(KERN_INFO
                       "%s:%d ioremap_change_attr failed %s "
                       "for %Lx-%Lx\n",
                       current->comm, current->pid,
                       cattr_name(flags),
                       base, (unsigned long long)(base + size));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                             int strict_prot)
{
        int is_ram = 0;
        int ret;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() doesn't support RAM pages. Maintain the current
         * behavior with RAM pages by returning success.
         */
        if (is_ram != 0)
                return 0;

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                               " for %Lx-%Lx, got %s\n",
                               current->comm, current->pid,
                               cattr_name(want_flags),
                               (unsigned long long)paddr,
                               (unsigned long long)(paddr + size),
                               cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning different type than the one requested in
                 * non strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 * Otherwise, we reserve the entire vma range, by going through the PTEs page
 * by page to get physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        int retval = 0;
        unsigned long i, j;
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;
        pgprot_t pgprot;

        if (!pat_enabled)
                return 0;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        /* reserve entire vma page by page, using pfn and prot from pte */
        for (i = 0; i < vma_size; i += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                        continue;

                pgprot = __pgprot(prot);
                retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
                if (retval)
                        goto cleanup_ret;
        }
        return 0;

cleanup_ret:
        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
                        continue;

                free_pfn_range(paddr, PAGE_SIZE);
        }

        return retval;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 * Otherwise, we look at the pfn and size and reserve only the specified range
 * page by page.
 *
 * Note that this function can be called with caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                      unsigned long pfn, unsigned long size)
{
        int retval = 0;
        unsigned long i, j;
        resource_size_t base_paddr;
        resource_size_t paddr;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (!pat_enabled)
                return 0;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);
        }

        /* reserve page by page using pfn and size */
        base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
        for (i = 0; i < size; i += PAGE_SIZE) {
                paddr = base_paddr + i;
                retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
                if (retval)
                        goto cleanup_ret;
        }
        return 0;

cleanup_ret:
        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
                paddr = base_paddr + j;
                free_pfn_range(paddr, PAGE_SIZE);
        }

        return retval;
}

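/*
 * Editor's sketch, not in the original file: these hooks are reached
 * through the generic pfnmap paths; e.g. a hypothetical driver mmap
 * handler built on remap_pfn_range() ends up in track_pfn_vma_new().
 */
#if 0
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
#endif
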
/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                     unsigned long size)
{
        unsigned long i;
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;

        if (!pat_enabled)
                return;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);
                return;
        }

        if (size != 0 && size != vma_size) {
                /* free page by page, using pfn and size */
                paddr = (resource_size_t)pfn << PAGE_SHIFT;
                for (i = 0; i < size; i += PAGE_SIZE) {
                        /* offset from the base each time; a running
                         * "paddr = paddr + i" would double-step the range */
                        free_pfn_range(paddr + i, PAGE_SIZE);
                }
        } else {
                /* free entire vma, page by page, using the pfn from pte */
                for (i = 0; i < vma_size; i += PAGE_SIZE) {
                        if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                                continue;

                        free_pfn_range(paddr, PAGE_SIZE);
                }
        }
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

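/*
 * Editor's usage sketch, not in the original file: a driver that wants
 * write-combined access to a framebuffer-style pfnmap; with PAT disabled
 * the helper quietly degrades to an uncached mapping.
 */
#if 0
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        /* then remap_pfn_range(vma, ...) as usual */
#endif
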
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                   print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
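
/*
 * Editor's note, not in the original file: via arch_debugfs_dir the file
 * above appears as /sys/kernel/debug/x86/pat_memtype_list, printing one
 * "<type> @ 0x<start>-0x<end>" line per tracked range. Sample output
 * (values hypothetical):
 *
 *   PAT memtype list:
 *   uncached-minus @ 0xfed00000-0xfed01000
 *   write-combining @ 0xd0000000-0xd1000000
 */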