arch/x86/mm/iomap_32.c (net-next-2.6.git, blame view)
Commit: gpu/drm, x86, PAT: PAT support for io_mapping_*

/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/module.h>

#ifdef CONFIG_X86_PAE
static int
is_io_mapping_possible(resource_size_t base, unsigned long size)
{
	return 1;
}
#else
static int
is_io_mapping_possible(resource_size_t base, unsigned long size)
{
	/* There is no way to map an address above 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;

	return 1;
}
#endif

/*
 * Reserve a WC memtype for [base, base + size) and hand back the pgprot
 * the caller should use when mapping that range.  Without PAT, fall back
 * to an uncached mapping; with PAT, fail if the range already has a
 * conflicting (WB) memtype.
 */
int
reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
{
	unsigned long ret_flag;

	if (!is_io_mapping_possible(base, size))
		goto out_err;

	if (!pat_enabled) {
		*prot = pgprot_noncached(PAGE_KERNEL);
		return 0;
	}

	if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
		goto out_err;

	if (ret_flag == _PAGE_CACHE_WB)
		goto out_free;

	if (kernel_map_sync_memtype(base, size, ret_flag))
		goto out_free;

	*prot = __pgprot(__PAGE_KERNEL | ret_flag);
	return 0;

out_free:
	free_memtype(base, base + size);
out_err:
	return -EINVAL;
}

/* Release the memtype reservation taken by reserve_io_memtype_wc(). */
void
free_io_memtype(u64 base, unsigned long size)
{
	if (pat_enabled)
		free_memtype(base, base + size);
}

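/*
 * Illustrative sketch, not part of the original file: how a caller might
 * pair reserve_io_memtype_wc()/free_io_memtype() around the lifetime of a
 * device aperture.  The io_mapping_* API named in the commit subject above
 * is the intended consumer; the struct and function names here are
 * hypothetical.
 */
struct example_aperture {			/* hypothetical */
	resource_size_t base;
	unsigned long size;
	pgprot_t prot;				/* pgprot to use for mappings */
};

static int __maybe_unused
example_aperture_init(struct example_aperture *ap,
		      resource_size_t base, unsigned long size)
{
	/* Fails on >4G ranges without PAE or on a conflicting WB memtype. */
	if (reserve_io_memtype_wc(base, size, &ap->prot))
		return -EINVAL;

	ap->base = base;
	ap->size = size;
	return 0;
}

static void __maybe_unused
example_aperture_fini(struct example_aperture *ap)
{
	free_io_memtype(ap->base, ap->size);
}
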
/* Map 'pfn' using fixed map 'type' and protections 'prot' */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC.  UC_MINUS gets the real intention of the user,
	 * which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
		prot = PAGE_KERNEL_UC_MINUS;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

void
iounmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN + idx))
		kpte_clear_flush(kmap_pte - idx, vaddr);

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
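
/*
 * Illustrative sketch, not part of the original file: the full atomic
 * map/write/unmap path built on the two exported helpers above.  The pfn,
 * offset and 32-bit store are assumptions for the example; KM_USER0 is
 * simply one valid kmap slot for process context, and 'prot' would come
 * from reserve_io_memtype_wc().
 */
static void __maybe_unused
example_poke32(unsigned long pfn, unsigned long offset, u32 val, pgprot_t prot)
{
	void *vaddr;

	/* Per-CPU fixmap mapping; pagefaults stay disabled until unmap. */
	vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, prot);

	*(volatile u32 *)(vaddr + (offset & ~PAGE_MASK)) = val;

	/* Clears the fixmap pte and re-enables pagefaults. */
	iounmap_atomic(vaddr, KM_USER0);
}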