/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

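/*
 * kmap() establishes a persistent kernel mapping for a possibly-highmem
 * page.  It may sleep, so it must only be called from process context;
 * lowmem pages are simply returned from the linear mapping.
 */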
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

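/*
 * Illustrative sketch, not part of the original file: the usual
 * kmap()/kunmap() pairing from sleepable context.  The helper name is
 * hypothetical.
 */
static inline u32 example_read_first_word(struct page *page)
{
	u32 *p = kmap(page);	/* may sleep if the page is in highmem */
	u32 val = *p;

	kunmap(page);
	return val;
}

/*
 * kmap_atomic() maps a highmem page into a fixed per-CPU fixmap slot
 * selected by 'type'.  Page faults (and with them preemption) are
 * disabled until the matching kunmap_atomic(), so the mapping may be
 * used from atomic context.
 */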
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

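/*
 * kunmap_atomic() undoes kmap_atomic(): it flushes the VIVT D-cache
 * before the alias disappears, clears the fixmap entry when highmem
 * debugging is on, drops the extra reference if the address came from
 * kmap_high_get(), and finally re-enables page faults.
 */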
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);

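/*
 * Illustrative sketch, not part of the original file: an atomic copy
 * between two possibly-highmem pages.  Two distinct km_type slots are
 * needed because each nested mapping consumes its own fixmap entry;
 * the helper name is hypothetical.
 */
static inline void example_copy_highpage(struct page *dst, struct page *src)
{
	void *d = kmap_atomic(dst, KM_USER0);
	void *s = kmap_atomic(src, KM_USER1);

	copy_page(d, s);
	kunmap_atomic(s, KM_USER1);
	kunmap_atomic(d, KM_USER0);
}

/*
 * kmap_atomic_pfn() is the same as kmap_atomic() but takes a raw page
 * frame number, which is useful when no struct page is available for
 * the frame being mapped.
 */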
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}

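/*
 * kmap_atomic_to_page() recovers the struct page behind an address
 * returned by kmap_atomic(): lowmem addresses go through virt_to_page(),
 * while fixmap addresses are resolved by reading the top-level pte.
 */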
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped. Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However unmapped pages may still be cached with a VIPT cache, and
 * unfortunately it is not possible to perform cache maintenance on
 * them using physical addresses. So we have no choice but to set up
 * a temporary virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context. As we
 * don't want to keep interrupts disabled all the time when such
 * maintenance is taking place, we therefore allow for some reentrancy
 * by preserving and restoring the previous fixmap entry before the
 * interrupted context is resumed. If the reentrancy depth is 0 then
 * there is no need to restore the previous fixmap, and leaving the
 * current one in place allows it to be reused the next time without
 * a TLB flush (common with DMA).
 */

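/*
 * Per-CPU reentrancy depth for the KM_L1_CACHE fixmap slot; see the
 * comment above for why the outermost mapping is left in place.
 */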
static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	if (!in_interrupt())
		preempt_disable();

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		*saved_pte = pte;
	} else {
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}

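/*
 * kunmap_high_l1_vipt() verifies that the fixmap slot still maps the
 * page being unmapped, then restores the pte saved by the matching
 * kmap_high_l1_vipt() when unwinding a nested (reentrant) mapping.
 */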
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}

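/*
 * Illustrative sketch, not part of the original file: how the pair
 * above is meant to be used by cache maintenance code, flushing one
 * highmem page through the temporary L1-VIPT alias.  The helper name
 * is hypothetical.
 */
static inline void example_flush_vipt_alias(struct page *page)
{
	pte_t saved_pte;
	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
	kunmap_high_l1_vipt(page, saved_pte);
}
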
#endif	/* CONFIG_CPU_CACHE_VIPT */