#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

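/*
 * Cache-maintenance hooks.  Architectures with aliasing or virtually
 * indexed caches provide real implementations in their asm headers and
 * define the corresponding ARCH_HAS_* macros; everywhere else the stubs
 * below compile away to nothing.
 */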
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

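/*
 * Without CONFIG_HIGHMEM every page has a permanent kernel mapping, so
 * kmap() reduces to page_address() and the atomic variants only need to
 * disable pagefaults.  Architectures that define ARCH_HAS_KMAP supply
 * their own versions instead of these defaults.
 */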
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, prot)    __kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
        pagefault_enable();
}

#define kmap_atomic_pfn(pfn)    kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)        virt_to_page(ptr)

#define kmap_flush_unused()     do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

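/*
 * The atomic kmap slots are managed as a small per-CPU stack: each
 * kmap_atomic() pushes an index and each kunmap_atomic() pops one, so
 * atomic mappings must be released in strict LIFO order.
 */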
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
        int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx > KM_TYPE_NR);
#endif
        return idx;
}

static inline int kmap_atomic_idx(void)
{
        return __get_cpu_var(__kmap_atomic_idx) - 1;
}

static inline int kmap_atomic_idx_pop(void)
{
        int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(idx < 0);
#endif
        return idx;
}

#endif

/*
 * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap();
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)                            \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));      \
        __kunmap_atomic(addr);                                  \
} while (0)

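/*
 * Typical usage of the atomic variants, as a sketch: the mapping is only
 * valid on the local CPU, must not be held across anything that can sleep,
 * and the address returned by kmap_atomic() is what gets passed back to
 * kunmap_atomic():
 *
 *      void *kaddr = kmap_atomic(page);
 *      memset(kaddr, 0, PAGE_SIZE);
 *      kunmap_atomic(kaddr);
 */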
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page, KM_USER0);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page, KM_USER0);
        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);
}

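/*
 * zero_user_segments() clears up to two byte ranges of a (possibly highmem)
 * page under a single temporary mapping; zero_user_segment() and zero_user()
 * are convenience wrappers for the common single-range case.
 */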
static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
                        unsigned int offset, unsigned int size)
{
        zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

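/*
 * Copying between two highmem pages needs both mapped at once, so two
 * distinct atomic kmap slots (KM_USER0 and KM_USER1) are used and the
 * mappings are released in reverse order of creation.
 */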
static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto, KM_USER1);
        kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_page(vto, vfrom);
        kunmap_atomic(vto, KM_USER1);
        kunmap_atomic(vfrom, KM_USER0);
}

#endif /* _LINUX_HIGHMEM_H */