net-next-2.6.git: include/linux/highmem.h
(as of commit "mm: stack based kmap_atomic()")
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

/*
 * Per-CPU depth counter for the stack-based kmap_atomic() slots:
 * push on map, pop on unmap, so atomic mappings must nest like a stack.
 */
DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
        int idx = __get_cpu_var(__kmap_atomic_idx)++;
#ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx >= KM_TYPE_NR);
#endif
        return idx;
}

static inline int kmap_atomic_idx_pop(void)
{
        int idx = --__get_cpu_var(__kmap_atomic_idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(idx < 0);
#endif
        return idx;
}

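/*
 * Example (sketch, not part of this header): a highmem architecture's
 * __kmap_atomic() is expected to be built on kmap_atomic_idx_push(),
 * mapping the page into a per-CPU fixmap slot.  This is modeled on the
 * x86 implementation; kmap_pte, kmap_prot and FIX_KMAP_BEGIN are
 * arch-specific and assumed here:
 *
 *        void *__kmap_atomic(struct page *page)
 *        {
 *                unsigned long vaddr;
 *                int idx, type;
 *
 *                pagefault_disable();
 *                if (!PageHighMem(page))
 *                        return page_address(page);
 *
 *                type = kmap_atomic_idx_push();
 *                idx = type + KM_TYPE_NR * smp_processor_id();
 *                vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *                BUG_ON(!pte_none(*(kmap_pte - idx)));
 *                set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *
 *                return (void *)vaddr;
 *        }
 */
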
#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, prot)        __kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
        pagefault_enable();
}

#define kmap_atomic_pfn(pfn)        kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)    virt_to_page(ptr)

#define kmap_flush_unused()         do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

/*
 * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people from calling kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() takes the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)                            \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));      \
        __kunmap_atomic(addr);                                  \
} while (0)

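/*
 * Example (illustrative only): both calling conventions resolve to the
 * same stack-based implementation; the legacy KM_* index argument is
 * simply discarded.  kunmap_atomic() must be handed the mapped address
 * returned by kmap_atomic(); passing the struct page instead trips the
 * BUILD_BUG_ON() above at compile time:
 *
 *        void *addr = kmap_atomic(page);                (one-argument form)
 *        memset(addr, 0, PAGE_SIZE);
 *        kunmap_atomic(addr);
 *
 *        addr = kmap_atomic(page, KM_USER0);        (legacy form, idx ignored)
 *        memset(addr, 0, PAGE_SIZE);
 *        kunmap_atomic(addr, KM_USER0);
 */
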
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page, KM_USER0);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function allocates a page for a VMA, but the caller is expected
 * to specify via @movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function allocates a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

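/*
 * Example (sketch, modeled on the anonymous-fault path in mm/memory.c):
 * a fault handler that knows the new page may later be migrated
 * allocates it through the _movable variant:
 *
 *        page = alloc_zeroed_user_highpage_movable(vma, address);
 *        if (!page)
 *                return VM_FAULT_OOM;
 */
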
static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page, KM_USER0);
        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}

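/*
 * Example (illustrative only): a filesystem zeroing the tail of a page
 * beyond end-of-file before writeback, and zeroing two independent
 * ranges of a partially written page with a single atomic mapping:
 *
 *        zero_user_segment(page, offset_in_page(inode->i_size), PAGE_SIZE);
 *        zero_user_segments(page, 0, from, to, PAGE_SIZE);
 */
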
static inline void __deprecated memclear_highpage_flush(struct page *page,
                        unsigned int offset, unsigned int size)
{
        zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto, KM_USER1);
        kunmap_atomic(vfrom, KM_USER0);
}

#endif

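/*
 * Example (sketch): the copy-on-write fault path copies the old page
 * into a newly allocated one with copy_user_highpage(); the faulting
 * address and VMA are passed so that architectures with virtually
 * indexed caches can keep the user-visible alias coherent:
 *
 *        copy_user_highpage(new_page, old_page, address, vma);
 */
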
static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_page(vto, vfrom);
        kunmap_atomic(vto, KM_USER1);
        kunmap_atomic(vfrom, KM_USER0);
}

#endif /* _LINUX_HIGHMEM_H */