net-next-2.6.git - include/linux/highmem.h
mm: strictly nested kmap_atomic()

#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_DEBUG_HIGHMEM

void debug_kmap_atomic(enum km_type type);

#else

static inline void debug_kmap_atomic(enum km_type type)
{
}

#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)       kmap_atomic(page, idx)

static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
{
        pagefault_enable();
}

#define kmap_atomic_pfn(pfn, idx)       kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)        virt_to_page(ptr)

#define kmap_flush_unused()     do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
#define kunmap_atomic(addr, idx) do { \
                BUILD_BUG_ON(__same_type((addr), struct page *)); \
                kunmap_atomic_notypecheck((addr), (idx)); \
        } while (0)

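/*
 * Illustrative sketch (not part of the original header): the intended pairing
 * is to unmap with the address returned by kmap_atomic(), never with the
 * struct page, which is exactly what the BUILD_BUG_ON above enforces. The
 * helper name below is hypothetical.
 */
static inline void example_zero_first_bytes(struct page *page, unsigned int nbytes)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        memset(kaddr, 0, nbytes);
        kunmap_atomic(kaddr, KM_USER0);         /* pass the address, not the page */
}
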
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page, KM_USER0);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

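/*
 * Sketch of the override hook described above (an illustrative example under
 * assumptions, not code from any real architecture): an architecture supplies
 * its own __alloc_zeroed_user_highpage() and defines
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE so the generic version above is
 * skipped, e.g.:
 *
 *	#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 *	static inline struct page *
 *	__alloc_zeroed_user_highpage(gfp_t movableflags,
 *				     struct vm_area_struct *vma,
 *				     unsigned long vaddr)
 *	{
 *		struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
 *						   vma, vaddr);
 *		if (page)
 *			clear_user_highpage(page, vaddr);	// an arch-optimised clear would go here
 *		return page;
 *	}
 */
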
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

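/*
 * Usage sketch (illustrative, loosely modelled on an anonymous page fault;
 * not copied from this tree):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	... map "page" into the VMA at "address" ...
 */
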
static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page, KM_USER0);
        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
                        unsigned int offset, unsigned int size)
{
        zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto, KM_USER1);
        kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_page(vto, vfrom);
        kunmap_atomic(vto, KM_USER1);
        kunmap_atomic(vfrom, KM_USER0);
}

#endif /* _LINUX_HIGHMEM_H */