/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

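/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * already owns an array of struct page pointers can use vmap() to get a
 * temporary, virtually contiguous kernel view of those pages and vunmap()
 * to drop that view again without freeing the pages.  The helper name and
 * the choice of VM_MAP below are assumptions made for the example.
 *
 *	static int zero_pages_via_vmap(struct page **pages, unsigned int n)
 *	{
 *		void *va = vmap(pages, n, VM_MAP, PAGE_KERNEL);
 *
 *		if (!va)
 *			return -ENOMEM;
 *		memset(va, 0, (size_t)n << PAGE_SHIFT);
 *		vunmap(va);
 *		return 0;
 *	}
 */
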
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

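/*
 * Usage sketch (illustrative only): a typical caller allocates a large,
 * virtually contiguous buffer with vmalloc() and releases it with vfree()
 * from process context.  The structure and variable names are hypothetical.
 *
 *	struct my_table *tbl;
 *
 *	tbl = vmalloc(nr_entries * sizeof(*tbl));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */
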
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

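/*
 * Usage sketch (illustrative only): callers that know which CPU will touch
 * the memory typically allocate it close to that CPU's node, e.g.
 *
 *	buf = vmalloc_node(size, cpu_to_node(cpu));
 */
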
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 * @returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);

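/*
 * Usage sketch (illustrative only, hypothetical driver names): a buffer
 * obtained from vmalloc_user() (which sets VM_USERMAP) can be exposed to
 * userspace from a driver's ->mmap() handler via remap_vmalloc_range().
 * Offset handling is omitted for brevity.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return remap_vmalloc_range(vma, drv->vmalloc_buf, 0);
 *	}
 */
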
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 * @returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
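
/*
 * Usage sketch (illustrative only, hypothetical caller): alloc_vm_area()
 * reserves kernel virtual address space with page tables populated but no
 * pages mapped; the caller installs mappings by other means and tears the
 * area down with free_vm_area().
 *
 *	struct vm_struct *vm = alloc_vm_area(nr_pages << PAGE_SHIFT);
 *
 *	if (!vm)
 *		return -ENOMEM;
 *	... establish mappings at vm->addr by other means ...
 *	free_vm_area(vm);
 */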