/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long addr, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(addr, end);

	if (unlikely(err))
		return err;
	return nr;
}
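
/*
 * Illustrative sketch (not part of the original file): given the
 * invariant above, a caller holding a two-page KVA range and two
 * allocated pages ends up with pages[0] backing addr and pages[1]
 * backing addr + PAGE_SIZE:
 *
 *	struct page *pages[2] = { alloc_page(GFP_KERNEL),
 *				  alloc_page(GFP_KERNEL) };
 *	int nr = vmap_page_range(addr, addr + 2 * PAGE_SIZE,
 *				 PAGE_KERNEL, pages);
 *	// nr == 2 on success, negative errno on failure
 *	vunmap_page_range(addr, addr + 2 * PAGE_SIZE);
 */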

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
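
/*
 * Illustrative sketch (not part of the original file): a caller that
 * needs the physical frame behind a vmalloc()ed buffer, e.g. to build
 * a scatterlist, can translate it page by page:
 *
 *	void *buf = vmalloc(2 * PAGE_SIZE);
 *	if (buf) {
 *		struct page *pg = vmalloc_to_page(buf + PAGE_SIZE);
 *		unsigned long pfn = vmalloc_to_pfn(buf + PAGE_SIZE);
 *		// pg and pfn describe the second page backing buf
 *		vfree(buf);
 *	}
 */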


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(size & ~PAGE_MASK);

	addr = ALIGN(vstart, align);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);
	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size >= first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING "vmap allocation failed: "
				"use vmalloc=<size> to increase size.\n");
		kfree(va);	/* don't leak the vmap_area on failure */
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}
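
/*
 * Illustrative sketch (not part of the original file): callers receive
 * either a valid vmap_area or an ERR_PTR() value, so the usual pattern
 * is:
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(PAGE_SIZE, PAGE_SIZE,
 *			     VMALLOC_START, VMALLOC_END, -1, GFP_KERNEL);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	// ... map and use [va->va_start, va->va_end) ...
 *	free_vmap_area(va);	// defined below; returns the range
 */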

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
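
/*
 * Worked example (illustrative): with 4KB pages and 8 online CPUs,
 * fls(8) == 4, so lazy_max_pages() == 4 * (32MB / 4KB) == 32768 pages,
 * i.e. up to 128MB worth of stale mappings may accumulate before a
 * global TLB flush is forced.
 */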

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry(va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		purge_vmap_area_lazy();
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
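
/*
 * Worked example (illustrative): on a 32 bit machine with 4KB pages and
 * NR_CPUS == 4, VMALLOC_PAGES == 32768 and VMALLOC_PAGES / 4 / 16 == 512,
 * which already lies inside the clamp range [VMAP_BBMAP_BITS_MIN,
 * VMAP_BBMAP_BITS_MAX] == [64, 1024], so VMAP_BBMAP_BITS == 512 and
 * VMAP_BLOCK_SIZE == 2MB.
 */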

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
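
/*
 * Worked example (illustrative, made-up addresses): with a 2MB
 * VMAP_BLOCK_SIZE and VMALLOC_START == 0xf0000000, an address
 * 0xf0500000 yields (0xf0500000 - 0xf0000000) / 0x200000 == 2, so
 * every address inside one aligned block shares a radix tree index.
 */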

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer, and so there might be some CPUs with TLB entries
 * still referencing that page (in addition to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
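
/*
 * Illustrative sketch (an assumption, not in the original file): a
 * typical caller is about to change the attributes of pages that may
 * still carry lazily kept vmap aliases:
 *
 *	vm_unmap_aliases();
 *	// now no stale kernel virtual aliases (or TLB entries for
 *	// them) can refer to the pages being repurposed
 */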

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
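
/*
 * Illustrative sketch (not part of the original file): mapping a small
 * batch of pages for transient kernel access, then unmapping with the
 * same count (partial unmaps are not allowed):
 *
 *	struct page *pages[4];	// assume these were allocated earlier
 *	void *va;
 *
 *	va = vm_map_ram(pages, 4, -1, PAGE_KERNEL);
 *	if (va) {
 *		memset(va, 0, 4 * PAGE_SIZE);
 *		vm_unmap_ram(va, 4);
 *	}
 */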

void __init vmalloc_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr:	 base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr:	 memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:	 memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages:	 array of page pointers
 * @count:	 number of pages to map
 * @flags:	 vm_area->flags
 * @prot:	 page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
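
/*
 * Illustrative sketch (not part of the original file): stitching
 * physically scattered pages into one contiguous kernel-virtual window:
 *
 *	struct page *pages[2] = { alloc_page(GFP_KERNEL),
 *				  alloc_page(GFP_KERNEL) };
 *	void *va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		// [va, va + 2 * PAGE_SIZE) is contiguous in KVA
 *		vunmap(va);	// does not free the pages themselves
 *	}
 */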

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size:	 allocation size
 * @gfp_mask:	 flags for the page level allocator
 * @prot:	 protection mask for the allocated pages
 * @node:	 node to use for allocation or -1
 * @caller:	 caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size:	 allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
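
/*
 * Illustrative sketch (not part of the original file): the canonical
 * pattern for a large scratch buffer that must be virtually, but need
 * not be physically, contiguous:
 *
 *	char *buf = vmalloc(1024 * 1024);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	vfree(buf);
 */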

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size:	 allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	 allocation size
 * @node:	 numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size:	 allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	 allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	 allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
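
/*
 * Illustrative sketch (an assumption, not in the original file): a
 * driver exposing a vmalloc_user() buffer via its ->mmap file
 * operation (drv_buf and drv_mmap are hypothetical names):
 *
 *	static void *drv_buf;	// allocated with vmalloc_user()
 *
 *	static int drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, drv_buf, 0);
 *	}
 */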

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	 size of the area
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
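
/*
 * Illustrative sketch (an assumption, not in the original file): code
 * that wants populated page tables but installs the actual ptes itself
 * (e.g. a paravirtualized guest mapping foreign pages) can pair these:
 *
 *	struct vm_struct *vm = alloc_vm_area(4 * PAGE_SIZE);
 *
 *	if (vm) {
 *		// install ptes into [vm->addr, vm->addr + 4 * PAGE_SIZE)
 *		// by other means, then eventually:
 *		free_vm_area(vm);
 *	}
 */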

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
#endif