/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

91static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
db64fe02 92 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
1da177e4
LT
93{
94 pte_t *pte;
95
db64fe02
NP
96 /*
97 * nr is a running index into the array which helps higher level
98 * callers keep track of where we're up to.
99 */
100
872fec16 101 pte = pte_alloc_kernel(pmd, addr);
1da177e4
LT
102 if (!pte)
103 return -ENOMEM;
104 do {
db64fe02
NP
105 struct page *page = pages[*nr];
106
107 if (WARN_ON(!pte_none(*pte)))
108 return -EBUSY;
109 if (WARN_ON(!page))
1da177e4
LT
110 return -ENOMEM;
111 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
db64fe02 112 (*nr)++;
1da177e4
LT
113 } while (pte++, addr += PAGE_SIZE, addr != end);
114 return 0;
115}
116
db64fe02
NP
117static int vmap_pmd_range(pud_t *pud, unsigned long addr,
118 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
1da177e4
LT
119{
120 pmd_t *pmd;
121 unsigned long next;
122
123 pmd = pmd_alloc(&init_mm, pud, addr);
124 if (!pmd)
125 return -ENOMEM;
126 do {
127 next = pmd_addr_end(addr, end);
db64fe02 128 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
1da177e4
LT
129 return -ENOMEM;
130 } while (pmd++, addr = next, addr != end);
131 return 0;
132}
133
db64fe02
NP
134static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
135 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
1da177e4
LT
136{
137 pud_t *pud;
138 unsigned long next;
139
140 pud = pud_alloc(&init_mm, pgd, addr);
141 if (!pud)
142 return -ENOMEM;
143 do {
144 next = pud_addr_end(addr, end);
db64fe02 145 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
1da177e4
LT
146 return -ENOMEM;
147 } while (pud++, addr = next, addr != end);
148 return 0;
149}
150
db64fe02
NP
151/*
152 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
153 * will have pfns corresponding to the "pages" array.
154 *
155 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
156 */
8fc48985
TH
157static int vmap_page_range_noflush(unsigned long start, unsigned long end,
158 pgprot_t prot, struct page **pages)
1da177e4
LT
159{
160 pgd_t *pgd;
161 unsigned long next;
2e4e27c7 162 unsigned long addr = start;
db64fe02
NP
163 int err = 0;
164 int nr = 0;
1da177e4
LT
165
166 BUG_ON(addr >= end);
167 pgd = pgd_offset_k(addr);
1da177e4
LT
168 do {
169 next = pgd_addr_end(addr, end);
db64fe02 170 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
1da177e4 171 if (err)
bf88c8c8 172 return err;
1da177e4 173 } while (pgd++, addr = next, addr != end);
db64fe02 174
db64fe02 175 return nr;
1da177e4
LT
176}
177
8fc48985
TH
178static int vmap_page_range(unsigned long start, unsigned long end,
179 pgprot_t prot, struct page **pages)
180{
181 int ret;
182
183 ret = vmap_page_range_noflush(start, end, prot, pages);
184 flush_cache_vmap(start, end);
185 return ret;
186}
187
81ac3ad9 188int is_vmalloc_or_module_addr(const void *x)
73bdf0a6
LT
189{
190 /*
ab4f2ee1 191 * ARM, x86-64 and sparc64 put modules in a special place,
73bdf0a6
LT
192 * and fall back on vmalloc() if that fails. Others
193 * just put it in the vmalloc space.
194 */
195#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
196 unsigned long addr = (unsigned long)x;
197 if (addr >= MODULES_VADDR && addr < MODULES_END)
198 return 1;
199#endif
200 return is_vmalloc_addr(x);
201}
202
48667e7a 203/*
db64fe02 204 * Walk a vmap address to the struct page it maps.
48667e7a 205 */
b3bdda02 206struct page *vmalloc_to_page(const void *vmalloc_addr)
48667e7a
CL
207{
208 unsigned long addr = (unsigned long) vmalloc_addr;
209 struct page *page = NULL;
210 pgd_t *pgd = pgd_offset_k(addr);
48667e7a 211
7aa413de
IM
212 /*
213 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
214 * architectures that do not vmalloc module space
215 */
73bdf0a6 216 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
59ea7463 217
48667e7a 218 if (!pgd_none(*pgd)) {
db64fe02 219 pud_t *pud = pud_offset(pgd, addr);
48667e7a 220 if (!pud_none(*pud)) {
db64fe02 221 pmd_t *pmd = pmd_offset(pud, addr);
48667e7a 222 if (!pmd_none(*pmd)) {
db64fe02
NP
223 pte_t *ptep, pte;
224
48667e7a
CL
225 ptep = pte_offset_map(pmd, addr);
226 pte = *ptep;
227 if (pte_present(pte))
228 page = pte_page(pte);
229 pte_unmap(ptep);
230 }
231 }
232 }
233 return page;
234}
235EXPORT_SYMBOL(vmalloc_to_page);
236
237/*
238 * Map a vmalloc()-space virtual address to the physical page frame number.
239 */
b3bdda02 240unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
48667e7a
CL
241{
242 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
243}
244EXPORT_SYMBOL(vmalloc_to_pfn);
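/*
 * Illustrative sketch (not part of the original file): walking a vmalloc'ed
 * buffer page by page and translating each virtual page to its struct page
 * and pfn, e.g. to build a scatterlist. The buffer and size are hypothetical.
 */
static void example_walk_vmalloc_pages(const void *buf, unsigned long size)
{
	unsigned long offset;

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + offset);
		unsigned long pfn = vmalloc_to_pfn(buf + offset);

		pr_debug("vaddr %p -> page %p (pfn %lu)\n",
			 buf + offset, page, pfn);
	}
}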
245
/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
static unsigned long vmap_area_pcpu_hole;
268
269static struct vmap_area *__find_vmap_area(unsigned long addr)
1da177e4 270{
db64fe02
NP
271 struct rb_node *n = vmap_area_root.rb_node;
272
273 while (n) {
274 struct vmap_area *va;
275
276 va = rb_entry(n, struct vmap_area, rb_node);
277 if (addr < va->va_start)
278 n = n->rb_left;
279 else if (addr > va->va_start)
280 n = n->rb_right;
281 else
282 return va;
283 }
284
285 return NULL;
286}
287
288static void __insert_vmap_area(struct vmap_area *va)
289{
290 struct rb_node **p = &vmap_area_root.rb_node;
291 struct rb_node *parent = NULL;
292 struct rb_node *tmp;
293
294 while (*p) {
295 struct vmap_area *tmp;
296
297 parent = *p;
298 tmp = rb_entry(parent, struct vmap_area, rb_node);
299 if (va->va_start < tmp->va_end)
300 p = &(*p)->rb_left;
301 else if (va->va_end > tmp->va_start)
302 p = &(*p)->rb_right;
303 else
304 BUG();
305 }
306
307 rb_link_node(&va->rb_node, parent, p);
308 rb_insert_color(&va->rb_node, &vmap_area_root);
309
310 /* address-sort this list so it is usable like the vmlist */
311 tmp = rb_prev(&va->rb_node);
312 if (tmp) {
313 struct vmap_area *prev;
314 prev = rb_entry(tmp, struct vmap_area, rb_node);
315 list_add_rcu(&va->list, &prev->list);
316 } else
317 list_add_rcu(&va->list, &vmap_area_list);
318}
319
320static void purge_vmap_area_lazy(void);
321
322/*
323 * Allocate a region of KVA of the specified size and alignment, within the
324 * vstart and vend.
325 */
326static struct vmap_area *alloc_vmap_area(unsigned long size,
327 unsigned long align,
328 unsigned long vstart, unsigned long vend,
329 int node, gfp_t gfp_mask)
330{
331 struct vmap_area *va;
332 struct rb_node *n;
1da177e4 333 unsigned long addr;
db64fe02
NP
334 int purged = 0;
335
7766970c 336 BUG_ON(!size);
db64fe02
NP
337 BUG_ON(size & ~PAGE_MASK);
338
db64fe02
NP
339 va = kmalloc_node(sizeof(struct vmap_area),
340 gfp_mask & GFP_RECLAIM_MASK, node);
341 if (unlikely(!va))
342 return ERR_PTR(-ENOMEM);
343
344retry:
0ae15132
GC
345 addr = ALIGN(vstart, align);
346
db64fe02 347 spin_lock(&vmap_area_lock);
7766970c
NP
348 if (addr + size - 1 < addr)
349 goto overflow;
350
db64fe02
NP
351 /* XXX: could have a last_hole cache */
352 n = vmap_area_root.rb_node;
353 if (n) {
354 struct vmap_area *first = NULL;
355
356 do {
357 struct vmap_area *tmp;
358 tmp = rb_entry(n, struct vmap_area, rb_node);
359 if (tmp->va_end >= addr) {
360 if (!first && tmp->va_start < addr + size)
361 first = tmp;
362 n = n->rb_left;
363 } else {
364 first = tmp;
365 n = n->rb_right;
366 }
367 } while (n);
368
369 if (!first)
370 goto found;
371
372 if (first->va_end < addr) {
373 n = rb_next(&first->rb_node);
374 if (n)
375 first = rb_entry(n, struct vmap_area, rb_node);
376 else
377 goto found;
378 }
379
f011c2da 380 while (addr + size > first->va_start && addr + size <= vend) {
db64fe02 381 addr = ALIGN(first->va_end + PAGE_SIZE, align);
7766970c
NP
382 if (addr + size - 1 < addr)
383 goto overflow;
db64fe02
NP
384
385 n = rb_next(&first->rb_node);
386 if (n)
387 first = rb_entry(n, struct vmap_area, rb_node);
388 else
389 goto found;
390 }
391 }
392found:
393 if (addr + size > vend) {
7766970c 394overflow:
db64fe02
NP
395 spin_unlock(&vmap_area_lock);
396 if (!purged) {
397 purge_vmap_area_lazy();
398 purged = 1;
399 goto retry;
400 }
401 if (printk_ratelimit())
c1279c4e
GC
402 printk(KERN_WARNING
403 "vmap allocation for size %lu failed: "
404 "use vmalloc=<size> to increase size.\n", size);
2498ce42 405 kfree(va);
db64fe02
NP
406 return ERR_PTR(-EBUSY);
407 }
408
409 BUG_ON(addr & (align-1));
410
411 va->va_start = addr;
412 va->va_end = addr + size;
413 va->flags = 0;
414 __insert_vmap_area(va);
415 spin_unlock(&vmap_area_lock);
416
417 return va;
418}
419
420static void rcu_free_va(struct rcu_head *head)
421{
422 struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
423
424 kfree(va);
425}
426
427static void __free_vmap_area(struct vmap_area *va)
428{
429 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
430 rb_erase(&va->rb_node, &vmap_area_root);
431 RB_CLEAR_NODE(&va->rb_node);
432 list_del_rcu(&va->list);
433
ca23e405
TH
434 /*
435 * Track the highest possible candidate for pcpu area
436 * allocation. Areas outside of vmalloc area can be returned
437 * here too, consider only end addresses which fall inside
438 * vmalloc area proper.
439 */
440 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
441 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
442
db64fe02
NP
443 call_rcu(&va->rcu_head, rcu_free_va);
444}
445
446/*
447 * Free a region of KVA allocated by alloc_vmap_area
448 */
449static void free_vmap_area(struct vmap_area *va)
450{
451 spin_lock(&vmap_area_lock);
452 __free_vmap_area(va);
453 spin_unlock(&vmap_area_lock);
454}
455
456/*
457 * Clear the pagetable entries of a given vmap_area
458 */
459static void unmap_vmap_area(struct vmap_area *va)
460{
461 vunmap_page_range(va->va_start, va->va_end);
462}
463
cd52858c
NP
464static void vmap_debug_free_range(unsigned long start, unsigned long end)
465{
466 /*
467 * Unmap page tables and force a TLB flush immediately if
468 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
469 * bugs similarly to those in linear kernel virtual address
470 * space after a page has been freed.
471 *
472 * All the lazy freeing logic is still retained, in order to
473 * minimise intrusiveness of this debugging feature.
474 *
475 * This is going to be *slow* (linear kernel virtual address
476 * debugging doesn't do a broadcast TLB flush so it is a lot
477 * faster).
478 */
479#ifdef CONFIG_DEBUG_PAGEALLOC
480 vunmap_page_range(start, end);
481 flush_tlb_kernel_range(start, end);
482#endif
483}
484
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
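/*
 * Worked example of the log scale above (illustrative, not from the original
 * file): with 16 online CPUs, fls(16) == 5, so up to 5 * 32MB = 160MB of
 * lazily freed virtual address space (5 * 8192 = 40960 pages with 4K pages)
 * may accumulate before a TLB flush is attempted, versus 32MB when
 * num_online_cpus() == 1 and fls(1) == 1.
 */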
509
510static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
511
512/*
513 * Purges all lazily-freed vmap areas.
514 *
515 * If sync is 0 then don't purge if there is already a purge in progress.
516 * If force_flush is 1, then flush kernel TLBs between *start and *end even
517 * if we found no lazy vmap areas to unmap (callers can use this to optimise
518 * their own TLB flushing).
519 * Returns with *start = min(*start, lowest purged address)
520 * *end = max(*end, highest purged address)
521 */
522static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
523 int sync, int force_flush)
524{
46666d8a 525 static DEFINE_SPINLOCK(purge_lock);
db64fe02
NP
526 LIST_HEAD(valist);
527 struct vmap_area *va;
cbb76676 528 struct vmap_area *n_va;
db64fe02
NP
529 int nr = 0;
530
531 /*
532 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
533 * should not expect such behaviour. This just simplifies locking for
534 * the case that isn't actually used at the moment anyway.
535 */
536 if (!sync && !force_flush) {
46666d8a 537 if (!spin_trylock(&purge_lock))
db64fe02
NP
538 return;
539 } else
46666d8a 540 spin_lock(&purge_lock);
db64fe02
NP
541
542 rcu_read_lock();
543 list_for_each_entry_rcu(va, &vmap_area_list, list) {
544 if (va->flags & VM_LAZY_FREE) {
545 if (va->va_start < *start)
546 *start = va->va_start;
547 if (va->va_end > *end)
548 *end = va->va_end;
549 nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
550 unmap_vmap_area(va);
551 list_add_tail(&va->purge_list, &valist);
552 va->flags |= VM_LAZY_FREEING;
553 va->flags &= ~VM_LAZY_FREE;
554 }
555 }
556 rcu_read_unlock();
557
88f50044 558 if (nr)
db64fe02 559 atomic_sub(nr, &vmap_lazy_nr);
db64fe02
NP
560
561 if (nr || force_flush)
562 flush_tlb_kernel_range(*start, *end);
563
564 if (nr) {
565 spin_lock(&vmap_area_lock);
cbb76676 566 list_for_each_entry_safe(va, n_va, &valist, purge_list)
db64fe02
NP
567 __free_vmap_area(va);
568 spin_unlock(&vmap_area_lock);
569 }
46666d8a 570 spin_unlock(&purge_lock);
db64fe02
NP
571}
572
496850e5
NP
573/*
574 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
575 * is already purging.
576 */
577static void try_purge_vmap_area_lazy(void)
578{
579 unsigned long start = ULONG_MAX, end = 0;
580
581 __purge_vmap_area_lazy(&start, &end, 0, 0);
582}
583
db64fe02
NP
584/*
585 * Kick off a purge of the outstanding lazy areas.
586 */
587static void purge_vmap_area_lazy(void)
588{
589 unsigned long start = ULONG_MAX, end = 0;
590
496850e5 591 __purge_vmap_area_lazy(&start, &end, 1, 0);
db64fe02
NP
592}
593
594/*
b29acbdc
NP
595 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
596 * called for the correct range previously.
db64fe02 597 */
b29acbdc 598static void free_unmap_vmap_area_noflush(struct vmap_area *va)
db64fe02
NP
599{
600 va->flags |= VM_LAZY_FREE;
601 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
602 if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
496850e5 603 try_purge_vmap_area_lazy();
db64fe02
NP
604}
605
b29acbdc
NP
606/*
607 * Free and unmap a vmap area
608 */
609static void free_unmap_vmap_area(struct vmap_area *va)
610{
611 flush_cache_vunmap(va->va_start, va->va_end);
612 free_unmap_vmap_area_noflush(va);
613}
614
db64fe02
NP
615static struct vmap_area *find_vmap_area(unsigned long addr)
616{
617 struct vmap_area *va;
618
619 spin_lock(&vmap_area_lock);
620 va = __find_vmap_area(addr);
621 spin_unlock(&vmap_area_lock);
622
623 return va;
624}
625
626static void free_unmap_vmap_area_addr(unsigned long addr)
627{
628 struct vmap_area *va;
629
630 va = find_vmap_area(addr);
631 BUG_ON(!va);
632 free_unmap_vmap_area(va);
633}
634
635
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
664
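/*
 * Sizing sketch (illustrative assumptions: 32-bit, 4K pages, NR_CPUS == 4):
 * VMALLOC_PAGES = 128MB / 4KB = 32768, so VMALLOC_PAGES / NR_CPUS / 16 = 512.
 * That sits between VMAP_BBMAP_BITS_MIN (32 * 2 = 64) and VMAP_BBMAP_BITS_MAX
 * (1024), giving VMAP_BBMAP_BITS = 512 and VMAP_BLOCK_SIZE = 512 * 4KB = 2MB
 * per per-cpu block.
 */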
9b463334
JF
665static bool vmap_initialized __read_mostly = false;
666
db64fe02
NP
667struct vmap_block_queue {
668 spinlock_t lock;
669 struct list_head free;
db64fe02
NP
670};
671
672struct vmap_block {
673 spinlock_t lock;
674 struct vmap_area *va;
675 struct vmap_block_queue *vbq;
676 unsigned long free, dirty;
677 DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
678 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
de560423
NP
679 struct list_head free_list;
680 struct rcu_head rcu_head;
db64fe02
NP
681};
682
683/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
684static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
685
686/*
687 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
688 * in the free path. Could get rid of this if we change the API to return a
689 * "cookie" from alloc, to be passed to free. But no big deal yet.
690 */
691static DEFINE_SPINLOCK(vmap_block_tree_lock);
692static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
693
694/*
695 * We should probably have a fallback mechanism to allocate virtual memory
696 * out of partially filled vmap blocks. However vmap block sizing should be
697 * fairly reasonable according to the vmalloc size, so it shouldn't be a
698 * big problem.
699 */
700
701static unsigned long addr_to_vb_idx(unsigned long addr)
702{
703 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
704 addr /= VMAP_BLOCK_SIZE;
705 return addr;
706}
707
708static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
709{
710 struct vmap_block_queue *vbq;
711 struct vmap_block *vb;
712 struct vmap_area *va;
713 unsigned long vb_idx;
714 int node, err;
715
716 node = numa_node_id();
717
718 vb = kmalloc_node(sizeof(struct vmap_block),
719 gfp_mask & GFP_RECLAIM_MASK, node);
720 if (unlikely(!vb))
721 return ERR_PTR(-ENOMEM);
722
723 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
724 VMALLOC_START, VMALLOC_END,
725 node, gfp_mask);
726 if (unlikely(IS_ERR(va))) {
727 kfree(vb);
728 return ERR_PTR(PTR_ERR(va));
729 }
730
731 err = radix_tree_preload(gfp_mask);
732 if (unlikely(err)) {
733 kfree(vb);
734 free_vmap_area(va);
735 return ERR_PTR(err);
736 }
737
738 spin_lock_init(&vb->lock);
739 vb->va = va;
740 vb->free = VMAP_BBMAP_BITS;
741 vb->dirty = 0;
742 bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
743 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
744 INIT_LIST_HEAD(&vb->free_list);
db64fe02
NP
745
746 vb_idx = addr_to_vb_idx(va->va_start);
747 spin_lock(&vmap_block_tree_lock);
748 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
749 spin_unlock(&vmap_block_tree_lock);
750 BUG_ON(err);
751 radix_tree_preload_end();
752
753 vbq = &get_cpu_var(vmap_block_queue);
754 vb->vbq = vbq;
755 spin_lock(&vbq->lock);
de560423 756 list_add_rcu(&vb->free_list, &vbq->free);
db64fe02 757 spin_unlock(&vbq->lock);
3f04ba85 758 put_cpu_var(vmap_block_queue);
db64fe02
NP
759
760 return vb;
761}
762
763static void rcu_free_vb(struct rcu_head *head)
764{
765 struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
766
767 kfree(vb);
768}
769
770static void free_vmap_block(struct vmap_block *vb)
771{
772 struct vmap_block *tmp;
773 unsigned long vb_idx;
774
db64fe02
NP
775 vb_idx = addr_to_vb_idx(vb->va->va_start);
776 spin_lock(&vmap_block_tree_lock);
777 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
778 spin_unlock(&vmap_block_tree_lock);
779 BUG_ON(tmp != vb);
780
b29acbdc 781 free_unmap_vmap_area_noflush(vb->va);
db64fe02
NP
782 call_rcu(&vb->rcu_head, rcu_free_vb);
783}
784
785static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
786{
787 struct vmap_block_queue *vbq;
788 struct vmap_block *vb;
789 unsigned long addr = 0;
790 unsigned int order;
791
792 BUG_ON(size & ~PAGE_MASK);
793 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
794 order = get_order(size);
795
796again:
797 rcu_read_lock();
798 vbq = &get_cpu_var(vmap_block_queue);
799 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
800 int i;
801
802 spin_lock(&vb->lock);
803 i = bitmap_find_free_region(vb->alloc_map,
804 VMAP_BBMAP_BITS, order);
805
806 if (i >= 0) {
807 addr = vb->va->va_start + (i << PAGE_SHIFT);
808 BUG_ON(addr_to_vb_idx(addr) !=
809 addr_to_vb_idx(vb->va->va_start));
810 vb->free -= 1UL << order;
811 if (vb->free == 0) {
812 spin_lock(&vbq->lock);
de560423 813 list_del_rcu(&vb->free_list);
db64fe02
NP
814 spin_unlock(&vbq->lock);
815 }
816 spin_unlock(&vb->lock);
817 break;
818 }
819 spin_unlock(&vb->lock);
820 }
3f04ba85 821 put_cpu_var(vmap_block_queue);
db64fe02
NP
822 rcu_read_unlock();
823
824 if (!addr) {
825 vb = new_vmap_block(gfp_mask);
826 if (IS_ERR(vb))
827 return vb;
828 goto again;
829 }
830
831 return (void *)addr;
832}
833
834static void vb_free(const void *addr, unsigned long size)
835{
836 unsigned long offset;
837 unsigned long vb_idx;
838 unsigned int order;
839 struct vmap_block *vb;
840
841 BUG_ON(size & ~PAGE_MASK);
842 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
b29acbdc
NP
843
844 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
845
db64fe02
NP
846 order = get_order(size);
847
848 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
849
850 vb_idx = addr_to_vb_idx((unsigned long)addr);
851 rcu_read_lock();
852 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
853 rcu_read_unlock();
854 BUG_ON(!vb);
855
856 spin_lock(&vb->lock);
de560423 857 BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
d086817d 858
db64fe02
NP
859 vb->dirty += 1UL << order;
860 if (vb->dirty == VMAP_BBMAP_BITS) {
de560423 861 BUG_ON(vb->free);
db64fe02
NP
862 spin_unlock(&vb->lock);
863 free_vmap_block(vb);
864 } else
865 spin_unlock(&vb->lock);
866}
867
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer, and so there might be some CPUs with TLB entries
 * still referencing that page (in addition to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
881void vm_unmap_aliases(void)
882{
883 unsigned long start = ULONG_MAX, end = 0;
884 int cpu;
885 int flush = 0;
886
9b463334
JF
887 if (unlikely(!vmap_initialized))
888 return;
889
db64fe02
NP
890 for_each_possible_cpu(cpu) {
891 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
892 struct vmap_block *vb;
893
894 rcu_read_lock();
895 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
896 int i;
897
898 spin_lock(&vb->lock);
899 i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
900 while (i < VMAP_BBMAP_BITS) {
901 unsigned long s, e;
902 int j;
903 j = find_next_zero_bit(vb->dirty_map,
904 VMAP_BBMAP_BITS, i);
905
906 s = vb->va->va_start + (i << PAGE_SHIFT);
907 e = vb->va->va_start + (j << PAGE_SHIFT);
908 vunmap_page_range(s, e);
909 flush = 1;
910
911 if (s < start)
912 start = s;
913 if (e > end)
914 end = e;
915
916 i = j;
917 i = find_next_bit(vb->dirty_map,
918 VMAP_BBMAP_BITS, i);
919 }
920 spin_unlock(&vb->lock);
921 }
922 rcu_read_unlock();
923 }
924
925 __purge_vmap_area_lazy(&start, &end, 1, flush);
926}
927EXPORT_SYMBOL_GPL(vm_unmap_aliases);
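/*
 * Illustrative sketch (not part of the original file): a caller that is
 * about to change page attributes or hand pages to a device can flush any
 * stale lazy vmap aliases first. The surrounding driver context is
 * hypothetical; the point is only the vm_unmap_aliases() call.
 */
static void example_prepare_pages_for_device(void)
{
	/* make sure no lazily kept kernel virtual aliases remain */
	vm_unmap_aliases();
}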
928
929/**
930 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
931 * @mem: the pointer returned by vm_map_ram
932 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
933 */
934void vm_unmap_ram(const void *mem, unsigned int count)
935{
936 unsigned long size = count << PAGE_SHIFT;
937 unsigned long addr = (unsigned long)mem;
938
939 BUG_ON(!addr);
940 BUG_ON(addr < VMALLOC_START);
941 BUG_ON(addr > VMALLOC_END);
942 BUG_ON(addr & (PAGE_SIZE-1));
943
944 debug_check_no_locks_freed(mem, size);
cd52858c 945 vmap_debug_free_range(addr, addr+size);
db64fe02
NP
946
947 if (likely(count <= VMAP_MAX_ALLOC))
948 vb_free(mem, size);
949 else
950 free_unmap_vmap_area_addr(addr);
951}
952EXPORT_SYMBOL(vm_unmap_ram);
953
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
963void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
964{
965 unsigned long size = count << PAGE_SHIFT;
966 unsigned long addr;
967 void *mem;
968
969 if (likely(count <= VMAP_MAX_ALLOC)) {
970 mem = vb_alloc(size, GFP_KERNEL);
971 if (IS_ERR(mem))
972 return NULL;
973 addr = (unsigned long)mem;
974 } else {
975 struct vmap_area *va;
976 va = alloc_vmap_area(size, PAGE_SIZE,
977 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
978 if (IS_ERR(va))
979 return NULL;
980
981 addr = va->va_start;
982 mem = (void *)addr;
983 }
984 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
985 vm_unmap_ram(mem, count);
986 return NULL;
987 }
988 return mem;
989}
990EXPORT_SYMBOL(vm_map_ram);
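/*
 * Illustrative sketch (not part of the original file): a transient mapping
 * of a small page array via vm_map_ram()/vm_unmap_ram(). The page array is
 * assumed to come from elsewhere; note the same @count must be passed back
 * to vm_unmap_ram().
 */
static int example_map_ram_pair(struct page **pages, unsigned int count)
{
	void *mem;

	mem = vm_map_ram(pages, count, -1, PAGE_KERNEL);
	if (!mem)
		return -ENOMEM;

	memset(mem, 0, count << PAGE_SHIFT);	/* use the contiguous mapping */

	vm_unmap_ram(mem, count);
	return 0;
}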
991
f0aa6617
TH
992/**
993 * vm_area_register_early - register vmap area early during boot
994 * @vm: vm_struct to register
c0c0a293 995 * @align: requested alignment
f0aa6617
TH
996 *
997 * This function is used to register kernel vm area before
998 * vmalloc_init() is called. @vm->size and @vm->flags should contain
999 * proper values on entry and other fields should be zero. On return,
1000 * vm->addr contains the allocated address.
1001 *
1002 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1003 */
c0c0a293 1004void __init vm_area_register_early(struct vm_struct *vm, size_t align)
f0aa6617
TH
1005{
1006 static size_t vm_init_off __initdata;
c0c0a293
TH
1007 unsigned long addr;
1008
1009 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1010 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
f0aa6617 1011
c0c0a293 1012 vm->addr = (void *)addr;
f0aa6617
TH
1013
1014 vm->next = vmlist;
1015 vmlist = vm;
1016}
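/*
 * Illustrative sketch (not part of the original file): how an early-boot
 * caller might reserve a vmalloc-space area before vmalloc_init() runs, per
 * the comment above. The size, flags and alignment are made up here.
 */
static struct vm_struct example_early_vm;

static void __init example_register_early_area(void)
{
	example_early_vm.flags = VM_ALLOC;
	example_early_vm.size = PAGE_SIZE;
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
	/* example_early_vm.addr now holds the reserved address */
}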
1017
db64fe02
NP
1018void __init vmalloc_init(void)
1019{
822c18f2
IK
1020 struct vmap_area *va;
1021 struct vm_struct *tmp;
db64fe02
NP
1022 int i;
1023
1024 for_each_possible_cpu(i) {
1025 struct vmap_block_queue *vbq;
1026
1027 vbq = &per_cpu(vmap_block_queue, i);
1028 spin_lock_init(&vbq->lock);
1029 INIT_LIST_HEAD(&vbq->free);
db64fe02 1030 }
9b463334 1031
822c18f2
IK
1032 /* Import existing vmlist entries. */
1033 for (tmp = vmlist; tmp; tmp = tmp->next) {
43ebdac4 1034 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
822c18f2
IK
1035 va->flags = tmp->flags | VM_VM_AREA;
1036 va->va_start = (unsigned long)tmp->addr;
1037 va->va_end = va->va_start + tmp->size;
1038 __insert_vmap_area(va);
1039 }
ca23e405
TH
1040
1041 vmap_area_pcpu_hole = VMALLOC_END;
1042
9b463334 1043 vmap_initialized = true;
db64fe02
NP
1044}
1045
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}
1070
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
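/*
 * Illustrative sketch (not part of the original file): pairing the
 * *_noflush helpers above with the cache/TLB flushes they leave to the
 * caller. The page array is hypothetical and cleanup of the vm area on the
 * error path is omitted for brevity.
 */
static int example_noflush_map_unmap(struct page **pages, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int ret;

	area = get_vm_area(size, VM_MAP);
	if (!area)
		return -ENOMEM;
	addr = (unsigned long)area->addr;

	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	if (ret < 0)
		return ret;
	flush_cache_vmap(addr, addr + size);		/* caller's job */

	/* ... use the mapping ... */

	flush_cache_vunmap(addr, addr + size);		/* before unmapping */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* after unmapping */
	return 0;
}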
1089
1090/**
1091 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1092 * @addr: start of the VM area to unmap
1093 * @size: size of the VM area to unmap
1094 *
1095 * Similar to unmap_kernel_range_noflush() but flushes vcache before
1096 * the unmapping and tlb after.
1097 */
db64fe02
NP
1098void unmap_kernel_range(unsigned long addr, unsigned long size)
1099{
1100 unsigned long end = addr + size;
f6fcba70
TH
1101
1102 flush_cache_vunmap(addr, end);
db64fe02
NP
1103 vunmap_page_range(addr, end);
1104 flush_tlb_kernel_range(addr, end);
1105}
1106
1107int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
1108{
1109 unsigned long addr = (unsigned long)area->addr;
1110 unsigned long end = addr + area->size - PAGE_SIZE;
1111 int err;
1112
1113 err = vmap_page_range(addr, end, prot, *pages);
1114 if (err > 0) {
1115 *pages += err;
1116 err = 0;
1117 }
1118
1119 return err;
1120}
1121EXPORT_SYMBOL_GPL(map_vm_area);
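/*
 * Illustrative sketch (not part of the original file): map_vm_area() takes
 * struct page *** because it advances the caller's page cursor by however
 * many pages it consumed. The area and page array here are hypothetical.
 */
static int example_map_vm_area(struct vm_struct *area, struct page **page_array)
{
	struct page **cursor = page_array;
	int err;

	err = map_vm_area(area, PAGE_KERNEL, &cursor);
	/* on success, cursor points just past the pages that were mapped */
	return err;
}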
1122
1123/*** Old vmalloc interfaces ***/
1124DEFINE_RWLOCK(vmlist_lock);
1125struct vm_struct *vmlist;
1126
cf88c790
TH
1127static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1128 unsigned long flags, void *caller)
1129{
1130 struct vm_struct *tmp, **p;
1131
1132 vm->flags = flags;
1133 vm->addr = (void *)va->va_start;
1134 vm->size = va->va_end - va->va_start;
1135 vm->caller = caller;
1136 va->private = vm;
1137 va->flags |= VM_VM_AREA;
1138
1139 write_lock(&vmlist_lock);
1140 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1141 if (tmp->addr >= vm->addr)
1142 break;
1143 }
1144 vm->next = *p;
1145 *p = vm;
1146 write_unlock(&vmlist_lock);
1147}
1148
db64fe02 1149static struct vm_struct *__get_vm_area_node(unsigned long size,
2dca6999
DM
1150 unsigned long align, unsigned long flags, unsigned long start,
1151 unsigned long end, int node, gfp_t gfp_mask, void *caller)
db64fe02
NP
1152{
1153 static struct vmap_area *va;
1154 struct vm_struct *area;
1da177e4 1155
52fd24ca 1156 BUG_ON(in_interrupt());
1da177e4
LT
1157 if (flags & VM_IOREMAP) {
1158 int bit = fls(size);
1159
1160 if (bit > IOREMAP_MAX_ORDER)
1161 bit = IOREMAP_MAX_ORDER;
1162 else if (bit < PAGE_SHIFT)
1163 bit = PAGE_SHIFT;
1164
1165 align = 1ul << bit;
1166 }
db64fe02 1167
1da177e4 1168 size = PAGE_ALIGN(size);
31be8309
OH
1169 if (unlikely(!size))
1170 return NULL;
1da177e4 1171
cf88c790 1172 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1da177e4
LT
1173 if (unlikely(!area))
1174 return NULL;
1175
1da177e4
LT
1176 /*
1177 * We always allocate a guard page.
1178 */
1179 size += PAGE_SIZE;
1180
db64fe02
NP
1181 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1182 if (IS_ERR(va)) {
1183 kfree(area);
1184 return NULL;
1da177e4 1185 }
1da177e4 1186
cf88c790 1187 insert_vmalloc_vm(area, va, flags, caller);
1da177e4 1188 return area;
1da177e4
LT
1189}
1190
930fc45a
CL
1191struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1192 unsigned long start, unsigned long end)
1193{
2dca6999 1194 return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
23016969 1195 __builtin_return_address(0));
930fc45a 1196}
5992b6da 1197EXPORT_SYMBOL_GPL(__get_vm_area);
930fc45a 1198
c2968612
BH
1199struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1200 unsigned long start, unsigned long end,
1201 void *caller)
1202{
2dca6999 1203 return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
c2968612
BH
1204 caller);
1205}
1206
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
1216struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1217{
2dca6999 1218 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
23016969
CL
1219 -1, GFP_KERNEL, __builtin_return_address(0));
1220}
1221
1222struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1223 void *caller)
1224{
2dca6999 1225 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
23016969 1226 -1, GFP_KERNEL, caller);
1da177e4
LT
1227}
1228
52fd24ca
GP
1229struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
1230 int node, gfp_t gfp_mask)
930fc45a 1231{
2dca6999
DM
1232 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1233 node, gfp_mask, __builtin_return_address(0));
930fc45a
CL
1234}
1235
db64fe02 1236static struct vm_struct *find_vm_area(const void *addr)
83342314 1237{
db64fe02 1238 struct vmap_area *va;
83342314 1239
db64fe02
NP
1240 va = find_vmap_area((unsigned long)addr);
1241 if (va && va->flags & VM_VM_AREA)
1242 return va->private;
1da177e4 1243
1da177e4 1244 return NULL;
1da177e4
LT
1245}
1246
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
b3bdda02 1255struct vm_struct *remove_vm_area(const void *addr)
7856dfeb 1256{
db64fe02
NP
1257 struct vmap_area *va;
1258
1259 va = find_vmap_area((unsigned long)addr);
1260 if (va && va->flags & VM_VM_AREA) {
1261 struct vm_struct *vm = va->private;
1262 struct vm_struct *tmp, **p;
dd32c279
KH
1263 /*
1264 * remove from list and disallow access to this vm_struct
1265 * before unmap. (address range confliction is maintained by
1266 * vmap.)
1267 */
db64fe02
NP
1268 write_lock(&vmlist_lock);
1269 for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
1270 ;
1271 *p = tmp->next;
1272 write_unlock(&vmlist_lock);
1273
dd32c279
KH
1274 vmap_debug_free_range(va->va_start, va->va_end);
1275 free_unmap_vmap_area(va);
1276 vm->size -= PAGE_SIZE;
1277
db64fe02
NP
1278 return vm;
1279 }
1280 return NULL;
7856dfeb
AK
1281}
1282
b3bdda02 1283static void __vunmap(const void *addr, int deallocate_pages)
1da177e4
LT
1284{
1285 struct vm_struct *area;
1286
1287 if (!addr)
1288 return;
1289
1290 if ((PAGE_SIZE-1) & (unsigned long)addr) {
4c8573e2 1291 WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
1da177e4
LT
1292 return;
1293 }
1294
1295 area = remove_vm_area(addr);
1296 if (unlikely(!area)) {
4c8573e2 1297 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1da177e4 1298 addr);
1da177e4
LT
1299 return;
1300 }
1301
9a11b49a 1302 debug_check_no_locks_freed(addr, area->size);
3ac7fe5a 1303 debug_check_no_obj_freed(addr, area->size);
9a11b49a 1304
1da177e4
LT
1305 if (deallocate_pages) {
1306 int i;
1307
1308 for (i = 0; i < area->nr_pages; i++) {
bf53d6f8
CL
1309 struct page *page = area->pages[i];
1310
1311 BUG_ON(!page);
1312 __free_page(page);
1da177e4
LT
1313 }
1314
8757d5fa 1315 if (area->flags & VM_VPAGES)
1da177e4
LT
1316 vfree(area->pages);
1317 else
1318 kfree(area->pages);
1319 }
1320
1321 kfree(area);
1322 return;
1323}
1324
/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
b3bdda02 1335void vfree(const void *addr)
1da177e4
LT
1336{
1337 BUG_ON(in_interrupt());
89219d37
CM
1338
1339 kmemleak_free(addr);
1340
1da177e4
LT
1341 __vunmap(addr, 1);
1342}
1da177e4
LT
1343EXPORT_SYMBOL(vfree);
1344
1345/**
1346 * vunmap - release virtual mapping obtained by vmap()
1da177e4
LT
1347 * @addr: memory base address
1348 *
1349 * Free the virtually contiguous memory area starting at @addr,
1350 * which was created from the page array passed to vmap().
1351 *
80e93eff 1352 * Must not be called in interrupt context.
1da177e4 1353 */
b3bdda02 1354void vunmap(const void *addr)
1da177e4
LT
1355{
1356 BUG_ON(in_interrupt());
34754b69 1357 might_sleep();
1da177e4
LT
1358 __vunmap(addr, 0);
1359}
1da177e4
LT
1360EXPORT_SYMBOL(vunmap);
1361
1362/**
1363 * vmap - map an array of pages into virtually contiguous space
1da177e4
LT
1364 * @pages: array of page pointers
1365 * @count: number of pages to map
1366 * @flags: vm_area->flags
1367 * @prot: page protection for the mapping
1368 *
1369 * Maps @count pages from @pages into contiguous kernel virtual
1370 * space.
1371 */
1372void *vmap(struct page **pages, unsigned int count,
1373 unsigned long flags, pgprot_t prot)
1374{
1375 struct vm_struct *area;
1376
34754b69
PZ
1377 might_sleep();
1378
4481374c 1379 if (count > totalram_pages)
1da177e4
LT
1380 return NULL;
1381
23016969
CL
1382 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1383 __builtin_return_address(0));
1da177e4
LT
1384 if (!area)
1385 return NULL;
23016969 1386
1da177e4
LT
1387 if (map_vm_area(area, prot, &pages)) {
1388 vunmap(area->addr);
1389 return NULL;
1390 }
1391
1392 return area->addr;
1393}
1da177e4
LT
1394EXPORT_SYMBOL(vmap);
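/*
 * Illustrative sketch (not part of the original file): gluing two
 * independently allocated pages into one virtually contiguous mapping with
 * vmap(), then tearing it down with vunmap(). Page allocation and freeing
 * are assumed to happen elsewhere.
 */
static void *example_vmap_two_pages(struct page *pages[2])
{
	/* VM_MAP marks the area as a regular vmap()ed region */
	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
}

static void example_vunmap(void *addr)
{
	vunmap(addr);		/* must not be called from interrupt context */
}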
1395
2dca6999
DM
1396static void *__vmalloc_node(unsigned long size, unsigned long align,
1397 gfp_t gfp_mask, pgprot_t prot,
db64fe02 1398 int node, void *caller);
e31d9eb5 1399static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
23016969 1400 pgprot_t prot, int node, void *caller)
1da177e4
LT
1401{
1402 struct page **pages;
1403 unsigned int nr_pages, array_size, i;
976d6dfb 1404 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1da177e4
LT
1405
1406 nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1407 array_size = (nr_pages * sizeof(struct page *));
1408
1409 area->nr_pages = nr_pages;
1410 /* Please note that the recursion is strictly bounded. */
8757d5fa 1411 if (array_size > PAGE_SIZE) {
976d6dfb 1412 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
23016969 1413 PAGE_KERNEL, node, caller);
8757d5fa 1414 area->flags |= VM_VPAGES;
286e1ea3 1415 } else {
976d6dfb 1416 pages = kmalloc_node(array_size, nested_gfp, node);
286e1ea3 1417 }
1da177e4 1418 area->pages = pages;
23016969 1419 area->caller = caller;
1da177e4
LT
1420 if (!area->pages) {
1421 remove_vm_area(area->addr);
1422 kfree(area);
1423 return NULL;
1424 }
1da177e4
LT
1425
1426 for (i = 0; i < area->nr_pages; i++) {
bf53d6f8
CL
1427 struct page *page;
1428
930fc45a 1429 if (node < 0)
bf53d6f8 1430 page = alloc_page(gfp_mask);
930fc45a 1431 else
bf53d6f8
CL
1432 page = alloc_pages_node(node, gfp_mask, 0);
1433
1434 if (unlikely(!page)) {
1da177e4
LT
1435 /* Successfully allocated i pages, free them in __vunmap() */
1436 area->nr_pages = i;
1437 goto fail;
1438 }
bf53d6f8 1439 area->pages[i] = page;
1da177e4
LT
1440 }
1441
1442 if (map_vm_area(area, prot, &pages))
1443 goto fail;
1444 return area->addr;
1445
1446fail:
1447 vfree(area->addr);
1448 return NULL;
1449}
1450
930fc45a
CL
1451void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
1452{
89219d37
CM
1453 void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
1454 __builtin_return_address(0));
1455
1456 /*
1457 * A ref_count = 3 is needed because the vm_struct and vmap_area
1458 * structures allocated in the __get_vm_area_node() function contain
1459 * references to the virtual address of the vmalloc'ed block.
1460 */
1461 kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
1462
1463 return addr;
930fc45a
CL
1464}
1465
1da177e4 1466/**
930fc45a 1467 * __vmalloc_node - allocate virtually contiguous memory
1da177e4 1468 * @size: allocation size
2dca6999 1469 * @align: desired alignment
1da177e4
LT
1470 * @gfp_mask: flags for the page level allocator
1471 * @prot: protection mask for the allocated pages
d44e0780 1472 * @node: node to use for allocation or -1
c85d194b 1473 * @caller: caller's return address
1da177e4
LT
1474 *
1475 * Allocate enough pages to cover @size from the page level
1476 * allocator with @gfp_mask flags. Map them into contiguous
1477 * kernel virtual space, using a pagetable protection of @prot.
1478 */
2dca6999
DM
1479static void *__vmalloc_node(unsigned long size, unsigned long align,
1480 gfp_t gfp_mask, pgprot_t prot,
1481 int node, void *caller)
1da177e4
LT
1482{
1483 struct vm_struct *area;
89219d37
CM
1484 void *addr;
1485 unsigned long real_size = size;
1da177e4
LT
1486
1487 size = PAGE_ALIGN(size);
4481374c 1488 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1da177e4
LT
1489 return NULL;
1490
2dca6999
DM
1491 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
1492 VMALLOC_END, node, gfp_mask, caller);
23016969 1493
1da177e4
LT
1494 if (!area)
1495 return NULL;
1496
89219d37
CM
1497 addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
1498
1499 /*
1500 * A ref_count = 3 is needed because the vm_struct and vmap_area
1501 * structures allocated in the __get_vm_area_node() function contain
1502 * references to the virtual address of the vmalloc'ed block.
1503 */
1504 kmemleak_alloc(addr, real_size, 3, gfp_mask);
1505
1506 return addr;
1da177e4
LT
1507}
1508
930fc45a
CL
1509void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1510{
2dca6999 1511 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
23016969 1512 __builtin_return_address(0));
930fc45a 1513}
1da177e4
LT
1514EXPORT_SYMBOL(__vmalloc);
1515
1516/**
1517 * vmalloc - allocate virtually contiguous memory
1da177e4 1518 * @size: allocation size
1da177e4
LT
1519 * Allocate enough pages to cover @size from the page level
1520 * allocator and map them into contiguous kernel virtual space.
1521 *
c1c8897f 1522 * For tight control over page level allocator and protection flags
1da177e4
LT
1523 * use __vmalloc() instead.
1524 */
1525void *vmalloc(unsigned long size)
1526{
2dca6999 1527 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
23016969 1528 -1, __builtin_return_address(0));
1da177e4 1529}
1da177e4
LT
1530EXPORT_SYMBOL(vmalloc);
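/*
 * Illustrative sketch (not part of the original file): a large, virtually
 * contiguous (but possibly physically scattered) buffer from vmalloc(),
 * later released with vfree(). The 2MB size is arbitrary.
 */
static void *example_vmalloc_buffer(void)
{
	void *buf = vmalloc(2UL * 1024 * 1024);		/* may sleep */

	if (buf)
		memset(buf, 0, 2UL * 1024 * 1024);
	return buf;					/* caller: vfree(buf) */
}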
1531
83342314 1532/**
ead04089
REB
1533 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1534 * @size: allocation size
83342314 1535 *
ead04089
REB
1536 * The resulting memory area is zeroed so it can be mapped to userspace
1537 * without leaking data.
83342314
NP
1538 */
1539void *vmalloc_user(unsigned long size)
1540{
1541 struct vm_struct *area;
1542 void *ret;
1543
2dca6999
DM
1544 ret = __vmalloc_node(size, SHMLBA,
1545 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
84877848 1546 PAGE_KERNEL, -1, __builtin_return_address(0));
2b4ac44e 1547 if (ret) {
db64fe02 1548 area = find_vm_area(ret);
2b4ac44e 1549 area->flags |= VM_USERMAP;
2b4ac44e 1550 }
83342314
NP
1551 return ret;
1552}
1553EXPORT_SYMBOL(vmalloc_user);
1554
930fc45a
CL
1555/**
1556 * vmalloc_node - allocate memory on a specific node
930fc45a 1557 * @size: allocation size
d44e0780 1558 * @node: numa node
930fc45a
CL
1559 *
1560 * Allocate enough pages to cover @size from the page level
1561 * allocator and map them into contiguous kernel virtual space.
1562 *
c1c8897f 1563 * For tight control over page level allocator and protection flags
930fc45a
CL
1564 * use __vmalloc() instead.
1565 */
1566void *vmalloc_node(unsigned long size, int node)
1567{
2dca6999 1568 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
23016969 1569 node, __builtin_return_address(0));
930fc45a
CL
1570}
1571EXPORT_SYMBOL(vmalloc_node);
1572
4dc3b16b
PP
1573#ifndef PAGE_KERNEL_EXEC
1574# define PAGE_KERNEL_EXEC PAGE_KERNEL
1575#endif
1576
1da177e4
LT
1577/**
1578 * vmalloc_exec - allocate virtually contiguous, executable memory
1da177e4
LT
1579 * @size: allocation size
1580 *
1581 * Kernel-internal function to allocate enough pages to cover @size
1582 * the page level allocator and map them into contiguous and
1583 * executable kernel virtual space.
1584 *
c1c8897f 1585 * For tight control over page level allocator and protection flags
1da177e4
LT
1586 * use __vmalloc() instead.
1587 */
1588
1da177e4
LT
1589void *vmalloc_exec(unsigned long size)
1590{
2dca6999 1591 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
84877848 1592 -1, __builtin_return_address(0));
1da177e4
LT
1593}
1594
0d08e0d3 1595#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
7ac674f5 1596#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
0d08e0d3 1597#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
7ac674f5 1598#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
0d08e0d3
AK
1599#else
1600#define GFP_VMALLOC32 GFP_KERNEL
1601#endif
1602
1da177e4
LT
1603/**
1604 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
1da177e4
LT
1605 * @size: allocation size
1606 *
1607 * Allocate enough 32bit PA addressable pages to cover @size from the
1608 * page level allocator and map them into contiguous kernel virtual space.
1609 */
1610void *vmalloc_32(unsigned long size)
1611{
2dca6999 1612 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
84877848 1613 -1, __builtin_return_address(0));
1da177e4 1614}
1da177e4
LT
1615EXPORT_SYMBOL(vmalloc_32);
1616
83342314 1617/**
ead04089 1618 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
83342314 1619 * @size: allocation size
ead04089
REB
1620 *
1621 * The resulting memory area is 32bit addressable and zeroed so it can be
1622 * mapped to userspace without leaking data.
83342314
NP
1623 */
1624void *vmalloc_32_user(unsigned long size)
1625{
1626 struct vm_struct *area;
1627 void *ret;
1628
2dca6999 1629 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
84877848 1630 -1, __builtin_return_address(0));
2b4ac44e 1631 if (ret) {
db64fe02 1632 area = find_vm_area(ret);
2b4ac44e 1633 area->flags |= VM_USERMAP;
2b4ac44e 1634 }
83342314
NP
1635 return ret;
1636}
1637EXPORT_SYMBOL(vmalloc_32_user);
1638
d0107eb0
KH
/*
 * Small helper routine: copy contents to buf from addr.
 * If the page is not present, fill with zeroes.
 */
1643
1644static int aligned_vread(char *buf, char *addr, unsigned long count)
1645{
1646 struct page *p;
1647 int copied = 0;
1648
1649 while (count) {
1650 unsigned long offset, length;
1651
1652 offset = (unsigned long)addr & ~PAGE_MASK;
1653 length = PAGE_SIZE - offset;
1654 if (length > count)
1655 length = count;
1656 p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a
		 * lock. But adding a lock here means that we need to add
		 * the overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, which is rarely used. Instead of that, we'll
		 * use kmap() and accept a small overhead in this access
		 * function.
		 */
1664 if (p) {
1665 /*
1666 * we can expect USER0 is not used (see vread/vwrite's
1667 * function description)
1668 */
1669 void *map = kmap_atomic(p, KM_USER0);
1670 memcpy(buf, map + offset, length);
1671 kunmap_atomic(map, KM_USER0);
1672 } else
1673 memset(buf, 0, length);
1674
1675 addr += length;
1676 buf += length;
1677 copied += length;
1678 count -= length;
1679 }
1680 return copied;
1681}
1682
1683static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1684{
1685 struct page *p;
1686 int copied = 0;
1687
1688 while (count) {
1689 unsigned long offset, length;
1690
1691 offset = (unsigned long)addr & ~PAGE_MASK;
1692 length = PAGE_SIZE - offset;
1693 if (length > count)
1694 length = count;
1695 p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a
		 * lock. But adding a lock here means that we need to add
		 * the overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, which is rarely used. Instead of that, we'll
		 * use kmap() and accept a small overhead in this access
		 * function.
		 */
1703 if (p) {
1704 /*
1705 * we can expect USER0 is not used (see vread/vwrite's
1706 * function description)
1707 */
1708 void *map = kmap_atomic(p, KM_USER0);
1709 memcpy(map + offset, buf, length);
1710 kunmap_atomic(map, KM_USER0);
1711 }
1712 addr += length;
1713 buf += length;
1714 copied += length;
1715 count -= length;
1716 }
1717 return copied;
1718}
1719
/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns the number of bytes by which addr and buf should be increased
 * (the same number as @count). Returns 0 if [addr...addr+count) does not
 * intersect any live vmalloc area.
 *
 * This function checks that addr is a valid vmalloc'ed area, and copies
 * data from that area to the given buffer. If the given memory range of
 * [addr...addr+count) includes some valid address, data is copied to the
 * proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) does not intersect any live vm_struct area,
 * returns 0. @buf should be a kernel buffer. Because this function uses
 * KM_USER0, the caller should guarantee KM_USER0 is not used.
 *
 * Note: in usual operation, vread() is never necessary because the caller
 * should know that the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any other information, such as /dev/kmem.
 */
1747
1da177e4
LT
1748long vread(char *buf, char *addr, unsigned long count)
1749{
1750 struct vm_struct *tmp;
1751 char *vaddr, *buf_start = buf;
d0107eb0 1752 unsigned long buflen = count;
1da177e4
LT
1753 unsigned long n;
1754
1755 /* Don't allow overflow */
1756 if ((unsigned long) addr + count < count)
1757 count = -(unsigned long) addr;
1758
1759 read_lock(&vmlist_lock);
d0107eb0 1760 for (tmp = vmlist; count && tmp; tmp = tmp->next) {
1da177e4
LT
1761 vaddr = (char *) tmp->addr;
1762 if (addr >= vaddr + tmp->size - PAGE_SIZE)
1763 continue;
1764 while (addr < vaddr) {
1765 if (count == 0)
1766 goto finished;
1767 *buf = '\0';
1768 buf++;
1769 addr++;
1770 count--;
1771 }
1772 n = vaddr + tmp->size - PAGE_SIZE - addr;
d0107eb0
KH
1773 if (n > count)
1774 n = count;
1775 if (!(tmp->flags & VM_IOREMAP))
1776 aligned_vread(buf, addr, n);
1777 else /* IOREMAP area is treated as memory hole */
1778 memset(buf, 0, n);
1779 buf += n;
1780 addr += n;
1781 count -= n;
1da177e4
LT
1782 }
1783finished:
1784 read_unlock(&vmlist_lock);
d0107eb0
KH
1785
1786 if (buf == buf_start)
1787 return 0;
1788 /* zero-fill memory holes */
1789 if (buf != buf_start + buflen)
1790 memset(buf, 0, buflen - (buf - buf_start));
1791
1792 return buflen;
1da177e4
LT
1793}
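/*
 * Illustrative sketch (not part of the original file): a /dev/kmem-style
 * debug reader using vread() to copy from a possibly unmapped vmalloc range
 * into a kernel buffer; holes and IOREMAP areas come back zero-filled.
 */
static long example_debug_read(char *kbuf, char *vmalloc_addr, unsigned long len)
{
	long copied = vread(kbuf, vmalloc_addr, len);

	if (!copied)
		return 0;	/* no overlap with any live vmalloc area */
	return copied;		/* == len, holes zero-filled in kbuf */
}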
1794
d0107eb0
KH
/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * Returns the number of bytes by which addr and buf should be increased
 * (the same number as @count).
 * If [addr...addr+count) does not intersect any valid vmalloc area,
 * returns 0.
 *
 * This function checks that addr is a valid vmalloc'ed area, and copies
 * data from the buffer to the given addr. If the specified range of
 * [addr...addr+count) includes some valid address, data is copied from
 * the proper area of @buf. If there are memory holes, no copy is done
 * into the holes. IOREMAP areas are treated as memory holes and no copy
 * is done.
 *
 * If [addr...addr+count) does not intersect any live vm_struct area,
 * returns 0. @buf should be a kernel buffer. Because this function uses
 * KM_USER0, the caller should guarantee KM_USER0 is not used.
 *
 * Note: in usual operation, vwrite() is never necessary because the caller
 * should know that the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any other information, such as /dev/kmem.
 *
 * The caller should guarantee KM_USER1 is not used.
 */
1824
1da177e4
LT
1825long vwrite(char *buf, char *addr, unsigned long count)
1826{
1827 struct vm_struct *tmp;
d0107eb0
KH
1828 char *vaddr;
1829 unsigned long n, buflen;
1830 int copied = 0;
1da177e4
LT
1831
1832 /* Don't allow overflow */
1833 if ((unsigned long) addr + count < count)
1834 count = -(unsigned long) addr;
d0107eb0 1835 buflen = count;
1da177e4
LT
1836
1837 read_lock(&vmlist_lock);
d0107eb0 1838 for (tmp = vmlist; count && tmp; tmp = tmp->next) {
1da177e4
LT
1839 vaddr = (char *) tmp->addr;
1840 if (addr >= vaddr + tmp->size - PAGE_SIZE)
1841 continue;
1842 while (addr < vaddr) {
1843 if (count == 0)
1844 goto finished;
1845 buf++;
1846 addr++;
1847 count--;
1848 }
1849 n = vaddr + tmp->size - PAGE_SIZE - addr;
d0107eb0
KH
1850 if (n > count)
1851 n = count;
1852 if (!(tmp->flags & VM_IOREMAP)) {
1853 aligned_vwrite(buf, addr, n);
1854 copied++;
1855 }
1856 buf += n;
1857 addr += n;
1858 count -= n;
1da177e4
LT
1859 }
1860finished:
1861 read_unlock(&vmlist_lock);
d0107eb0
KH
1862 if (!copied)
1863 return 0;
1864 return buflen;
1da177e4 1865}
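/*
 * Mirror-direction sketch (hypothetical helper, not part of this file):
 * write @len bytes from a kernel buffer into a vmalloc range; bytes that
 * fall into holes or ioremap areas are silently skipped.
 */
static bool poke_vmalloc_range(const char *kbuf, char *vm_addr,
			       unsigned long len)
{
	return vwrite((char *)kbuf, vm_addr, len) != 0;
}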
83342314
NP
1866
1867/**
1868 * remap_vmalloc_range - map vmalloc pages to userspace
83342314
NP
1869 * @vma: vma to cover (map full range of vma)
1870 * @addr: vmalloc memory
1871 * @pgoff: number of pages into addr before first page to map
7682486b
RD
1872 *
1873 * Returns: 0 for success, -Exxx on failure
83342314
NP
1874 *
1875 * This function checks that addr is a valid vmalloc'ed area, and
1876 * that it is big enough to cover the vma. It returns failure if
1877 * these criteria aren't met.
1878 *
72fd4a35 1879 * Similar to remap_pfn_range() (see mm/memory.c)
83342314
NP
1880 */
1881int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1882 unsigned long pgoff)
1883{
1884 struct vm_struct *area;
1885 unsigned long uaddr = vma->vm_start;
1886 unsigned long usize = vma->vm_end - vma->vm_start;
83342314
NP
1887
1888 if ((PAGE_SIZE-1) & (unsigned long)addr)
1889 return -EINVAL;
1890
db64fe02 1891 area = find_vm_area(addr);
83342314 1892 if (!area)
db64fe02 1893 return -EINVAL;
83342314
NP
1894
1895 if (!(area->flags & VM_USERMAP))
db64fe02 1896 return -EINVAL;
83342314
NP
1897
1898 if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
db64fe02 1899 return -EINVAL;
83342314
NP
1900
1901 addr += pgoff << PAGE_SHIFT;
1902 do {
1903 struct page *page = vmalloc_to_page(addr);
db64fe02
NP
1904 int ret;
1905
83342314
NP
1906 ret = vm_insert_page(vma, uaddr, page);
1907 if (ret)
1908 return ret;
1909
1910 uaddr += PAGE_SIZE;
1911 addr += PAGE_SIZE;
1912 usize -= PAGE_SIZE;
1913 } while (usize > 0);
1914
1915 /* Prevent "things" like memory migration? VM_flags need a cleanup... */
1916 vma->vm_flags |= VM_RESERVED;
1917
db64fe02 1918 return 0;
83342314
NP
1919}
1920EXPORT_SYMBOL(remap_vmalloc_range);
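/*
 * Minimal sketch of a typical caller (hypothetical driver code, not part
 * of this file): a buffer allocated with vmalloc_user() has VM_USERMAP
 * set, so a file_operations .mmap handler can hand it to userspace.
 */
static void *my_buf;	/* assumed to be allocated with vmalloc_user() */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}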
1921
1eeb66a1
CH
1922/*
1923 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
1924 * have one.
1925 */
1926void __attribute__((weak)) vmalloc_sync_all(void)
1927{
1928}
5f4352fb
JF
1929
1930
2f569afd 1931static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
5f4352fb
JF
1932{
1933 /* apply_to_page_range() does all the hard work. */
1934 return 0;
1935}
1936
1937/**
1938 * alloc_vm_area - allocate a range of kernel address space
1939 * @size: size of the area
7682486b
RD
1940 *
1941 * Returns: NULL on failure, vm_struct on success
5f4352fb
JF
1942 *
1943 * This function reserves a range of kernel address space and
1944 * allocates page tables to map that range. No actual mappings
1945 * are created. If the kernel address space is not shared
1946 * between processes, it syncs the page tables across all
1947 * processes.
1948 */
1949struct vm_struct *alloc_vm_area(size_t size)
1950{
1951 struct vm_struct *area;
1952
23016969
CL
1953 area = get_vm_area_caller(size, VM_IOREMAP,
1954 __builtin_return_address(0));
5f4352fb
JF
1955 if (area == NULL)
1956 return NULL;
1957
1958 /*
1959 * This ensures that page tables are constructed for this region
1960 * of kernel virtual address space and mapped into init_mm.
1961 */
1962 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
1963 area->size, f, NULL)) {
1964 free_vm_area(area);
1965 return NULL;
1966 }
1967
1968 /* Make sure the page tables are constructed in the kernel
1969 mappings of every process */
1970 vmalloc_sync_all();
1971
1972 return area;
1973}
1974EXPORT_SYMBOL_GPL(alloc_vm_area);
1975
1976void free_vm_area(struct vm_struct *area)
1977{
1978 struct vm_struct *ret;
1979 ret = remove_vm_area(area->addr);
1980 BUG_ON(ret != area);
1981 kfree(area);
1982}
1983EXPORT_SYMBOL_GPL(free_vm_area);
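/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * reserve a page-table-backed range of kernel VA, use area->addr while
 * installing real mappings by other means, then release it.
 */
static struct vm_struct *my_area;

static int my_reserve(void)
{
	my_area = alloc_vm_area(PAGE_SIZE);	/* one usable page of KVA */
	if (!my_area)
		return -ENOMEM;
	/* ... point the PTEs covering my_area->addr at real memory here ... */
	return 0;
}

static void my_release(void)
{
	free_vm_area(my_area);	/* unmaps the range and frees the vm_struct */
}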
a10aa579 1984
ca23e405
TH
1985static struct vmap_area *node_to_va(struct rb_node *n)
1986{
1987 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
1988}
1989
1990/**
1991 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
1992 * @end: target address
1993 * @pnext: out arg for the next vmap_area
1994 * @pprev: out arg for the previous vmap_area
1995 *
1996 * Returns: %true if either or both of next and prev are found,
1997 * %false if no vmap_area exists
1998 *
1999 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
2000 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
2001 */
2002static bool pvm_find_next_prev(unsigned long end,
2003 struct vmap_area **pnext,
2004 struct vmap_area **pprev)
2005{
2006 struct rb_node *n = vmap_area_root.rb_node;
2007 struct vmap_area *va = NULL;
2008
2009 while (n) {
2010 va = rb_entry(n, struct vmap_area, rb_node);
2011 if (end < va->va_end)
2012 n = n->rb_left;
2013 else if (end > va->va_end)
2014 n = n->rb_right;
2015 else
2016 break;
2017 }
2018
2019 if (!va)
2020 return false;
2021
2022 if (va->va_end > end) {
2023 *pnext = va;
2024 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2025 } else {
2026 *pprev = va;
2027 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2028 }
2029 return true;
2030}
2031
2032/**
2033 * pvm_determine_end - find the highest aligned address between two vmap_areas
2034 * @pnext: in/out arg for the next vmap_area
2035 * @pprev: in/out arg for the previous vmap_area
2036 * @align: alignment
2037 *
2038 * Returns: determined end address
2039 *
2040 * Find the highest aligned address between *@pnext and *@pprev below
2041 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the
2042 * aligned-down address is between the end addresses of the two vmap_areas.
2043 *
2044 * Please note that the address returned by this function may fall
2045 * inside *@pnext vmap_area. The caller is responsible for checking
2046 * that.
2047 */
2048static unsigned long pvm_determine_end(struct vmap_area **pnext,
2049 struct vmap_area **pprev,
2050 unsigned long align)
2051{
2052 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2053 unsigned long addr;
2054
2055 if (*pnext)
2056 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2057 else
2058 addr = vmalloc_end;
2059
2060 while (*pprev && (*pprev)->va_end > addr) {
2061 *pnext = *pprev;
2062 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2063 }
2064
2065 return addr;
2066}
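/*
 * Worked example (addresses are illustrative only).  Suppose the only
 * existing areas are A = [0x1000, 0x3000) and B = [0x8000, 0x9000),
 * align = 0x1000 and VMALLOC_END = 0x10000:
 *
 *   pvm_find_next_prev(0x5000, &next, &prev)
 *     finds A->va_end (0x3000) <= 0x5000 < B->va_end (0x9000),
 *     so *pprev = A and *pnext = B.
 *
 *   pvm_determine_end(&next, &prev, 0x1000)
 *     starts from min(B->va_start, VMALLOC_END) aligned down = 0x8000;
 *     A->va_end (0x3000) does not exceed it, so 0x8000 is returned as
 *     the highest usable end address between the two areas.
 */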
2067
2068/**
2069 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2070 * @offsets: array containing offset of each area
2071 * @sizes: array containing size of each area
2072 * @nr_vms: the number of areas to allocate
2073 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
2074 * @gfp_mask: allocation mask
2075 *
2076 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
2077 * vm_structs on success, %NULL on failure
2078 *
2079 * The percpu allocator wants to use congruent vm areas so that it can
2080 * maintain the offsets among percpu areas. This function allocates
2081 * congruent vmalloc areas for it. These areas tend to be scattered
2082 * pretty far apart, with the distance between two areas easily going
2083 * up to gigabytes. To avoid interacting with regular vmallocs, these
2084 * areas are allocated from the top.
2085 *
2086 * Despite its complicated look, this allocator is rather simple. It
2087 * does everything top-down and scans areas from the end looking for a
2088 * matching slot. While scanning, if any of the areas overlaps with an
2089 * existing vmap_area, the base address is pulled down to fit the
2090 * area. Scanning is repeated until all the areas fit, and then all the
2091 * necessary data structures are inserted and the result is returned.
2092 */
2093struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2094 const size_t *sizes, int nr_vms,
2095 size_t align, gfp_t gfp_mask)
2096{
2097 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2098 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2099 struct vmap_area **vas, *prev, *next;
2100 struct vm_struct **vms;
2101 int area, area2, last_area, term_area;
2102 unsigned long base, start, end, last_end;
2103 bool purged = false;
2104
2105 gfp_mask &= GFP_RECLAIM_MASK;
2106
2107 /* verify parameters and allocate data structures */
2108 BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
2109 for (last_area = 0, area = 0; area < nr_vms; area++) {
2110 start = offsets[area];
2111 end = start + sizes[area];
2112
2113 /* is everything aligned properly? */
2114 BUG_ON(!IS_ALIGNED(offsets[area], align));
2115 BUG_ON(!IS_ALIGNED(sizes[area], align));
2116
2117 /* detect the area with the highest address */
2118 if (start > offsets[last_area])
2119 last_area = area;
2120
2121 for (area2 = 0; area2 < nr_vms; area2++) {
2122 unsigned long start2 = offsets[area2];
2123 unsigned long end2 = start2 + sizes[area2];
2124
2125 if (area2 == area)
2126 continue;
2127
2128 BUG_ON(start2 >= start && start2 < end);
2129 BUG_ON(end2 <= end && end2 > start);
2130 }
2131 }
2132 last_end = offsets[last_area] + sizes[last_area];
2133
2134 if (vmalloc_end - vmalloc_start < last_end) {
2135 WARN_ON(true);
2136 return NULL;
2137 }
2138
2139 vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
2140 vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
2141 if (!vas || !vms)
2142 goto err_free;
2143
2144 for (area = 0; area < nr_vms; area++) {
2145 vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
2146 vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
2147 if (!vas[area] || !vms[area])
2148 goto err_free;
2149 }
2150retry:
2151 spin_lock(&vmap_area_lock);
2152
2153 /* start scanning - we scan from the top, begin with the last area */
2154 area = term_area = last_area;
2155 start = offsets[area];
2156 end = start + sizes[area];
2157
2158 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2159 base = vmalloc_end - last_end;
2160 goto found;
2161 }
2162 base = pvm_determine_end(&next, &prev, align) - end;
2163
2164 while (true) {
2165 BUG_ON(next && next->va_end <= base + end);
2166 BUG_ON(prev && prev->va_end > base + end);
2167
2168 /*
2169 * base might have underflowed, add last_end before
2170 * comparing.
2171 */
2172 if (base + last_end < vmalloc_start + last_end) {
2173 spin_unlock(&vmap_area_lock);
2174 if (!purged) {
2175 purge_vmap_area_lazy();
2176 purged = true;
2177 goto retry;
2178 }
2179 goto err_free;
2180 }
2181
2182 /*
2183 * If next overlaps, move base downwards so that it's
2184 * right below next and then recheck.
2185 */
2186 if (next && next->va_start < base + end) {
2187 base = pvm_determine_end(&next, &prev, align) - end;
2188 term_area = area;
2189 continue;
2190 }
2191
2192 /*
2193 * If prev overlaps, shift down next and prev and move
2194 * base so that it's right below new next and then
2195 * recheck.
2196 */
2197 if (prev && prev->va_end > base + start) {
2198 next = prev;
2199 prev = node_to_va(rb_prev(&next->rb_node));
2200 base = pvm_determine_end(&next, &prev, align) - end;
2201 term_area = area;
2202 continue;
2203 }
2204
2205 /*
2206 * This area fits, move on to the previous one. If
2207 * the previous one is the terminal one, we're done.
2208 */
2209 area = (area + nr_vms - 1) % nr_vms;
2210 if (area == term_area)
2211 break;
2212 start = offsets[area];
2213 end = start + sizes[area];
2214 pvm_find_next_prev(base + end, &next, &prev);
2215 }
2216found:
2217 /* we've found a fitting base, insert all va's */
2218 for (area = 0; area < nr_vms; area++) {
2219 struct vmap_area *va = vas[area];
2220
2221 va->va_start = base + offsets[area];
2222 va->va_end = va->va_start + sizes[area];
2223 __insert_vmap_area(va);
2224 }
2225
2226 vmap_area_pcpu_hole = base + offsets[last_area];
2227
2228 spin_unlock(&vmap_area_lock);
2229
2230 /* insert all vm's */
2231 for (area = 0; area < nr_vms; area++)
2232 insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2233 pcpu_get_vm_areas);
2234
2235 kfree(vas);
2236 return vms;
2237
2238err_free:
2239 for (area = 0; area < nr_vms; area++) {
2240 if (vas)
2241 kfree(vas[area]);
2242 if (vms)
2243 kfree(vms[area]);
2244 }
2245 kfree(vas);
2246 kfree(vms);
2247 return NULL;
2248}
2249
2250/**
2251 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2252 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
2253 * @nr_vms: the number of allocated areas
2254 *
2255 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
2256 */
2257void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2258{
2259 int i;
2260
2261 for (i = 0; i < nr_vms; i++)
2262 free_vm_area(vms[i]);
2263 kfree(vms);
2264}
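/*
 * Hedged sketch of a caller requesting two congruent areas, roughly the
 * way the percpu first-chunk code uses this interface; the offsets and
 * sizes below are made up for illustration (both must be aligned to
 * @align and the areas must not overlap).
 */
static int my_alloc_congruent(void)
{
	const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
	const size_t sizes[] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE, GFP_KERNEL);
	if (!vms)
		return -ENOMEM;

	/* congruent: vms[1]->addr - vms[0]->addr == offsets[1] - offsets[0] */
	pcpu_free_vm_areas(vms, 2);
	return 0;
}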
a10aa579
CL
2265
2266#ifdef CONFIG_PROC_FS
2267static void *s_start(struct seq_file *m, loff_t *pos)
2268{
2269 loff_t n = *pos;
2270 struct vm_struct *v;
2271
2272 read_lock(&vmlist_lock);
2273 v = vmlist;
2274 while (n > 0 && v) {
2275 n--;
2276 v = v->next;
2277 }
2278 if (!n)
2279 return v;
2280
2281 return NULL;
2282
2283}
2284
2285static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2286{
2287 struct vm_struct *v = p;
2288
2289 ++*pos;
2290 return v->next;
2291}
2292
2293static void s_stop(struct seq_file *m, void *p)
2294{
2295 read_unlock(&vmlist_lock);
2296}
2297
a47a126a
ED
2298static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2299{
2300 if (NUMA_BUILD) {
2301 unsigned int nr, *counters = m->private;
2302
2303 if (!counters)
2304 return;
2305
2306 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2307
2308 for (nr = 0; nr < v->nr_pages; nr++)
2309 counters[page_to_nid(v->pages[nr])]++;
2310
2311 for_each_node_state(nr, N_HIGH_MEMORY)
2312 if (counters[nr])
2313 seq_printf(m, " N%u=%u", nr, counters[nr]);
2314 }
2315}
2316
a10aa579
CL
2317static int s_show(struct seq_file *m, void *p)
2318{
2319 struct vm_struct *v = p;
2320
2321 seq_printf(m, "0x%p-0x%p %7ld",
2322 v->addr, v->addr + v->size, v->size);
2323
23016969 2324 if (v->caller) {
9c246247 2325 char buff[KSYM_SYMBOL_LEN];
23016969
CL
2326
2327 seq_putc(m, ' ');
2328 sprint_symbol(buff, (unsigned long)v->caller);
2329 seq_puts(m, buff);
2330 }
2331
a10aa579
CL
2332 if (v->nr_pages)
2333 seq_printf(m, " pages=%d", v->nr_pages);
2334
2335 if (v->phys_addr)
2336 seq_printf(m, " phys=%lx", v->phys_addr);
2337
2338 if (v->flags & VM_IOREMAP)
2339 seq_printf(m, " ioremap");
2340
2341 if (v->flags & VM_ALLOC)
2342 seq_printf(m, " vmalloc");
2343
2344 if (v->flags & VM_MAP)
2345 seq_printf(m, " vmap");
2346
2347 if (v->flags & VM_USERMAP)
2348 seq_printf(m, " user");
2349
2350 if (v->flags & VM_VPAGES)
2351 seq_printf(m, " vpages");
2352
a47a126a 2353 show_numa_info(m, v);
a10aa579
CL
2354 seq_putc(m, '\n');
2355 return 0;
2356}
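/*
 * The seq_printf() calls above emit one line per vm_struct in
 * /proc/vmallocinfo; an illustrative (made-up) example line:
 *
 *   0xf8800000-0xf8802000    8192 module_alloc+0x2f/0x40 pages=1 vmalloc N0=1
 */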
2357
5f6a6a9c 2358static const struct seq_operations vmalloc_op = {
a10aa579
CL
2359 .start = s_start,
2360 .next = s_next,
2361 .stop = s_stop,
2362 .show = s_show,
2363};
5f6a6a9c
AD
2364
2365static int vmalloc_open(struct inode *inode, struct file *file)
2366{
2367 unsigned int *ptr = NULL;
2368 int ret;
2369
2370 if (NUMA_BUILD)
2371 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
2372 ret = seq_open(file, &vmalloc_op);
2373 if (!ret) {
2374 struct seq_file *m = file->private_data;
2375 m->private = ptr;
2376 } else
2377 kfree(ptr);
2378 return ret;
2379}
2380
2381static const struct file_operations proc_vmalloc_operations = {
2382 .open = vmalloc_open,
2383 .read = seq_read,
2384 .llseek = seq_lseek,
2385 .release = seq_release_private,
2386};
2387
2388static int __init proc_vmalloc_init(void)
2389{
2390 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2391 return 0;
2392}
2393module_init(proc_vmalloc_init);
a10aa579
CL
2394#endif
2395