1/*
2 * linux/mm/vmalloc.c
3 *
4 * Copyright (C) 1993 Linus Torvalds
5 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
7 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
8 * Numa awareness, Christoph Lameter, SGI, June 2005
9 */
10
11#include <linux/vmalloc.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/highmem.h>
15#include <linux/slab.h>
16#include <linux/spinlock.h>
17#include <linux/interrupt.h>
18#include <linux/proc_fs.h>
19#include <linux/seq_file.h>
20#include <linux/debugobjects.h>
21#include <linux/kallsyms.h>
22#include <linux/list.h>
23#include <linux/rbtree.h>
24#include <linux/radix-tree.h>
25#include <linux/rcupdate.h>
26
27#include <asm/atomic.h>
28#include <asm/uaccess.h>
29#include <asm/tlbflush.h>
30
31
32/*** Page table manipulation functions ***/
33
34static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
35{
36 pte_t *pte;
37
38 pte = pte_offset_kernel(pmd, addr);
39 do {
40 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
41 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
42 } while (pte++, addr += PAGE_SIZE, addr != end);
43}
44
45static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
46{
47 pmd_t *pmd;
48 unsigned long next;
49
50 pmd = pmd_offset(pud, addr);
51 do {
52 next = pmd_addr_end(addr, end);
53 if (pmd_none_or_clear_bad(pmd))
54 continue;
55 vunmap_pte_range(pmd, addr, next);
56 } while (pmd++, addr = next, addr != end);
57}
58
59static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
60{
61 pud_t *pud;
62 unsigned long next;
63
64 pud = pud_offset(pgd, addr);
65 do {
66 next = pud_addr_end(addr, end);
67 if (pud_none_or_clear_bad(pud))
68 continue;
69 vunmap_pmd_range(pud, addr, next);
70 } while (pud++, addr = next, addr != end);
71}
72
73static void vunmap_page_range(unsigned long addr, unsigned long end)
74{
75 pgd_t *pgd;
76 unsigned long next;
77
78 BUG_ON(addr >= end);
79 pgd = pgd_offset_k(addr);
80 do {
81 next = pgd_addr_end(addr, end);
82 if (pgd_none_or_clear_bad(pgd))
83 continue;
84 vunmap_pud_range(pgd, addr, next);
85 } while (pgd++, addr = next, addr != end);
86}
87
88static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
89 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
90{
91 pte_t *pte;
92
93 /*
94 * nr is a running index into the array which helps higher level
95 * callers keep track of where we're up to.
96 */
97
98 pte = pte_alloc_kernel(pmd, addr);
99 if (!pte)
100 return -ENOMEM;
101 do {
102 struct page *page = pages[*nr];
103
104 if (WARN_ON(!pte_none(*pte)))
105 return -EBUSY;
106 if (WARN_ON(!page))
107 return -ENOMEM;
108 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
109 (*nr)++;
110 } while (pte++, addr += PAGE_SIZE, addr != end);
111 return 0;
112}
113
114static int vmap_pmd_range(pud_t *pud, unsigned long addr,
115 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
116{
117 pmd_t *pmd;
118 unsigned long next;
119
120 pmd = pmd_alloc(&init_mm, pud, addr);
121 if (!pmd)
122 return -ENOMEM;
123 do {
124 next = pmd_addr_end(addr, end);
125 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
126 return -ENOMEM;
127 } while (pmd++, addr = next, addr != end);
128 return 0;
129}
130
131static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
132 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
133{
134 pud_t *pud;
135 unsigned long next;
136
137 pud = pud_alloc(&init_mm, pgd, addr);
138 if (!pud)
139 return -ENOMEM;
140 do {
141 next = pud_addr_end(addr, end);
142 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
143 return -ENOMEM;
144 } while (pud++, addr = next, addr != end);
145 return 0;
146}
147
148/*
149 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
150 * will have pfns corresponding to the "pages" array.
151 *
152 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
153 */
154static int vmap_page_range(unsigned long start, unsigned long end,
155 pgprot_t prot, struct page **pages)
156{
157 pgd_t *pgd;
158 unsigned long next;
159 unsigned long addr = start;
160 int err = 0;
161 int nr = 0;
162
163 BUG_ON(addr >= end);
164 pgd = pgd_offset_k(addr);
165 do {
166 next = pgd_addr_end(addr, end);
167 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
168 if (err)
169 break;
170 } while (pgd++, addr = next, addr != end);
171 flush_cache_vmap(start, end);
172
173 if (unlikely(err))
174 return err;
175 return nr;
176}
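/*
 * Illustrative restatement (not part of the original file) of the invariant
 * described in the comment above: after a successful
 * vmap_page_range(start, end, prot, pages), the pte at start + N * PAGE_SIZE
 * points at pages[N], so
 *
 *	vmalloc_to_page((void *)(start + N * PAGE_SIZE)) == pages[N]
 *
 * holds for every N with start + N * PAGE_SIZE < end.
 */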
177
178static inline int is_vmalloc_or_module_addr(const void *x)
179{
180 /*
181 * ARM, x86-64 and sparc64 put modules in a special place,
182 * and fall back on vmalloc() if that fails. Others
183 * just put it in the vmalloc space.
184 */
185#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
186 unsigned long addr = (unsigned long)x;
187 if (addr >= MODULES_VADDR && addr < MODULES_END)
188 return 1;
189#endif
190 return is_vmalloc_addr(x);
191}
192
193/*
194 * Walk a vmap address to the struct page it maps.
195 */
196struct page *vmalloc_to_page(const void *vmalloc_addr)
197{
198 unsigned long addr = (unsigned long) vmalloc_addr;
199 struct page *page = NULL;
200 pgd_t *pgd = pgd_offset_k(addr);
201
202 /*
203 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
204 * architectures that do not vmalloc module space
205 */
206 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
207
208 if (!pgd_none(*pgd)) {
209 pud_t *pud = pud_offset(pgd, addr);
210 if (!pud_none(*pud)) {
211 pmd_t *pmd = pmd_offset(pud, addr);
212 if (!pmd_none(*pmd)) {
213 pte_t *ptep, pte;
214
215 ptep = pte_offset_map(pmd, addr);
216 pte = *ptep;
217 if (pte_present(pte))
218 page = pte_page(pte);
219 pte_unmap(ptep);
220 }
221 }
222 }
223 return page;
224}
225EXPORT_SYMBOL(vmalloc_to_page);
226
227/*
228 * Map a vmalloc()-space virtual address to the physical page frame number.
229 */
230unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
231{
232 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
233}
234EXPORT_SYMBOL(vmalloc_to_pfn);
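/*
 * Illustrative sketch (not part of the original file; the buffer name is
 * hypothetical): a caller that needs the struct page or page frame number
 * behind one page of a vmalloc()ed buffer can combine the helpers above:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	if (buf) {
 *		struct page *pg = vmalloc_to_page(buf);
 *		unsigned long pfn = vmalloc_to_pfn(buf);  // == page_to_pfn(pg)
 *		...
 *		vfree(buf);
 *	}
 */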
235
236
237/*** Global kva allocator ***/
238
239#define VM_LAZY_FREE 0x01
240#define VM_LAZY_FREEING 0x02
241#define VM_VM_AREA 0x04
242
243struct vmap_area {
244 unsigned long va_start;
245 unsigned long va_end;
246 unsigned long flags;
247 struct rb_node rb_node; /* address sorted rbtree */
248 struct list_head list; /* address sorted list */
249 struct list_head purge_list; /* "lazy purge" list */
250 void *private;
251 struct rcu_head rcu_head;
252};
253
254static DEFINE_SPINLOCK(vmap_area_lock);
255static struct rb_root vmap_area_root = RB_ROOT;
256static LIST_HEAD(vmap_area_list);
257
258static struct vmap_area *__find_vmap_area(unsigned long addr)
259{
260 struct rb_node *n = vmap_area_root.rb_node;
261
262 while (n) {
263 struct vmap_area *va;
264
265 va = rb_entry(n, struct vmap_area, rb_node);
266 if (addr < va->va_start)
267 n = n->rb_left;
268 else if (addr > va->va_start)
269 n = n->rb_right;
270 else
271 return va;
272 }
273
274 return NULL;
275}
276
277static void __insert_vmap_area(struct vmap_area *va)
278{
279 struct rb_node **p = &vmap_area_root.rb_node;
280 struct rb_node *parent = NULL;
281 struct rb_node *tmp;
282
283 while (*p) {
284 struct vmap_area *tmp;
285
286 parent = *p;
287 tmp = rb_entry(parent, struct vmap_area, rb_node);
288 if (va->va_start < tmp->va_end)
289 p = &(*p)->rb_left;
290 else if (va->va_end > tmp->va_start)
291 p = &(*p)->rb_right;
292 else
293 BUG();
294 }
295
296 rb_link_node(&va->rb_node, parent, p);
297 rb_insert_color(&va->rb_node, &vmap_area_root);
298
299 /* address-sort this list so it is usable like the vmlist */
300 tmp = rb_prev(&va->rb_node);
301 if (tmp) {
302 struct vmap_area *prev;
303 prev = rb_entry(tmp, struct vmap_area, rb_node);
304 list_add_rcu(&va->list, &prev->list);
305 } else
306 list_add_rcu(&va->list, &vmap_area_list);
307}
308
309static void purge_vmap_area_lazy(void);
310
311/*
312 * Allocate a region of KVA of the specified size and alignment, within the
313 * vstart and vend.
314 */
315static struct vmap_area *alloc_vmap_area(unsigned long size,
316 unsigned long align,
317 unsigned long vstart, unsigned long vend,
318 int node, gfp_t gfp_mask)
319{
320 struct vmap_area *va;
321 struct rb_node *n;
322 unsigned long addr;
323 int purged = 0;
324
325 BUG_ON(size & ~PAGE_MASK);
326
327 va = kmalloc_node(sizeof(struct vmap_area),
328 gfp_mask & GFP_RECLAIM_MASK, node);
329 if (unlikely(!va))
330 return ERR_PTR(-ENOMEM);
331
332retry:
333 addr = ALIGN(vstart, align);
334
335 spin_lock(&vmap_area_lock);
336 /* XXX: could have a last_hole cache */
337 n = vmap_area_root.rb_node;
338 if (n) {
339 struct vmap_area *first = NULL;
340
341 do {
342 struct vmap_area *tmp;
343 tmp = rb_entry(n, struct vmap_area, rb_node);
344 if (tmp->va_end >= addr) {
345 if (!first && tmp->va_start < addr + size)
346 first = tmp;
347 n = n->rb_left;
348 } else {
349 first = tmp;
350 n = n->rb_right;
351 }
352 } while (n);
353
354 if (!first)
355 goto found;
356
357 if (first->va_end < addr) {
358 n = rb_next(&first->rb_node);
359 if (n)
360 first = rb_entry(n, struct vmap_area, rb_node);
361 else
362 goto found;
363 }
364
365 while (addr + size > first->va_start && addr + size <= vend) {
366 addr = ALIGN(first->va_end + PAGE_SIZE, align);
367
368 n = rb_next(&first->rb_node);
369 if (n)
370 first = rb_entry(n, struct vmap_area, rb_node);
371 else
372 goto found;
373 }
374 }
375found:
376 if (addr + size > vend) {
377 spin_unlock(&vmap_area_lock);
378 if (!purged) {
379 purge_vmap_area_lazy();
380 purged = 1;
381 goto retry;
382 }
383 if (printk_ratelimit())
384 printk(KERN_WARNING
385 "vmap allocation for size %lu failed: "
386 "use vmalloc=<size> to increase size.\n", size);
387 return ERR_PTR(-EBUSY);
388 }
389
390 BUG_ON(addr & (align-1));
391
392 va->va_start = addr;
393 va->va_end = addr + size;
394 va->flags = 0;
395 __insert_vmap_area(va);
396 spin_unlock(&vmap_area_lock);
397
398 return va;
399}
400
401static void rcu_free_va(struct rcu_head *head)
402{
403 struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
404
405 kfree(va);
406}
407
408static void __free_vmap_area(struct vmap_area *va)
409{
410 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
411 rb_erase(&va->rb_node, &vmap_area_root);
412 RB_CLEAR_NODE(&va->rb_node);
413 list_del_rcu(&va->list);
414
415 call_rcu(&va->rcu_head, rcu_free_va);
416}
417
418/*
419 * Free a region of KVA allocated by alloc_vmap_area
420 */
421static void free_vmap_area(struct vmap_area *va)
422{
423 spin_lock(&vmap_area_lock);
424 __free_vmap_area(va);
425 spin_unlock(&vmap_area_lock);
426}
427
428/*
429 * Clear the pagetable entries of a given vmap_area
430 */
431static void unmap_vmap_area(struct vmap_area *va)
432{
433 vunmap_page_range(va->va_start, va->va_end);
434}
435
436/*
437 * lazy_max_pages is the maximum amount of virtual address space we gather up
438 * before attempting to purge with a TLB flush.
439 *
440 * There is a tradeoff here: a larger number will cover more kernel page tables
441 * and take slightly longer to purge, but it will linearly reduce the number of
442 * global TLB flushes that must be performed. It would seem natural to scale
443 * this number up linearly with the number of CPUs (because vmapping activity
444 * could also scale linearly with the number of CPUs), however it is likely
445 * that in practice, workloads might be constrained in other ways that mean
446 * vmap activity will not scale linearly with CPUs. Also, I want to be
447 * conservative and not introduce a big latency on huge systems, so go with
448 * a less aggressive log scale. It will still be an improvement over the old
449 * code, and it will be simple to change the scale factor if we find that it
450 * becomes a problem on bigger systems.
451 */
452static unsigned long lazy_max_pages(void)
453{
454 unsigned int log;
455
456 log = fls(num_online_cpus());
457
458 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
459}
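/*
 * Worked example (illustrative only, assuming 4K pages): with 8 online CPUs,
 * fls(8) == 4, so lazy_max_pages() returns 4 * (32MB / 4KB) = 32768 pages,
 * i.e. up to 128MB worth of lazily freed vmap space may accumulate before a
 * global TLB flush is forced.
 */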
460
461static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
462
463/*
464 * Purges all lazily-freed vmap areas.
465 *
466 * If sync is 0 then don't purge if there is already a purge in progress.
467 * If force_flush is 1, then flush kernel TLBs between *start and *end even
468 * if we found no lazy vmap areas to unmap (callers can use this to optimise
469 * their own TLB flushing).
470 * Returns with *start = min(*start, lowest purged address)
471 * *end = max(*end, highest purged address)
472 */
473static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
474 int sync, int force_flush)
475{
476 static DEFINE_SPINLOCK(purge_lock);
477 LIST_HEAD(valist);
478 struct vmap_area *va;
479 int nr = 0;
480
481 /*
482 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
483 * should not expect such behaviour. This just simplifies locking for
484 * the case that isn't actually used at the moment anyway.
485 */
486 if (!sync && !force_flush) {
487 if (!spin_trylock(&purge_lock))
488 return;
489 } else
490 spin_lock(&purge_lock);
491
492 rcu_read_lock();
493 list_for_each_entry_rcu(va, &vmap_area_list, list) {
494 if (va->flags & VM_LAZY_FREE) {
495 if (va->va_start < *start)
496 *start = va->va_start;
497 if (va->va_end > *end)
498 *end = va->va_end;
499 nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
500 unmap_vmap_area(va);
501 list_add_tail(&va->purge_list, &valist);
502 va->flags |= VM_LAZY_FREEING;
503 va->flags &= ~VM_LAZY_FREE;
504 }
505 }
506 rcu_read_unlock();
507
508 if (nr) {
509 BUG_ON(nr > atomic_read(&vmap_lazy_nr));
510 atomic_sub(nr, &vmap_lazy_nr);
511 }
512
513 if (nr || force_flush)
514 flush_tlb_kernel_range(*start, *end);
515
516 if (nr) {
517 spin_lock(&vmap_area_lock);
518 list_for_each_entry(va, &valist, purge_list)
519 __free_vmap_area(va);
520 spin_unlock(&vmap_area_lock);
521 }
522 spin_unlock(&purge_lock);
523}
524
525/*
526 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
527 * is already purging.
528 */
529static void try_purge_vmap_area_lazy(void)
530{
531 unsigned long start = ULONG_MAX, end = 0;
532
533 __purge_vmap_area_lazy(&start, &end, 0, 0);
534}
535
536/*
537 * Kick off a purge of the outstanding lazy areas.
538 */
539static void purge_vmap_area_lazy(void)
540{
541 unsigned long start = ULONG_MAX, end = 0;
542
543 __purge_vmap_area_lazy(&start, &end, 1, 0);
544}
545
546/*
547 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
548 * called for the correct range previously.
549 */
550static void free_unmap_vmap_area_noflush(struct vmap_area *va)
551{
552 va->flags |= VM_LAZY_FREE;
553 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
554 if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
555 try_purge_vmap_area_lazy();
556}
557
558/*
559 * Free and unmap a vmap area
560 */
561static void free_unmap_vmap_area(struct vmap_area *va)
562{
563 flush_cache_vunmap(va->va_start, va->va_end);
564 free_unmap_vmap_area_noflush(va);
565}
566
567static struct vmap_area *find_vmap_area(unsigned long addr)
568{
569 struct vmap_area *va;
570
571 spin_lock(&vmap_area_lock);
572 va = __find_vmap_area(addr);
573 spin_unlock(&vmap_area_lock);
574
575 return va;
576}
577
578static void free_unmap_vmap_area_addr(unsigned long addr)
579{
580 struct vmap_area *va;
581
582 va = find_vmap_area(addr);
583 BUG_ON(!va);
584 free_unmap_vmap_area(va);
585}
586
587
588/*** Per cpu kva allocator ***/
589
590/*
591 * vmap space is limited especially on 32 bit architectures. Ensure there is
592 * room for at least 16 percpu vmap blocks per CPU.
593 */
594/*
595 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
596 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
597 * instead (we just need a rough idea)
598 */
599#if BITS_PER_LONG == 32
600#define VMALLOC_SPACE (128UL*1024*1024)
601#else
602#define VMALLOC_SPACE (128UL*1024*1024*1024)
603#endif
604
605#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
606#define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
607#define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
608#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
609#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
610#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
611#define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
612 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
613 VMALLOC_PAGES / NR_CPUS / 16))
614
615#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
616
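/*
 * Worked example (illustrative only): on 32-bit with 4K pages and
 * NR_CPUS == 4, VMALLOC_PAGES = 128MB / 4KB = 32768, so
 * VMALLOC_PAGES / NR_CPUS / 16 = 512.  Clamping that between
 * VMAP_BBMAP_BITS_MIN (64) and VMAP_BBMAP_BITS_MAX (1024) gives
 * VMAP_BBMAP_BITS = 512, i.e. a VMAP_BLOCK_SIZE of 2MB per block.
 */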
617static bool vmap_initialized __read_mostly = false;
618
619struct vmap_block_queue {
620 spinlock_t lock;
621 struct list_head free;
622 struct list_head dirty;
623 unsigned int nr_dirty;
624};
625
626struct vmap_block {
627 spinlock_t lock;
628 struct vmap_area *va;
629 struct vmap_block_queue *vbq;
630 unsigned long free, dirty;
631 DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
632 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
633 union {
634 struct {
635 struct list_head free_list;
636 struct list_head dirty_list;
637 };
638 struct rcu_head rcu_head;
639 };
640};
641
642/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
643static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
644
645/*
646 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
647 * in the free path. Could get rid of this if we change the API to return a
648 * "cookie" from alloc, to be passed to free. But no big deal yet.
649 */
650static DEFINE_SPINLOCK(vmap_block_tree_lock);
651static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
652
653/*
654 * We should probably have a fallback mechanism to allocate virtual memory
655 * out of partially filled vmap blocks. However vmap block sizing should be
656 * fairly reasonable according to the vmalloc size, so it shouldn't be a
657 * big problem.
658 */
659
660static unsigned long addr_to_vb_idx(unsigned long addr)
661{
662 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
663 addr /= VMAP_BLOCK_SIZE;
664 return addr;
665}
666
667static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
668{
669 struct vmap_block_queue *vbq;
670 struct vmap_block *vb;
671 struct vmap_area *va;
672 unsigned long vb_idx;
673 int node, err;
674
675 node = numa_node_id();
676
677 vb = kmalloc_node(sizeof(struct vmap_block),
678 gfp_mask & GFP_RECLAIM_MASK, node);
679 if (unlikely(!vb))
680 return ERR_PTR(-ENOMEM);
681
682 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
683 VMALLOC_START, VMALLOC_END,
684 node, gfp_mask);
685 if (unlikely(IS_ERR(va))) {
686 kfree(vb);
687 return ERR_PTR(PTR_ERR(va));
688 }
689
690 err = radix_tree_preload(gfp_mask);
691 if (unlikely(err)) {
692 kfree(vb);
693 free_vmap_area(va);
694 return ERR_PTR(err);
695 }
696
697 spin_lock_init(&vb->lock);
698 vb->va = va;
699 vb->free = VMAP_BBMAP_BITS;
700 vb->dirty = 0;
701 bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
702 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
703 INIT_LIST_HEAD(&vb->free_list);
704 INIT_LIST_HEAD(&vb->dirty_list);
705
706 vb_idx = addr_to_vb_idx(va->va_start);
707 spin_lock(&vmap_block_tree_lock);
708 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
709 spin_unlock(&vmap_block_tree_lock);
710 BUG_ON(err);
711 radix_tree_preload_end();
712
713 vbq = &get_cpu_var(vmap_block_queue);
714 vb->vbq = vbq;
715 spin_lock(&vbq->lock);
716 list_add(&vb->free_list, &vbq->free);
717 spin_unlock(&vbq->lock);
718 put_cpu_var(vmap_block_queue);
719
720 return vb;
721}
722
723static void rcu_free_vb(struct rcu_head *head)
724{
725 struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
726
727 kfree(vb);
728}
729
730static void free_vmap_block(struct vmap_block *vb)
731{
732 struct vmap_block *tmp;
733 unsigned long vb_idx;
734
735 spin_lock(&vb->vbq->lock);
736 if (!list_empty(&vb->free_list))
737 list_del(&vb->free_list);
738 if (!list_empty(&vb->dirty_list))
739 list_del(&vb->dirty_list);
740 spin_unlock(&vb->vbq->lock);
741
742 vb_idx = addr_to_vb_idx(vb->va->va_start);
743 spin_lock(&vmap_block_tree_lock);
744 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
745 spin_unlock(&vmap_block_tree_lock);
746 BUG_ON(tmp != vb);
747
748 free_unmap_vmap_area_noflush(vb->va);
749 call_rcu(&vb->rcu_head, rcu_free_vb);
750}
751
752static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
753{
754 struct vmap_block_queue *vbq;
755 struct vmap_block *vb;
756 unsigned long addr = 0;
757 unsigned int order;
758
759 BUG_ON(size & ~PAGE_MASK);
760 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
761 order = get_order(size);
762
763again:
764 rcu_read_lock();
765 vbq = &get_cpu_var(vmap_block_queue);
766 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
767 int i;
768
769 spin_lock(&vb->lock);
770 i = bitmap_find_free_region(vb->alloc_map,
771 VMAP_BBMAP_BITS, order);
772
773 if (i >= 0) {
774 addr = vb->va->va_start + (i << PAGE_SHIFT);
775 BUG_ON(addr_to_vb_idx(addr) !=
776 addr_to_vb_idx(vb->va->va_start));
777 vb->free -= 1UL << order;
778 if (vb->free == 0) {
779 spin_lock(&vbq->lock);
780 list_del_init(&vb->free_list);
781 spin_unlock(&vbq->lock);
782 }
783 spin_unlock(&vb->lock);
784 break;
785 }
786 spin_unlock(&vb->lock);
787 }
788 put_cpu_var(vmap_block_queue);
789 rcu_read_unlock();
790
791 if (!addr) {
792 vb = new_vmap_block(gfp_mask);
793 if (IS_ERR(vb))
794 return vb;
795 goto again;
796 }
797
798 return (void *)addr;
799}
800
801static void vb_free(const void *addr, unsigned long size)
802{
803 unsigned long offset;
804 unsigned long vb_idx;
805 unsigned int order;
806 struct vmap_block *vb;
807
808 BUG_ON(size & ~PAGE_MASK);
809 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
810
811 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
812
813 order = get_order(size);
814
815 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
816
817 vb_idx = addr_to_vb_idx((unsigned long)addr);
818 rcu_read_lock();
819 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
820 rcu_read_unlock();
821 BUG_ON(!vb);
822
823 spin_lock(&vb->lock);
824 bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
825 if (!vb->dirty) {
826 spin_lock(&vb->vbq->lock);
827 list_add(&vb->dirty_list, &vb->vbq->dirty);
828 spin_unlock(&vb->vbq->lock);
829 }
830 vb->dirty += 1UL << order;
831 if (vb->dirty == VMAP_BBMAP_BITS) {
832 BUG_ON(vb->free || !list_empty(&vb->free_list));
833 spin_unlock(&vb->lock);
834 free_vmap_block(vb);
835 } else
836 spin_unlock(&vb->lock);
837}
838
839/**
840 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
841 *
842 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
843 * to amortize TLB flushing overheads. What this means is that any page you
844 * have now, may, in a former life, have been mapped into kernel virtual
845 * address by the vmap layer and so there might be some CPUs with TLB entries
846 * still referencing that page (additional to the regular 1:1 kernel mapping).
847 *
848 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
849 * be sure that none of the pages we have control over will have any aliases
850 * from the vmap layer.
851 */
852void vm_unmap_aliases(void)
853{
854 unsigned long start = ULONG_MAX, end = 0;
855 int cpu;
856 int flush = 0;
857
858 if (unlikely(!vmap_initialized))
859 return;
860
861 for_each_possible_cpu(cpu) {
862 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
863 struct vmap_block *vb;
864
865 rcu_read_lock();
866 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
867 int i;
868
869 spin_lock(&vb->lock);
870 i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
871 while (i < VMAP_BBMAP_BITS) {
872 unsigned long s, e;
873 int j;
874 j = find_next_zero_bit(vb->dirty_map,
875 VMAP_BBMAP_BITS, i);
876
877 s = vb->va->va_start + (i << PAGE_SHIFT);
878 e = vb->va->va_start + (j << PAGE_SHIFT);
879 vunmap_page_range(s, e);
880 flush = 1;
881
882 if (s < start)
883 start = s;
884 if (e > end)
885 end = e;
886
887 i = j;
888 i = find_next_bit(vb->dirty_map,
889 VMAP_BBMAP_BITS, i);
890 }
891 spin_unlock(&vb->lock);
892 }
893 rcu_read_unlock();
894 }
895
896 __purge_vmap_area_lazy(&start, &end, 1, flush);
897}
898EXPORT_SYMBOL_GPL(vm_unmap_aliases);
899
900/**
901 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
902 * @mem: the pointer returned by vm_map_ram
903 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
904 */
905void vm_unmap_ram(const void *mem, unsigned int count)
906{
907 unsigned long size = count << PAGE_SHIFT;
908 unsigned long addr = (unsigned long)mem;
909
910 BUG_ON(!addr);
911 BUG_ON(addr < VMALLOC_START);
912 BUG_ON(addr > VMALLOC_END);
913 BUG_ON(addr & (PAGE_SIZE-1));
914
915 debug_check_no_locks_freed(mem, size);
916
917 if (likely(count <= VMAP_MAX_ALLOC))
918 vb_free(mem, size);
919 else
920 free_unmap_vmap_area_addr(addr);
921}
922EXPORT_SYMBOL(vm_unmap_ram);
923
924/**
925 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
926 * @pages: an array of pointers to the pages to be mapped
927 * @count: number of pages
928 * @node: prefer to allocate data structures on this node
929 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
930 *
931 * Returns: a pointer to the address that has been mapped, or %NULL on failure
932 */
933void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
934{
935 unsigned long size = count << PAGE_SHIFT;
936 unsigned long addr;
937 void *mem;
938
939 if (likely(count <= VMAP_MAX_ALLOC)) {
940 mem = vb_alloc(size, GFP_KERNEL);
941 if (IS_ERR(mem))
942 return NULL;
943 addr = (unsigned long)mem;
944 } else {
945 struct vmap_area *va;
946 va = alloc_vmap_area(size, PAGE_SIZE,
947 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
948 if (IS_ERR(va))
949 return NULL;
950
951 addr = va->va_start;
952 mem = (void *)addr;
953 }
954 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
955 vm_unmap_ram(mem, count);
956 return NULL;
957 }
958 return mem;
959}
960EXPORT_SYMBOL(vm_map_ram);
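/*
 * Illustrative usage sketch (not part of the original file; 'pages' and
 * 'count' are hypothetical): a driver holding an array of page pointers can
 * map them transiently and tear the mapping down again with the pair
 * vm_map_ram()/vm_unmap_ram():
 *
 *	void *va = vm_map_ram(pages, count, -1, PAGE_KERNEL);
 *	if (va) {
 *		... access count << PAGE_SHIFT bytes at va ...
 *		vm_unmap_ram(va, count);
 *	}
 */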
961
962void __init vmalloc_init(void)
963{
964 int i;
965
966 for_each_possible_cpu(i) {
967 struct vmap_block_queue *vbq;
968
969 vbq = &per_cpu(vmap_block_queue, i);
970 spin_lock_init(&vbq->lock);
971 INIT_LIST_HEAD(&vbq->free);
972 INIT_LIST_HEAD(&vbq->dirty);
973 vbq->nr_dirty = 0;
974 }
975
976 vmap_initialized = true;
977}
978
979void unmap_kernel_range(unsigned long addr, unsigned long size)
980{
981 unsigned long end = addr + size;
982 vunmap_page_range(addr, end);
983 flush_tlb_kernel_range(addr, end);
984}
985
986int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
987{
988 unsigned long addr = (unsigned long)area->addr;
989 unsigned long end = addr + area->size - PAGE_SIZE;
990 int err;
991
992 err = vmap_page_range(addr, end, prot, *pages);
993 if (err > 0) {
994 *pages += err;
995 err = 0;
996 }
997
998 return err;
999}
1000EXPORT_SYMBOL_GPL(map_vm_area);
1001
1002/*** Old vmalloc interfaces ***/
1003DEFINE_RWLOCK(vmlist_lock);
1004struct vm_struct *vmlist;
1005
1006static struct vm_struct *__get_vm_area_node(unsigned long size,
1007 unsigned long flags, unsigned long start, unsigned long end,
1008 int node, gfp_t gfp_mask, void *caller)
1009{
1010 struct vmap_area *va;
1011 struct vm_struct *area;
1012 struct vm_struct *tmp, **p;
1013 unsigned long align = 1;
1014
1015 BUG_ON(in_interrupt());
1016 if (flags & VM_IOREMAP) {
1017 int bit = fls(size);
1018
1019 if (bit > IOREMAP_MAX_ORDER)
1020 bit = IOREMAP_MAX_ORDER;
1021 else if (bit < PAGE_SHIFT)
1022 bit = PAGE_SHIFT;
1023
1024 align = 1ul << bit;
1025 }
1026
1027 size = PAGE_ALIGN(size);
1028 if (unlikely(!size))
1029 return NULL;
1030
1031 area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1032 if (unlikely(!area))
1033 return NULL;
1034
1035 /*
1036 * We always allocate a guard page.
1037 */
1038 size += PAGE_SIZE;
1039
1040 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1041 if (IS_ERR(va)) {
1042 kfree(area);
1043 return NULL;
1044 }
1045
1046 area->flags = flags;
1047 area->addr = (void *)va->va_start;
1048 area->size = size;
1049 area->pages = NULL;
1050 area->nr_pages = 0;
1051 area->phys_addr = 0;
1052 area->caller = caller;
1053 va->private = area;
1054 va->flags |= VM_VM_AREA;
1055
1056 write_lock(&vmlist_lock);
1057 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1058 if (tmp->addr >= area->addr)
1059 break;
1060 }
1061 area->next = *p;
1062 *p = area;
1063 write_unlock(&vmlist_lock);
1064
1065 return area;
1066}
1067
1068struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1069 unsigned long start, unsigned long end)
1070{
1071 return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
1072 __builtin_return_address(0));
1073}
1074EXPORT_SYMBOL_GPL(__get_vm_area);
1075
1076/**
1077 * get_vm_area - reserve a contiguous kernel virtual area
1078 * @size: size of the area
1079 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
1080 *
1081 * Search an area of @size in the kernel virtual mapping area,
1082 * and reserve it for our purposes. Returns the area descriptor
1083 * on success or %NULL on failure.
1084 */
1085struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1086{
1087 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
1088 -1, GFP_KERNEL, __builtin_return_address(0));
1089}
1090
1091struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1092 void *caller)
1093{
1094 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
1095 -1, GFP_KERNEL, caller);
1096}
1097
1098struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
1099 int node, gfp_t gfp_mask)
1100{
1101 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
1102 gfp_mask, __builtin_return_address(0));
1103}
1104
1105static struct vm_struct *find_vm_area(const void *addr)
1106{
1107 struct vmap_area *va;
1108
1109 va = find_vmap_area((unsigned long)addr);
1110 if (va && va->flags & VM_VM_AREA)
1111 return va->private;
1112
1113 return NULL;
1114}
1115
1116/**
1117 * remove_vm_area - find and remove a contiguous kernel virtual area
1118 * @addr: base address
1119 *
1120 * Search for the kernel VM area starting at @addr, and remove it.
1121 * This function returns the found VM area, but using it is NOT safe
1122 * on SMP machines, except for its size or flags.
1123 */
1124struct vm_struct *remove_vm_area(const void *addr)
1125{
1126 struct vmap_area *va;
1127
1128 va = find_vmap_area((unsigned long)addr);
1129 if (va && va->flags & VM_VM_AREA) {
1130 struct vm_struct *vm = va->private;
1131 struct vm_struct *tmp, **p;
1132 free_unmap_vmap_area(va);
1133 vm->size -= PAGE_SIZE;
1134
1135 write_lock(&vmlist_lock);
1136 for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
1137 ;
1138 *p = tmp->next;
1139 write_unlock(&vmlist_lock);
1140
1141 return vm;
1142 }
1143 return NULL;
1144}
1145
1146static void __vunmap(const void *addr, int deallocate_pages)
1147{
1148 struct vm_struct *area;
1149
1150 if (!addr)
1151 return;
1152
1153 if ((PAGE_SIZE-1) & (unsigned long)addr) {
1154 WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
1155 return;
1156 }
1157
1158 area = remove_vm_area(addr);
1159 if (unlikely(!area)) {
1160 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1161 addr);
1162 return;
1163 }
1164
1165 debug_check_no_locks_freed(addr, area->size);
1166 debug_check_no_obj_freed(addr, area->size);
1167
1168 if (deallocate_pages) {
1169 int i;
1170
1171 for (i = 0; i < area->nr_pages; i++) {
1172 struct page *page = area->pages[i];
1173
1174 BUG_ON(!page);
1175 __free_page(page);
1176 }
1177
1178 if (area->flags & VM_VPAGES)
1179 vfree(area->pages);
1180 else
1181 kfree(area->pages);
1182 }
1183
1184 kfree(area);
1185 return;
1186}
1187
1188/**
1189 * vfree - release memory allocated by vmalloc()
1190 * @addr: memory base address
1191 *
1192 * Free the virtually contiguous memory area starting at @addr, as
1193 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
1194 * NULL, no operation is performed.
1da177e4 1195 *
1196 * Must not be called in interrupt context.
1197 */
1198void vfree(const void *addr)
1199{
1200 BUG_ON(in_interrupt());
1201 __vunmap(addr, 1);
1202}
1203EXPORT_SYMBOL(vfree);
1204
1205/**
1206 * vunmap - release virtual mapping obtained by vmap()
1207 * @addr: memory base address
1208 *
1209 * Free the virtually contiguous memory area starting at @addr,
1210 * which was created from the page array passed to vmap().
1211 *
1212 * Must not be called in interrupt context.
1213 */
1214void vunmap(const void *addr)
1215{
1216 BUG_ON(in_interrupt());
1217 __vunmap(addr, 0);
1218}
1219EXPORT_SYMBOL(vunmap);
1220
1221/**
1222 * vmap - map an array of pages into virtually contiguous space
1223 * @pages: array of page pointers
1224 * @count: number of pages to map
1225 * @flags: vm_area->flags
1226 * @prot: page protection for the mapping
1227 *
1228 * Maps @count pages from @pages into contiguous kernel virtual
1229 * space.
1230 */
1231void *vmap(struct page **pages, unsigned int count,
1232 unsigned long flags, pgprot_t prot)
1233{
1234 struct vm_struct *area;
1235
1236 if (count > num_physpages)
1237 return NULL;
1238
1239 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1240 __builtin_return_address(0));
1241 if (!area)
1242 return NULL;
1243
1244 if (map_vm_area(area, prot, &pages)) {
1245 vunmap(area->addr);
1246 return NULL;
1247 }
1248
1249 return area->addr;
1250}
1251EXPORT_SYMBOL(vmap);
1252
1253static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1254 int node, void *caller);
1255static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1256 pgprot_t prot, int node, void *caller)
1257{
1258 struct page **pages;
1259 unsigned int nr_pages, array_size, i;
1260
1261 nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1262 array_size = (nr_pages * sizeof(struct page *));
1263
1264 area->nr_pages = nr_pages;
1265 /* Please note that the recursion is strictly bounded. */
1266 if (array_size > PAGE_SIZE) {
1267 pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
1268 PAGE_KERNEL, node, caller);
1269 area->flags |= VM_VPAGES;
1270 } else {
1271 pages = kmalloc_node(array_size,
1272 (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
1273 node);
1274 }
1275 area->pages = pages;
1276 area->caller = caller;
1277 if (!area->pages) {
1278 remove_vm_area(area->addr);
1279 kfree(area);
1280 return NULL;
1281 }
1282
1283 for (i = 0; i < area->nr_pages; i++) {
1284 struct page *page;
1285
1286 if (node < 0)
1287 page = alloc_page(gfp_mask);
1288 else
1289 page = alloc_pages_node(node, gfp_mask, 0);
1290
1291 if (unlikely(!page)) {
1292 /* Successfully allocated i pages, free them in __vunmap() */
1293 area->nr_pages = i;
1294 goto fail;
1295 }
1296 area->pages[i] = page;
1297 }
1298
1299 if (map_vm_area(area, prot, &pages))
1300 goto fail;
1301 return area->addr;
1302
1303fail:
1304 vfree(area->addr);
1305 return NULL;
1306}
1307
1308void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
1309{
1310 return __vmalloc_area_node(area, gfp_mask, prot, -1,
1311 __builtin_return_address(0));
1312}
1313
1314/**
1315 * __vmalloc_node - allocate virtually contiguous memory
1316 * @size: allocation size
1317 * @gfp_mask: flags for the page level allocator
1318 * @prot: protection mask for the allocated pages
1319 * @node: node to use for allocation or -1
1320 * @caller: caller's return address
1321 *
1322 * Allocate enough pages to cover @size from the page level
1323 * allocator with @gfp_mask flags. Map them into contiguous
1324 * kernel virtual space, using a pagetable protection of @prot.
1325 */
1326static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1327 int node, void *caller)
1328{
1329 struct vm_struct *area;
1330
1331 size = PAGE_ALIGN(size);
1332 if (!size || (size >> PAGE_SHIFT) > num_physpages)
1333 return NULL;
1334
1335 area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
1336 node, gfp_mask, caller);
1337
1338 if (!area)
1339 return NULL;
1340
23016969 1341 return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
1342}
1343
1344void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1345{
1346 return __vmalloc_node(size, gfp_mask, prot, -1,
1347 __builtin_return_address(0));
1348}
1349EXPORT_SYMBOL(__vmalloc);
1350
1351/**
1352 * vmalloc - allocate virtually contiguous memory
1353 * @size: allocation size
1354 * Allocate enough pages to cover @size from the page level
1355 * allocator and map them into contiguous kernel virtual space.
1356 *
1357 * For tight control over page level allocator and protection flags
1358 * use __vmalloc() instead.
1359 */
1360void *vmalloc(unsigned long size)
1361{
1362 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1363 -1, __builtin_return_address(0));
1364}
1365EXPORT_SYMBOL(vmalloc);
1366
1367/**
1368 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1369 * @size: allocation size
1370 *
1371 * The resulting memory area is zeroed so it can be mapped to userspace
1372 * without leaking data.
1373 */
1374void *vmalloc_user(unsigned long size)
1375{
1376 struct vm_struct *area;
1377 void *ret;
1378
1379 ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
1380 if (ret) {
1381 area = find_vm_area(ret);
1382 area->flags |= VM_USERMAP;
1383 }
1384 return ret;
1385}
1386EXPORT_SYMBOL(vmalloc_user);
1387
1388/**
1389 * vmalloc_node - allocate memory on a specific node
1390 * @size: allocation size
1391 * @node: numa node
1392 *
1393 * Allocate enough pages to cover @size from the page level
1394 * allocator and map them into contiguous kernel virtual space.
1395 *
1396 * For tight control over page level allocator and protection flags
1397 * use __vmalloc() instead.
1398 */
1399void *vmalloc_node(unsigned long size, int node)
1400{
1401 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1402 node, __builtin_return_address(0));
1403}
1404EXPORT_SYMBOL(vmalloc_node);
1405
1406#ifndef PAGE_KERNEL_EXEC
1407# define PAGE_KERNEL_EXEC PAGE_KERNEL
1408#endif
1409
1410/**
1411 * vmalloc_exec - allocate virtually contiguous, executable memory
1412 * @size: allocation size
1413 *
1414 * Kernel-internal function to allocate enough pages to cover @size
1415 * from the page level allocator and map them into contiguous and
1416 * executable kernel virtual space.
1417 *
1418 * For tight control over page level allocator and protection flags
1419 * use __vmalloc() instead.
1420 */
1421
1422void *vmalloc_exec(unsigned long size)
1423{
1424 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
1425}
1426
1427#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1428#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1429#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1430#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1431#else
1432#define GFP_VMALLOC32 GFP_KERNEL
1433#endif
1434
1435/**
1436 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
1437 * @size: allocation size
1438 *
1439 * Allocate enough 32bit PA addressable pages to cover @size from the
1440 * page level allocator and map them into contiguous kernel virtual space.
1441 */
1442void *vmalloc_32(unsigned long size)
1443{
1444 return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
1445}
1446EXPORT_SYMBOL(vmalloc_32);
1447
1448/**
1449 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
1450 * @size: allocation size
1451 *
1452 * The resulting memory area is 32bit addressable and zeroed so it can be
1453 * mapped to userspace without leaking data.
1454 */
1455void *vmalloc_32_user(unsigned long size)
1456{
1457 struct vm_struct *area;
1458 void *ret;
1459
1460 ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
1461 if (ret) {
1462 area = find_vm_area(ret);
1463 area->flags |= VM_USERMAP;
1464 }
1465 return ret;
1466}
1467EXPORT_SYMBOL(vmalloc_32_user);
1468
1469long vread(char *buf, char *addr, unsigned long count)
1470{
1471 struct vm_struct *tmp;
1472 char *vaddr, *buf_start = buf;
1473 unsigned long n;
1474
1475 /* Don't allow overflow */
1476 if ((unsigned long) addr + count < count)
1477 count = -(unsigned long) addr;
1478
1479 read_lock(&vmlist_lock);
1480 for (tmp = vmlist; tmp; tmp = tmp->next) {
1481 vaddr = (char *) tmp->addr;
1482 if (addr >= vaddr + tmp->size - PAGE_SIZE)
1483 continue;
1484 while (addr < vaddr) {
1485 if (count == 0)
1486 goto finished;
1487 *buf = '\0';
1488 buf++;
1489 addr++;
1490 count--;
1491 }
1492 n = vaddr + tmp->size - PAGE_SIZE - addr;
1493 do {
1494 if (count == 0)
1495 goto finished;
1496 *buf = *addr;
1497 buf++;
1498 addr++;
1499 count--;
1500 } while (--n > 0);
1501 }
1502finished:
1503 read_unlock(&vmlist_lock);
1504 return buf - buf_start;
1505}
1506
1507long vwrite(char *buf, char *addr, unsigned long count)
1508{
1509 struct vm_struct *tmp;
1510 char *vaddr, *buf_start = buf;
1511 unsigned long n;
1512
1513 /* Don't allow overflow */
1514 if ((unsigned long) addr + count < count)
1515 count = -(unsigned long) addr;
1516
1517 read_lock(&vmlist_lock);
1518 for (tmp = vmlist; tmp; tmp = tmp->next) {
1519 vaddr = (char *) tmp->addr;
1520 if (addr >= vaddr + tmp->size - PAGE_SIZE)
1521 continue;
1522 while (addr < vaddr) {
1523 if (count == 0)
1524 goto finished;
1525 buf++;
1526 addr++;
1527 count--;
1528 }
1529 n = vaddr + tmp->size - PAGE_SIZE - addr;
1530 do {
1531 if (count == 0)
1532 goto finished;
1533 *addr = *buf;
1534 buf++;
1535 addr++;
1536 count--;
1537 } while (--n > 0);
1538 }
1539finished:
1540 read_unlock(&vmlist_lock);
1541 return buf - buf_start;
1542}
1543
1544/**
1545 * remap_vmalloc_range - map vmalloc pages to userspace
1546 * @vma: vma to cover (map full range of vma)
1547 * @addr: vmalloc memory
1548 * @pgoff: number of pages into addr before first page to map
1549 *
1550 * Returns: 0 for success, -Exxx on failure
1551 *
1552 * This function checks that addr is a valid vmalloc'ed area, and
1553 * that it is big enough to cover the vma. Will return failure if
1554 * that criteria isn't met.
1555 *
1556 * Similar to remap_pfn_range() (see mm/memory.c)
1557 */
1558int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1559 unsigned long pgoff)
1560{
1561 struct vm_struct *area;
1562 unsigned long uaddr = vma->vm_start;
1563 unsigned long usize = vma->vm_end - vma->vm_start;
1564
1565 if ((PAGE_SIZE-1) & (unsigned long)addr)
1566 return -EINVAL;
1567
1568 area = find_vm_area(addr);
1569 if (!area)
1570 return -EINVAL;
1571
1572 if (!(area->flags & VM_USERMAP))
1573 return -EINVAL;
1574
1575 if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
1576 return -EINVAL;
1577
1578 addr += pgoff << PAGE_SHIFT;
1579 do {
1580 struct page *page = vmalloc_to_page(addr);
1581 int ret;
1582
1583 ret = vm_insert_page(vma, uaddr, page);
1584 if (ret)
1585 return ret;
1586
1587 uaddr += PAGE_SIZE;
1588 addr += PAGE_SIZE;
1589 usize -= PAGE_SIZE;
1590 } while (usize > 0);
1591
1592 /* Prevent "things" like memory migration? VM_flags need a cleanup... */
1593 vma->vm_flags |= VM_RESERVED;
1594
1595 return 0;
1596}
1597EXPORT_SYMBOL(remap_vmalloc_range);
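/*
 * Illustrative sketch (not part of the original file; 'my_buf' and the
 * handler name are hypothetical): a character device exporting a buffer
 * obtained from vmalloc_user() would typically call remap_vmalloc_range()
 * from its ->mmap handler, here mapping from the start of the buffer:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, 0);
 *	}
 */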
1598
1599/*
1600 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
1601 * have one.
1602 */
1603void __attribute__((weak)) vmalloc_sync_all(void)
1604{
1605}
1606
1607
1608static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
1609{
1610 /* apply_to_page_range() does all the hard work. */
1611 return 0;
1612}
1613
1614/**
1615 * alloc_vm_area - allocate a range of kernel address space
1616 * @size: size of the area
1617 *
1618 * Returns: NULL on failure, vm_struct on success
1619 *
1620 * This function reserves a range of kernel address space, and
1621 * allocates pagetables to map that range. No actual mappings
1622 * are created. If the kernel address space is not shared
1623 * between processes, it syncs the pagetable across all
1624 * processes.
1625 */
1626struct vm_struct *alloc_vm_area(size_t size)
1627{
1628 struct vm_struct *area;
1629
1630 area = get_vm_area_caller(size, VM_IOREMAP,
1631 __builtin_return_address(0));
1632 if (area == NULL)
1633 return NULL;
1634
1635 /*
1636 * This ensures that page tables are constructed for this region
1637 * of kernel virtual address space and mapped into init_mm.
1638 */
1639 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
1640 area->size, f, NULL)) {
1641 free_vm_area(area);
1642 return NULL;
1643 }
1644
1645 /* Make sure the pagetables are constructed in process kernel
1646 mappings */
1647 vmalloc_sync_all();
1648
1649 return area;
1650}
1651EXPORT_SYMBOL_GPL(alloc_vm_area);
1652
1653void free_vm_area(struct vm_struct *area)
1654{
1655 struct vm_struct *ret;
1656 ret = remove_vm_area(area->addr);
1657 BUG_ON(ret != area);
1658 kfree(area);
1659}
1660EXPORT_SYMBOL_GPL(free_vm_area);
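/*
 * Illustrative pairing (not part of the original file): a caller that only
 * needs a reserved, pagetable-backed range of kernel address space, e.g. to
 * let a hypervisor install mappings into it, could use the two functions
 * above roughly like this:
 *
 *	struct vm_struct *vm = alloc_vm_area(PAGE_SIZE);
 *	if (vm) {
 *		... ask the hypervisor to map something at vm->addr ...
 *		free_vm_area(vm);
 *	}
 */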
1661
1662
1663#ifdef CONFIG_PROC_FS
1664static void *s_start(struct seq_file *m, loff_t *pos)
1665{
1666 loff_t n = *pos;
1667 struct vm_struct *v;
1668
1669 read_lock(&vmlist_lock);
1670 v = vmlist;
1671 while (n > 0 && v) {
1672 n--;
1673 v = v->next;
1674 }
1675 if (!n)
1676 return v;
1677
1678 return NULL;
1679
1680}
1681
1682static void *s_next(struct seq_file *m, void *p, loff_t *pos)
1683{
1684 struct vm_struct *v = p;
1685
1686 ++*pos;
1687 return v->next;
1688}
1689
1690static void s_stop(struct seq_file *m, void *p)
1691{
1692 read_unlock(&vmlist_lock);
1693}
1694
1695static void show_numa_info(struct seq_file *m, struct vm_struct *v)
1696{
1697 if (NUMA_BUILD) {
1698 unsigned int nr, *counters = m->private;
1699
1700 if (!counters)
1701 return;
1702
1703 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
1704
1705 for (nr = 0; nr < v->nr_pages; nr++)
1706 counters[page_to_nid(v->pages[nr])]++;
1707
1708 for_each_node_state(nr, N_HIGH_MEMORY)
1709 if (counters[nr])
1710 seq_printf(m, " N%u=%u", nr, counters[nr]);
1711 }
1712}
1713
1714static int s_show(struct seq_file *m, void *p)
1715{
1716 struct vm_struct *v = p;
1717
1718 seq_printf(m, "0x%p-0x%p %7ld",
1719 v->addr, v->addr + v->size, v->size);
1720
1721 if (v->caller) {
1722 char buff[KSYM_SYMBOL_LEN];
1723
1724 seq_putc(m, ' ');
1725 sprint_symbol(buff, (unsigned long)v->caller);
1726 seq_puts(m, buff);
1727 }
1728
1729 if (v->nr_pages)
1730 seq_printf(m, " pages=%d", v->nr_pages);
1731
1732 if (v->phys_addr)
1733 seq_printf(m, " phys=%lx", v->phys_addr);
1734
1735 if (v->flags & VM_IOREMAP)
1736 seq_printf(m, " ioremap");
1737
1738 if (v->flags & VM_ALLOC)
1739 seq_printf(m, " vmalloc");
1740
1741 if (v->flags & VM_MAP)
1742 seq_printf(m, " vmap");
1743
1744 if (v->flags & VM_USERMAP)
1745 seq_printf(m, " user");
1746
1747 if (v->flags & VM_VPAGES)
1748 seq_printf(m, " vpages");
1749
1750 show_numa_info(m, v);
1751 seq_putc(m, '\n');
1752 return 0;
1753}
1754
1755static const struct seq_operations vmalloc_op = {
1756 .start = s_start,
1757 .next = s_next,
1758 .stop = s_stop,
1759 .show = s_show,
1760};
1761
1762static int vmalloc_open(struct inode *inode, struct file *file)
1763{
1764 unsigned int *ptr = NULL;
1765 int ret;
1766
1767 if (NUMA_BUILD)
1768 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
1769 ret = seq_open(file, &vmalloc_op);
1770 if (!ret) {
1771 struct seq_file *m = file->private_data;
1772 m->private = ptr;
1773 } else
1774 kfree(ptr);
1775 return ret;
1776}
1777
1778static const struct file_operations proc_vmalloc_operations = {
1779 .open = vmalloc_open,
1780 .read = seq_read,
1781 .llseek = seq_lseek,
1782 .release = seq_release_private,
1783};
1784
1785static int __init proc_vmalloc_init(void)
1786{
1787 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
1788 return 0;
1789}
1790module_init(proc_vmalloc_init);
1791#endif
1792