/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}
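
/*
 * unmap_vm_area() flushes the cache for the whole range before the page
 * tables are torn down and flushes the TLB afterwards; the vmap_*()
 * walkers below are its mirror image, populating the same
 * pgd -> pud -> pmd -> pte hierarchy one level at a time.
 */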

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
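
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * map_vm_area() takes the address of a cursor into the caller's page
 * array and advances it one entry per PTE installed, which is how
 * __vmalloc_area_node() and vmap() below drive it. Note that the
 * guard page at the end of area->size is deliberately left unmapped.
 */
static inline int map_vm_area_example(struct vm_struct *area,
					struct page **page_array)
{
	struct page **cursor = page_array;	/* advanced by map_vm_area() */

	return map_vm_area(area, PAGE_KERNEL, &cursor);
}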

struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end, int node)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}
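
/*
 * The PAGE_SIZE added above is the guard page: it stays unmapped so that
 * overruns off the end of a vmalloc'ed region fault instead of silently
 * corrupting the next area. __remove_vm_area() subtracts it again before
 * handing the area back to the caller.
 */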

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
}

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}
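
/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * reserve/release pairing. remove_vm_area() unmaps and unlinks the
 * area but does not free the descriptor, so the caller kfree()s it.
 */
static inline void vm_area_roundtrip_example(void)
{
	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);

	if (area)
		kfree(remove_vm_area(area->addr));
}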

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
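
/*
 * Illustrative sketch (hypothetical, not part of this file): mapping two
 * already-allocated pages back to back in virtual space. VM_MAP is
 * assumed to be defined in this tree's <linux/vmalloc.h>; the pages
 * themselves remain the caller's to free after vunmap().
 */
static inline void *vmap_pair_example(struct page *a, struct page *b)
{
	struct page *pages[2] = { a, b };

	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
}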

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
	else
		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}
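
/*
 * Why the recursion above is bounded: when the struct page pointer array
 * itself needs more than a page, it is allocated with __vmalloc_node(),
 * whose own metadata array is smaller by a factor of roughly
 * PAGE_SIZE / sizeof(struct page *), so the nesting terminates after a
 * couple of levels even for very large allocations.
 */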

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}
EXPORT_SYMBOL(__vmalloc_node);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
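
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * usual allocate/use/free pattern. vmalloc() may sleep, so this must run
 * in process context; the freshly mapped pages are not zeroed.
 */
static inline int vmalloc_roundtrip_example(unsigned long bytes)
{
	void *buf = vmalloc(bytes);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, bytes);	/* clear before use; vmalloc() does not */
	vfree(buf);
	return 0;
}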

/**
 * vmalloc_node - allocate memory on a specific node
 *
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
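
/*
 * Note on vmalloc_32(): the only difference from vmalloc() is the
 * absence of __GFP_HIGHMEM in the mask, so pages come from the kernel's
 * directly mapped zones; on typical 32-bit configurations that keeps
 * them below the 4GB physical boundary.
 */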

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
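
/*
 * Note the asymmetry between the two copy loops: vread() fills bytes
 * that fall in the gaps between vmalloc areas with '\0', while vwrite()
 * below silently skips them; both stop short of each area's trailing
 * guard page and report how far the buffer cursor advanced.
 */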

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}