mm/hugetlb.c

/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

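/*
 * Take a free huge page off a per-node free list, walking the zonelist
 * for the faulting VMA and address (so NUMA policy is honoured) and
 * skipping zones the current cpuset does not allow.  Must be called
 * with hugetlb_lock held.
 */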
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

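/*
 * Compound page destructor, run when the last reference to a huge page
 * is dropped: the page goes back onto its node's free list instead of
 * returning to the buddy allocator.
 */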
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

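/*
 * Allocate a fresh huge page from the buddy allocator and donate it to
 * the hugetlb pool.  Allocations are spread across the online nodes via
 * the static 'nid' cursor.  Returns 1 on success, 0 on failure.
 */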
static int alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = next_node(nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

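/*
 * Allocate a huge page from the pool for the given mapping.  Shared
 * (VM_MAYSHARE) mappings consume one of the pages reserved for them;
 * private mappings may only use pages that are not needed to satisfy
 * outstanding reservations.
 */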
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

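/*
 * Sum a per-node counter array over the nodes the current task's cpuset
 * allows; used to sanity-check reservations against the memory the task
 * can actually use.
 */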
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
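/*
 * Return a huge page to the buddy allocator: scrub the per-subpage flags,
 * clear page[1].lru.next (where the compound destructor is kept) and free
 * the whole HUGETLB_PAGE_ORDER block.  Caller holds hugetlb_lock.
 */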
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

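/*
 * Grow or shrink the hugetlb pool to 'count' pages.  Growing stops early
 * if no fresh huge page can be allocated; shrinking never goes below the
 * number of reserved pages and, on CONFIG_HIGHMEM, releases lowmem pages
 * first.  Returns the resulting pool size.
 */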
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free: %5lu\n"
			"HugePages_Rsvd: %5lu\n"
			"Hugepagesize: %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free: %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}

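/*
 * Copy the huge-page portion of a page table at fork() time.  For
 * private writable mappings both the parent and the child pte are made
 * read-only, so a later write triggers hugetlb_cow().
 */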
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

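/*
 * Tear down the huge ptes in [start, end).  The caller must hold the
 * file's i_mmap_lock; unmap_hugepage_range() below is the wrapper that
 * takes it.
 */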
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

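/*
 * Handle a write fault on a read-only huge pte.  If this mapping is the
 * only user of the page just make the pte writable; otherwise allocate
 * a new huge page, copy the old contents into it and install it.
 * Called and returns with mm->page_table_lock held (it is dropped
 * around the page copy).
 */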
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

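/*
 * Fault in a huge page that has no pte yet: find or create the page in
 * the page cache (or allocate a private copy for !VM_SHARED mappings),
 * then install the huge pte.  Called with the hugetlb_instantiation_mutex
 * held by hugetlb_fault().
 */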
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

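/*
 * Top-level fault handler for hugetlb VMAs: allocate the huge pte, then
 * either instantiate a missing page or perform copy-on-write.
 */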
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

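/*
 * Walk a user address range on behalf of get_user_pages(): fault huge
 * pages in as needed and fill in the pages[] and vmas[] arrays.  Returns
 * the updated page count, or -EFAULT if a fault fails before anything
 * has been captured.
 */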
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

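/*
 * mprotect() support for hugetlb mappings: rewrite every huge pte in the
 * range with the new protection bits, then flush the TLB for the range.
 */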
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

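/*
 * Huge page reservations are tracked per inode as a sorted list of
 * [from, to) file_region entries on i_mapping->private_list, in units of
 * huge pages.  region_chg() reports how many extra pages a reservation
 * would need, region_add() commits it, and region_truncate() drops
 * everything beyond a truncation point.
 */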
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

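/*
 * Try to set aside 'delta' more huge pages for reservations.  Succeeds
 * only if the free pool is large enough to back every reservation.
 */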
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpusets
	 * because it is not checked against page availability for the
	 * current cpuset. Applications can still be OOM-killed by the
	 * kernel if the cpuset the task runs in has no free huge pages
	 * left. Enforcing strict accounting with cpusets is almost
	 * impossible (or too ugly) because cpusets are too fluid: tasks
	 * and memory nodes can be moved between cpusets at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking against the current free
	 * page availability as a best effort, hopefully minimizing the
	 * impact of the semantic change that cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}