/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *              Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *              (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
                                        1;
#else
                                        2;
#endif

static int __init disable_randmaps(char *s)
{
        randomize_va_space = 0;
        return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
}
core_initcall(init_zero_pfn);

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  These helpers
 * are called, very seldom, from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                           unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                free_pmd_range(tlb, pud, addr, next, floor, ceiling);
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long start;

        /*
         * The next few lines have given us lots of grief...
         *
         * Why are we testing PMD* at this top level?  Because often
         * there will be no work to do at all, and we'd prefer not to
         * go all the way down to the bottom just to discover that.
         *
         * Why all these "- 1"s?  Because 0 represents both the bottom
         * of the address space and the top of it (using -1 for the
         * top wouldn't help much: the masks would do the wrong thing).
         * The rule is that addr 0 and floor 0 refer to the bottom of
         * the address space, but end 0 and ceiling 0 refer to the top.
         * Comparisons need to use "end - 1" and "ceiling - 1" (though
         * that end 0 case should be mythical).
         *
         * Wherever addr is brought up or ceiling brought down, we must
         * be careful to reject "the opposite 0" before it confuses the
         * subsequent tests.  But what about where end is brought down
         * by PMD_SIZE below? no, end can't go down to 0 there.
         *
         * Whereas we round start (addr) and ceiling down, by different
         * masks at different levels, in order to test whether a table
         * now has no other vmas using it, so can be freed, we don't
         * bother to round floor or end up - the tests don't need that.
         */

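        /*
         * Illustrative walk-through with assumed example values (not
         * from the original comment): take PMD_SIZE = 2MB,
         * addr = 0x2ff000, end = 0x500000, floor = 0x100000 and
         * ceiling = 0x600000.  addr rounds down to 0x200000, which is
         * not below floor; ceiling is already PMD-aligned, so the
         * masking leaves it alone; end - 1 (0x4fffff) does not exceed
         * ceiling - 1, so end is not pulled back; and the walk below
         * then frees the page tables covering [0x200000, 0x500000).
         */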
        addr &= PMD_MASK;
        if (addr < floor) {
                addr += PMD_SIZE;
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= PMD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        if (addr > end - 1)
                return;

        start = addr;
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long floor, unsigned long ceiling)
{
        while (vma) {
                struct vm_area_struct *next = vma->vm_next;
                unsigned long addr = vma->vm_start;

                /*
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
                anon_vma_unlink(vma);
                unlink_file_vma(vma);

                if (is_vm_hugetlb_page(vma)) {
                        hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next? next->vm_start: ceiling);
                } else {
                        /*
                         * Optimization: gather nearby vmas into one call down
                         */
                        while (next && next->vm_start <= vma->vm_end + PMD_SIZE
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = vma->vm_next;
                                anon_vma_unlink(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next? next->vm_start: ceiling);
                }
                vma = next;
        }
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
        pgtable_t new = pte_alloc_one(mm, address);
        if (!new)
                return -ENOMEM;

        /*
         * Ensure all pte setup (eg. pte page lock and page clearing) are
         * visible before the pte is made visible to other CPUs by being
         * put into page tables.
         *
         * The other side of the story is the pointer chasing in the page
         * table walking code (when walking the page table without locking;
         * ie. most of the time). Fortunately, these data accesses consist
         * of a chain of data-dependent loads, meaning most CPUs (alpha
         * being the notable exception) will already guarantee loads are
         * seen in-order. See the alpha page table accessors for the
         * smp_read_barrier_depends() barriers in page table walking code.
         */
        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

        spin_lock(&mm->page_table_lock);
        if (!pmd_present(*pmd)) {       /* Has another populated it? */
                mm->nr_ptes++;
                pmd_populate(mm, pmd, new);
                new = NULL;
        }
        spin_unlock(&mm->page_table_lock);
        if (new)
                pte_free(mm, new);
        return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        pte_t *new = pte_alloc_one_kernel(&init_mm, address);
        if (!new)
                return -ENOMEM;

        smp_wmb(); /* See comment in __pte_alloc */

        spin_lock(&init_mm.page_table_lock);
        if (!pmd_present(*pmd)) {       /* Has another populated it? */
                pmd_populate_kernel(&init_mm, pmd, new);
                new = NULL;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (new)
                pte_free_kernel(&init_mm, new);
        return 0;
}

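/*
 * RSS deltas are accumulated in a small local vector during a page
 * table walk and folded into the mm counters in one batch, keeping
 * counter updates off the per-pte hot path.
 */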
static inline void init_rss_vec(int *rss)
{
        memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
        int i;

        for (i = 0; i < NR_MM_COUNTERS; i++)
                if (rss[i])
                        add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                          pte_t pte, struct page *page)
{
        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);
        struct address_space *mapping;
        pgoff_t index;
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        return;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                                "BUG: Bad page map: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
        index = linear_page_index(vma, addr);

        printk(KERN_ALERT
                "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                current->comm,
                (long long)pte_val(pte), (long long)pmd_val(*pmd));
        if (page) {
                printk(KERN_ALERT
                "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
                page, (void *)page->flags, page_count(page),
                page_mapcount(page), page->mapping, page->index);
        }
        printk(KERN_ALERT
                "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
                (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
        /*
         * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
         */
        if (vma->vm_ops)
                print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
                                (unsigned long)vma->vm_ops->fault);
        if (vma->vm_file && vma->vm_file->f_op)
                print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
                                (unsigned long)vma->vm_file->f_op->mmap);
        dump_stack();
        add_taint(TAINT_BAD_PAGE);
}

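/*
 * A mapping is copy-on-write when it may be written to but is not
 * shared: VM_MAYWRITE set, VM_SHARED clear.
 */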
static inline int is_cow_mapping(unsigned int flags)
{
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

#ifndef is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
        return pfn == zero_pfn;
}
#endif

#ifndef my_zero_pfn
static inline unsigned long my_zero_pfn(unsigned long addr)
{
        return zero_pfn;
}
#endif

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
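/*
 * Worked example of the linearity rule above, with assumed values
 * added for illustration: a driver that remaps pfns 0x80000..0x800ff
 * across a whole vma ends up with vm_pgoff == 0x80000, so the pte at
 * vm_start + 3 * PAGE_SIZE holds pfn 0x80003 == vm_pgoff + 3 and is
 * recognized as special.  If one of those pages is later COWed, the
 * replacement page's pfn breaks the equality, so vm_normal_page()
 * reports it as a normal page.
 */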
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                                pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        if (HAVE_PTE_SPECIAL) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                        return NULL;
                if (!is_zero_pfn(pfn))
                        print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /* !HAVE_PTE_SPECIAL case follows: */

        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
                        unsigned long off;
                        off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
                                return NULL;
                }
        }

        if (is_zero_pfn(pfn))
                return NULL;
check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /*
         * NOTE! We still have PageReserved() pages in the page tables.
         * eg. VDSO mappings can cause them to exist.
         */
out:
        return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
                unsigned long addr, int *rss)
{
        unsigned long vm_flags = vma->vm_flags;
        pte_t pte = *src_pte;
        struct page *page;

        /* pte contains position in swap or file, so copy. */
        if (unlikely(!pte_present(pte))) {
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);

                        if (swap_duplicate(entry) < 0)
                                return entry.val;

                        /* make sure dst_mm is on swapoff's mmlist. */
                        if (unlikely(list_empty(&dst_mm->mmlist))) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&dst_mm->mmlist))
                                        list_add(&dst_mm->mmlist,
                                                 &src_mm->mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                        if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
                                /*
                                 * COW mappings require pages in both the
                                 * parent and the child to be read-only.
                                 */
                                make_migration_entry_read(&entry);
                                pte = swp_entry_to_pte(entry);
                                set_pte_at(src_mm, addr, src_pte, pte);
                        }
                }
                goto out_set_pte;
        }

        /*
         * If it's a COW mapping, write protect it both
         * in the parent and the child
         */
        if (is_cow_mapping(vm_flags)) {
                ptep_set_wrprotect(src_mm, addr, src_pte);
                pte = pte_wrprotect(pte);
        }

        /*
         * If it's a shared mapping, mark it clean in
         * the child
         */
        if (vm_flags & VM_SHARED)
                pte = pte_mkclean(pte);
        pte = pte_mkold(pte);

        page = vm_normal_page(vma, addr, pte);
        if (page) {
                get_page(page);
                page_dup_rmap(page);
                if (PageAnon(page))
                        rss[MM_ANONPAGES]++;
                else
                        rss[MM_FILEPAGES]++;
        }

out_set_pte:
        set_pte_at(dst_mm, addr, dst_pte, pte);
        return 0;
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
                unsigned long addr, unsigned long end)
{
        pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};

again:
        init_rss_vec(rss);

        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
        src_pte = pte_offset_map_nested(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
        orig_src_pte = src_pte;
        orig_dst_pte = dst_pte;
        arch_enter_lazy_mmu_mode();

        do {
                /*
                 * We are holding two locks at this point - either of them
                 * could generate latencies in another task on another CPU.
                 */
                if (progress >= 32) {
                        progress = 0;
                        if (need_resched() ||
                            spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
                                break;
                }
                if (pte_none(*src_pte)) {
                        progress++;
                        continue;
                }
                entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
                                                        vma, addr, rss);
                if (entry.val)
                        break;
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
        pte_unmap_nested(orig_src_pte);
        add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();

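        /*
         * A non-zero entry.val means copy_one_pte() hit a swap entry
         * whose reference count needs a continuation page: allocate
         * it now that the page-table locks are dropped, then restart
         * the batch at the same pte.
         */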
        if (entry.val) {
                if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
                        return -ENOMEM;
                progress = 0;
        }
        if (addr != end)
                goto again;
        return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
                unsigned long addr, unsigned long end)
{
        pmd_t *src_pmd, *dst_pmd;
        unsigned long next;

        dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
        if (!dst_pmd)
                return -ENOMEM;
        src_pmd = pmd_offset(src_pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(src_pmd))
                        continue;
                if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
                                                vma, addr, next))
                        return -ENOMEM;
        } while (dst_pmd++, src_pmd++, addr = next, addr != end);
        return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
                unsigned long addr, unsigned long end)
{
        pud_t *src_pud, *dst_pud;
        unsigned long next;

        dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
        if (!dst_pud)
                return -ENOMEM;
        src_pud = pud_offset(src_pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(src_pud))
                        continue;
                if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
                                                vma, addr, next))
                        return -ENOMEM;
        } while (dst_pud++, src_pud++, addr = next, addr != end);
        return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                struct vm_area_struct *vma)
{
        pgd_t *src_pgd, *dst_pgd;
        unsigned long next;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        int ret;

        /*
         * Don't copy ptes where a page fault will fill them correctly.
         * Fork becomes much lighter when there are big shared or private
         * readonly mappings. The tradeoff is that copy_page_range is more
         * efficient than faulting.
         */
        if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
                if (!vma->anon_vma)
                        return 0;
        }

        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);

        if (unlikely(is_pfn_mapping(vma))) {
                /*
                 * We do not free on error cases below as remove_vma
                 * gets called on error from the higher-level routine.
                 */
788                 ret = track_pfn_vma_copy(vma);
789                 if (ret)
790                         return ret;
791         }
792
793         /*
794          * We need to invalidate the secondary MMU mappings only when
795          * there could be a permission downgrade on the ptes of the
796          * parent mm. And a permission downgrade will only happen if
797          * is_cow_mapping() returns true.
798          */
799         if (is_cow_mapping(vma->vm_flags))
800                 mmu_notifier_invalidate_range_start(src_mm, addr, end);
801
802         ret = 0;
803         dst_pgd = pgd_offset(dst_mm, addr);
804         src_pgd = pgd_offset(src_mm, addr);
805         do {
806                 next = pgd_addr_end(addr, end);
807                 if (pgd_none_or_clear_bad(src_pgd))
808                         continue;
809                 if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
810                                             vma, addr, next))) {
811                         ret = -ENOMEM;
812                         break;
813                 }
814         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
815
816         if (is_cow_mapping(vma->vm_flags))
817                 mmu_notifier_invalidate_range_end(src_mm,
818                                                   vma->vm_start, end);
819         return ret;
820 }
821
822 static unsigned long zap_pte_range(struct mmu_gather *tlb,
823                                 struct vm_area_struct *vma, pmd_t *pmd,
824                                 unsigned long addr, unsigned long end,
825                                 long *zap_work, struct zap_details *details)
826 {
827         struct mm_struct *mm = tlb->mm;
828         pte_t *pte;
829         spinlock_t *ptl;
830         int rss[NR_MM_COUNTERS];
831
832         init_rss_vec(rss);
833
834         pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
835         arch_enter_lazy_mmu_mode();
836         do {
837                 pte_t ptent = *pte;
838                 if (pte_none(ptent)) {
839                         (*zap_work)--;
840                         continue;
841                 }
842
843                 (*zap_work) -= PAGE_SIZE;
844
845                 if (pte_present(ptent)) {
846                         struct page *page;
847
848                         page = vm_normal_page(vma, addr, ptent);
849                         if (unlikely(details) && page) {
850                                 /*
851                                  * unmap_shared_mapping_pages() wants to
852                                  * invalidate cache without truncating:
853                                  * unmap shared but keep private pages.
854                                  */
855                                 if (details->check_mapping &&
856                                     details->check_mapping != page->mapping)
857                                         continue;
858                                 /*
859                                  * Each page->index must be checked when
860                                  * invalidating or truncating nonlinear.
861                                  */
862                                 if (details->nonlinear_vma &&
863                                     (page->index < details->first_index ||
864                                      page->index > details->last_index))
865                                         continue;
866                         }
867                         ptent = ptep_get_and_clear_full(mm, addr, pte,
868                                                         tlb->fullmm);
869                         tlb_remove_tlb_entry(tlb, pte, addr);
870                         if (unlikely(!page))
871                                 continue;
872                         if (unlikely(details) && details->nonlinear_vma
873                             && linear_page_index(details->nonlinear_vma,
874                                                 addr) != page->index)
875                                 set_pte_at(mm, addr, pte,
876                                            pgoff_to_pte(page->index));
877                         if (PageAnon(page))
878                                 rss[MM_ANONPAGES]--;
879                         else {
880                                 if (pte_dirty(ptent))
881                                         set_page_dirty(page);
882                                 if (pte_young(ptent) &&
883                                     likely(!VM_SequentialReadHint(vma)))
884                                         mark_page_accessed(page);
885                                 rss[MM_FILEPAGES]--;
886                         }
887                         page_remove_rmap(page);
888                         if (unlikely(page_mapcount(page) < 0))
889                                 print_bad_pte(vma, addr, ptent, page);
890                         tlb_remove_page(tlb, page);
891                         continue;
892                 }
893                 /*
894                  * If details->check_mapping, we leave swap entries;
895                  * if details->nonlinear_vma, we leave file entries.
896                  */
897                 if (unlikely(details))
898                         continue;
899                 if (pte_file(ptent)) {
900                         if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
901                                 print_bad_pte(vma, addr, ptent, NULL);
902                 } else if
903                   (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
904                         print_bad_pte(vma, addr, ptent, NULL);
905                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
906         } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
907
908         add_mm_rss_vec(mm, rss);
909         arch_leave_lazy_mmu_mode();
910         pte_unmap_unlock(pte - 1, ptl);
911
912         return addr;
913 }
914
915 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
916                                 struct vm_area_struct *vma, pud_t *pud,
917                                 unsigned long addr, unsigned long end,
918                                 long *zap_work, struct zap_details *details)
919 {
920         pmd_t *pmd;
921         unsigned long next;
922
923         pmd = pmd_offset(pud, addr);
924         do {
925                 next = pmd_addr_end(addr, end);
926                 if (pmd_none_or_clear_bad(pmd)) {
927                         (*zap_work)--;
928                         continue;
929                 }
930                 next = zap_pte_range(tlb, vma, pmd, addr, next,
931                                                 zap_work, details);
932         } while (pmd++, addr = next, (addr != end && *zap_work > 0));
933
934         return addr;
935 }
936
937 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
938                                 struct vm_area_struct *vma, pgd_t *pgd,
939                                 unsigned long addr, unsigned long end,
940                                 long *zap_work, struct zap_details *details)
941 {
942         pud_t *pud;
943         unsigned long next;
944
945         pud = pud_offset(pgd, addr);
946         do {
947                 next = pud_addr_end(addr, end);
948                 if (pud_none_or_clear_bad(pud)) {
949                         (*zap_work)--;
950                         continue;
951                 }
952                 next = zap_pmd_range(tlb, vma, pud, addr, next,
953                                                 zap_work, details);
954         } while (pud++, addr = next, (addr != end && *zap_work > 0));
955
956         return addr;
957 }
958
959 static unsigned long unmap_page_range(struct mmu_gather *tlb,
960                                 struct vm_area_struct *vma,
961                                 unsigned long addr, unsigned long end,
962                                 long *zap_work, struct zap_details *details)
963 {
964         pgd_t *pgd;
965         unsigned long next;
966
967         if (details && !details->check_mapping && !details->nonlinear_vma)
968                 details = NULL;
969
970         BUG_ON(addr >= end);
971         mem_cgroup_uncharge_start();
972         tlb_start_vma(tlb, vma);
973         pgd = pgd_offset(vma->vm_mm, addr);
974         do {
975                 next = pgd_addr_end(addr, end);
976                 if (pgd_none_or_clear_bad(pgd)) {
977                         (*zap_work)--;
978                         continue;
979                 }
980                 next = zap_pud_range(tlb, vma, pgd, addr, next,
981                                                 zap_work, details);
982         } while (pgd++, addr = next, (addr != end && *zap_work > 0));
983         tlb_end_vma(tlb, vma);
984         mem_cgroup_uncharge_end();
985
986         return addr;
987 }
988
989 #ifdef CONFIG_PREEMPT
990 # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
991 #else
992 /* No preempt: go for improved straight-line efficiency */
993 # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
994 #endif
995
996 /**
997  * unmap_vmas - unmap a range of memory covered by a list of vma's
998  * @tlbp: address of the caller's struct mmu_gather
999  * @vma: the starting vma
1000  * @start_addr: virtual address at which to start unmapping
1001  * @end_addr: virtual address at which to end unmapping
1002  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
1003  * @details: details of nonlinear truncation or shared cache invalidation
1004  *
1005  * Returns the end address of the unmapping (restart addr if interrupted).
1006  *
1007  * Unmap all pages in the vma list.
1008  *
1009  * We aim to not hold locks for too long (for scheduling latency reasons).
1010  * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
1011  * return the ending mmu_gather to the caller.
1012  *
1013  * Only addresses between `start' and `end' will be unmapped.
1014  *
1015  * The VMA list must be sorted in ascending virtual address order.
1016  *
1017  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1018  * range after unmap_vmas() returns.  So the only responsibility here is to
1019  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1020  * drops the lock and schedules.
1021  */
1022 unsigned long unmap_vmas(struct mmu_gather **tlbp,
1023                 struct vm_area_struct *vma, unsigned long start_addr,
1024                 unsigned long end_addr, unsigned long *nr_accounted,
1025                 struct zap_details *details)
1026 {
1027         long zap_work = ZAP_BLOCK_SIZE;
1028         unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
1029         int tlb_start_valid = 0;
1030         unsigned long start = start_addr;
1031         spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
1032         int fullmm = (*tlbp)->fullmm;
1033         struct mm_struct *mm = vma->vm_mm;
1034
1035         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1036         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
1037                 unsigned long end;
1038
1039                 start = max(vma->vm_start, start_addr);
1040                 if (start >= vma->vm_end)
1041                         continue;
1042                 end = min(vma->vm_end, end_addr);
1043                 if (end <= vma->vm_start)
1044                         continue;
1045
1046                 if (vma->vm_flags & VM_ACCOUNT)
1047                         *nr_accounted += (end - start) >> PAGE_SHIFT;
1048
1049                 if (unlikely(is_pfn_mapping(vma)))
1050                         untrack_pfn_vma(vma, 0, 0);
1051
1052                 while (start != end) {
1053                         if (!tlb_start_valid) {
1054                                 tlb_start = start;
1055                                 tlb_start_valid = 1;
1056                         }
1057
1058                         if (unlikely(is_vm_hugetlb_page(vma))) {
1059                                 /*
1060                                  * It is undesirable to test vma->vm_file as it
1061                                  * should be non-null for valid hugetlb area.
1062                                  * However, vm_file will be NULL in the error
1063                                  * cleanup path of do_mmap_pgoff. When
1064                                  * hugetlbfs ->mmap method fails,
1065                                  * do_mmap_pgoff() nullifies vma->vm_file
1066                                  * before calling this function to clean up.
1067                                  * Since no pte has actually been setup, it is
1068                                  * safe to do nothing in this case.
1069                                  */
1070                                 if (vma->vm_file) {
1071                                         unmap_hugepage_range(vma, start, end, NULL);
1072                                         zap_work -= (end - start) /
1073                                         pages_per_huge_page(hstate_vma(vma));
1074                                 }
1075
1076                                 start = end;
1077                         } else
1078                                 start = unmap_page_range(*tlbp, vma,
1079                                                 start, end, &zap_work, details);
1080
1081                         if (zap_work > 0) {
1082                                 BUG_ON(start != end);
1083                                 break;
1084                         }
1085
1086                         tlb_finish_mmu(*tlbp, tlb_start, start);
1087
1088                         if (need_resched() ||
1089                                 (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
1090                                 if (i_mmap_lock) {
1091                                         *tlbp = NULL;
1092                                         goto out;
1093                                 }
1094                                 cond_resched();
1095                         }
1096
1097                         *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
1098                         tlb_start_valid = 0;
1099                         zap_work = ZAP_BLOCK_SIZE;
1100                 }
1101         }
1102 out:
1103         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1104         return start;   /* which is now the end (or restart) address */
1105 }
1106
1107 /**
1108  * zap_page_range - remove user pages in a given range
1109  * @vma: vm_area_struct holding the applicable pages
1110  * @address: starting address of pages to zap
1111  * @size: number of bytes to zap
1112  * @details: details of nonlinear truncation or shared cache invalidation
1113  */
1114 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
1115                 unsigned long size, struct zap_details *details)
1116 {
1117         struct mm_struct *mm = vma->vm_mm;
1118         struct mmu_gather *tlb;
1119         unsigned long end = address + size;
1120         unsigned long nr_accounted = 0;
1121
1122         lru_add_drain();
1123         tlb = tlb_gather_mmu(mm, 0);
1124         update_hiwater_rss(mm);
1125         end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
1126         if (tlb)
1127                 tlb_finish_mmu(tlb, address, end);
1128         return end;
1129 }
1130
1131 /**
1132  * zap_vma_ptes - remove ptes mapping the vma
1133  * @vma: vm_area_struct holding ptes to be zapped
1134  * @address: starting address of pages to zap
1135  * @size: number of bytes to zap
1136  *
1137  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1138  *
1139  * The entire address range must be fully contained within the vma.
1140  *
1141  * Returns 0 if successful.
1142  */
1143 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1144                 unsigned long size)
1145 {
1146         if (address < vma->vm_start || address + size > vma->vm_end ||
1147                         !(vma->vm_flags & VM_PFNMAP))
1148                 return -1;
1149         zap_page_range(vma, address, size, NULL);
1150         return 0;
1151 }
1152 EXPORT_SYMBOL_GPL(zap_vma_ptes);
1153
1154 /*
1155  * Do a quick page-table lookup for a single page.
1156  */
1157 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1158                         unsigned int flags)
1159 {
1160         pgd_t *pgd;
1161         pud_t *pud;
1162         pmd_t *pmd;
1163         pte_t *ptep, pte;
1164         spinlock_t *ptl;
1165         struct page *page;
1166         struct mm_struct *mm = vma->vm_mm;
1167
1168         page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
1169         if (!IS_ERR(page)) {
1170                 BUG_ON(flags & FOLL_GET);
1171                 goto out;
1172         }
1173
1174         page = NULL;
1175         pgd = pgd_offset(mm, address);
1176         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1177                 goto no_page_table;
1178
1179         pud = pud_offset(pgd, address);
1180         if (pud_none(*pud))
1181                 goto no_page_table;
1182         if (pud_huge(*pud)) {
1183                 BUG_ON(flags & FOLL_GET);
1184                 page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
1185                 goto out;
1186         }
1187         if (unlikely(pud_bad(*pud)))
1188                 goto no_page_table;
1189
1190         pmd = pmd_offset(pud, address);
1191         if (pmd_none(*pmd))
1192                 goto no_page_table;
1193         if (pmd_huge(*pmd)) {
1194                 BUG_ON(flags & FOLL_GET);
1195                 page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
1196                 goto out;
1197         }
1198         if (unlikely(pmd_bad(*pmd)))
1199                 goto no_page_table;
1200
1201         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
1202
1203         pte = *ptep;
1204         if (!pte_present(pte))
1205                 goto no_page;
1206         if ((flags & FOLL_WRITE) && !pte_write(pte))
1207                 goto unlock;
1208
1209         page = vm_normal_page(vma, address, pte);
1210         if (unlikely(!page)) {
1211                 if ((flags & FOLL_DUMP) ||
1212                     !is_zero_pfn(pte_pfn(pte)))
1213                         goto bad_page;
1214                 page = pte_page(pte);
1215         }
1216
1217         if (flags & FOLL_GET)
1218                 get_page(page);
1219         if (flags & FOLL_TOUCH) {
1220                 if ((flags & FOLL_WRITE) &&
1221                     !pte_dirty(pte) && !PageDirty(page))
1222                         set_page_dirty(page);
1223                 /*
1224                  * pte_mkyoung() would be more correct here, but atomic care
1225                  * is needed to avoid losing the dirty bit: it is easier to use
1226                  * mark_page_accessed().
1227                  */
1228                 mark_page_accessed(page);
1229         }
1230 unlock:
1231         pte_unmap_unlock(ptep, ptl);
1232 out:
1233         return page;
1234
1235 bad_page:
1236         pte_unmap_unlock(ptep, ptl);
1237         return ERR_PTR(-EFAULT);
1238
1239 no_page:
1240         pte_unmap_unlock(ptep, ptl);
1241         if (!pte_none(pte))
1242                 return page;
1243
1244 no_page_table:
1245         /*
1246          * When core dumping an enormous anonymous area that nobody
1247          * has touched so far, we don't want to allocate unnecessary pages or
1248          * page tables.  Return error instead of NULL to skip handle_mm_fault,
1249          * then get_dump_page() will return NULL to leave a hole in the dump.
1250          * But we can only make this optimization where a hole would surely
1251          * be zero-filled if handle_mm_fault() actually did handle it.
1252          */
1253         if ((flags & FOLL_DUMP) &&
1254             (!vma->vm_ops || !vma->vm_ops->fault))
1255                 return ERR_PTR(-EFAULT);
1256         return page;
1257 }
1258
1259 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1260                      unsigned long start, int nr_pages, unsigned int gup_flags,
1261                      struct page **pages, struct vm_area_struct **vmas)
1262 {
1263         int i;
1264         unsigned long vm_flags;
1265
1266         if (nr_pages <= 0)
1267                 return 0;
1268
1269         VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
1270
1271         /* 
1272          * Require read or write permissions.
1273          * If FOLL_FORCE is set, we only require the "MAY" flags.
1274          */
1275         vm_flags  = (gup_flags & FOLL_WRITE) ?
1276                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1277         vm_flags &= (gup_flags & FOLL_FORCE) ?
1278                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1279         i = 0;
1280
1281         do {
1282                 struct vm_area_struct *vma;
1283
1284                 vma = find_extend_vma(mm, start);
1285                 if (!vma && in_gate_area(tsk, start)) {
1286                         unsigned long pg = start & PAGE_MASK;
1287                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
1288                         pgd_t *pgd;
1289                         pud_t *pud;
1290                         pmd_t *pmd;
1291                         pte_t *pte;
1292
1293                         /* user gate pages are read-only */
1294                         if (gup_flags & FOLL_WRITE)
1295                                 return i ? : -EFAULT;
1296                         if (pg > TASK_SIZE)
1297                                 pgd = pgd_offset_k(pg);
1298                         else
1299                                 pgd = pgd_offset_gate(mm, pg);
1300                         BUG_ON(pgd_none(*pgd));
1301                         pud = pud_offset(pgd, pg);
1302                         BUG_ON(pud_none(*pud));
1303                         pmd = pmd_offset(pud, pg);
1304                         if (pmd_none(*pmd))
1305                                 return i ? : -EFAULT;
1306                         pte = pte_offset_map(pmd, pg);
1307                         if (pte_none(*pte)) {
1308                                 pte_unmap(pte);
1309                                 return i ? : -EFAULT;
1310                         }
1311                         if (pages) {
1312                                 struct page *page = vm_normal_page(gate_vma, start, *pte);
1313                                 pages[i] = page;
1314                                 if (page)
1315                                         get_page(page);
1316                         }
1317                         pte_unmap(pte);
1318                         if (vmas)
1319                                 vmas[i] = gate_vma;
1320                         i++;
1321                         start += PAGE_SIZE;
1322                         nr_pages--;
1323                         continue;
1324                 }
1325
1326                 if (!vma ||
1327                     (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1328                     !(vm_flags & vma->vm_flags))
1329                         return i ? : -EFAULT;
1330
1331                 if (is_vm_hugetlb_page(vma)) {
1332                         i = follow_hugetlb_page(mm, vma, pages, vmas,
1333                                         &start, &nr_pages, i, gup_flags);
1334                         continue;
1335                 }
1336
1337                 do {
1338                         struct page *page;
1339                         unsigned int foll_flags = gup_flags;
1340
1341                         /*
1342                          * If we have a pending SIGKILL, don't keep faulting
1343                          * pages and potentially allocating memory.
1344                          */
1345                         if (unlikely(fatal_signal_pending(current)))
1346                                 return i ? i : -ERESTARTSYS;
1347
1348                         cond_resched();
1349                         while (!(page = follow_page(vma, start, foll_flags))) {
1350                                 int ret;
1351
1352                                 ret = handle_mm_fault(mm, vma, start,
1353                                         (foll_flags & FOLL_WRITE) ?
1354                                         FAULT_FLAG_WRITE : 0);
1355
1356                                 if (ret & VM_FAULT_ERROR) {
1357                                         if (ret & VM_FAULT_OOM)
1358                                                 return i ? i : -ENOMEM;
1359                                         if (ret &
1360                                             (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
1361                                                 return i ? i : -EFAULT;
1362                                         BUG();
1363                                 }
1364                                 if (ret & VM_FAULT_MAJOR)
1365                                         tsk->maj_flt++;
1366                                 else
1367                                         tsk->min_flt++;
1368
1369                                 /*
1370                                  * The VM_FAULT_WRITE bit tells us that
1371                                  * do_wp_page has broken COW when necessary,
1372                                  * even if maybe_mkwrite decided not to set
1373                                  * pte_write. We can thus safely do subsequent
1374                                  * page lookups as if they were reads. But only
1375                                  * do so when looping for pte_write is futile:
1376                                  * in some cases userspace may also be wanting
1377                                  * to write to the gotten user page, which a
1378                                  * read fault here might prevent (a readonly
1379                                  * page might get reCOWed by userspace write).
1380                                  */
1381                                 if ((ret & VM_FAULT_WRITE) &&
1382                                     !(vma->vm_flags & VM_WRITE))
1383                                         foll_flags &= ~FOLL_WRITE;
1384
1385                                 cond_resched();
1386                         }
1387                         if (IS_ERR(page))
1388                                 return i ? i : PTR_ERR(page);
1389                         if (pages) {
1390                                 pages[i] = page;
1391
1392                                 flush_anon_page(vma, page, start);
1393                                 flush_dcache_page(page);
1394                         }
1395                         if (vmas)
1396                                 vmas[i] = vma;
1397                         i++;
1398                         start += PAGE_SIZE;
1399                         nr_pages--;
1400                 } while (nr_pages && start < vma->vm_end);
1401         } while (nr_pages);
1402         return i;
1403 }
1404
1405 /**
1406  * get_user_pages() - pin user pages in memory
1407  * @tsk:        task_struct of target task
1408  * @mm:         mm_struct of target mm
1409  * @start:      starting user address
1410  * @nr_pages:   number of pages from start to pin
1411  * @write:      whether pages will be written to by the caller
1412  * @force:      whether to force write access even if user mapping is
1413  *              readonly. This will result in the page being COWed even
1414  *              in MAP_SHARED mappings. You do not want this.
1415  * @pages:      array that receives pointers to the pages pinned.
1416  *              Should be at least nr_pages long. Or NULL, if caller
1417  *              only intends to ensure the pages are faulted in.
1418  * @vmas:       array of pointers to vmas corresponding to each page.
1419  *              Or NULL if the caller does not require them.
1420  *
1421  * Returns number of pages pinned. This may be fewer than the number
1422  * requested. If nr_pages is 0 or negative, returns 0. If no pages
1423  * were pinned, returns -errno. Each page returned must be released
1424  * with a put_page() call when it is finished with. vmas will only
1425  * remain valid while mmap_sem is held.
1426  *
1427  * Must be called with mmap_sem held for read or write.
1428  *
1429  * get_user_pages walks a process's page tables and takes a reference to
1430  * each struct page that each user address corresponds to at a given
1431  * instant. That is, it takes the page that would be accessed if a user
1432  * thread accesses the given user virtual address at that instant.
1433  *
1434  * This does not guarantee that the page exists in the user mappings when
1435  * get_user_pages returns, and there may even be a completely different
1436  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1437  * and subsequently re-faulted). However, it does guarantee that the page
1438  * won't be freed completely. And most callers simply care that the page
1439  * contains data that was valid *at some point in time*. Typically, an IO
1440  * or similar operation cannot guarantee anything stronger anyway because
1441  * locks can't be held over the syscall boundary.
1442  *
1443  * If write=0, the page must not be written to. If the page is written to,
1444  * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
1445  * after the page is finished with, and before put_page is called.
1446  *
1447  * get_user_pages is typically used for fewer-copy IO operations, to get a
1448  * handle on the memory by some means other than accesses via the user virtual
1449  * addresses. The pages may be submitted for DMA to devices or accessed via
1450  * their kernel linear mapping (via the kmap APIs). Care should be taken to
1451  * use the correct cache flushing APIs.
1452  *
1453  * See also get_user_pages_fast, for performance critical applications.
1454  */
1455 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1456                 unsigned long start, int nr_pages, int write, int force,
1457                 struct page **pages, struct vm_area_struct **vmas)
1458 {
1459         int flags = FOLL_TOUCH;
1460
1461         if (pages)
1462                 flags |= FOLL_GET;
1463         if (write)
1464                 flags |= FOLL_WRITE;
1465         if (force)
1466                 flags |= FOLL_FORCE;
1467
1468         return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
1469 }
1470 EXPORT_SYMBOL(get_user_pages);
1471
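/*
 * Editorial example, not part of memory.c: a minimal sketch of the calling
 * convention documented below, assuming a caller that writes into a user
 * buffer.  example_pin_user_buffer() and its parameters are hypothetical;
 * only the get_user_pages()/set_page_dirty_lock()/put_page() protocol is
 * taken from the documentation.
 */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                   struct page **pages)
{
        int i, got;

        down_read(&current->mm->mmap_sem);
        got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                             nr_pages,
                             1,         /* write: we will modify the pages */
                             0,         /* force: not wanted, see above */
                             pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (got <= 0)
                return got ? got : -EFAULT;

        /* ... touch the pages via kmap() or submit them for DMA ... */

        for (i = 0; i < got; i++) {
                set_page_dirty_lock(pages[i]); /* dirty before release */
                put_page(pages[i]);            /* drop the gup reference */
        }
        return got == nr_pages ? 0 : -EFAULT;
}
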
1472 /**
1473  * get_dump_page() - pin user page in memory while writing it to core dump
1474  * @addr: user address
1475  *
1476  * Returns struct page pointer of user page pinned for dump,
1477  * to be freed afterwards by page_cache_release() or put_page().
1478  *
1479  * Returns NULL on any kind of failure - a hole must then be inserted into
1480  * the corefile, to preserve alignment with its headers; and also returns
1481  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1482  * allowing a hole to be left in the corefile to save disk space.
1483  *
1484  * Called without mmap_sem, but after all other threads have been killed.
1485  */
1486 #ifdef CONFIG_ELF_CORE
1487 struct page *get_dump_page(unsigned long addr)
1488 {
1489         struct vm_area_struct *vma;
1490         struct page *page;
1491
1492         if (__get_user_pages(current, current->mm, addr, 1,
1493                         FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
1494                 return NULL;
1495         flush_cache_page(vma, addr, page_to_pfn(page));
1496         return page;
1497 }
1498 #endif /* CONFIG_ELF_CORE */
1499
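/*
 * Editorial example, not part of memory.c: roughly how the ELF core dump
 * writer consumes get_dump_page(), seeking past a hole for every NULL
 * return.  dump_write()/dump_seek() stand in for the binfmt_elf helpers;
 * treat this as a hedged approximation of fs/binfmt_elf.c, not a verbatim
 * copy.
 */
static int example_dump_range(struct file *file, unsigned long start,
                              unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);

                if (page) {
                        void *kaddr = kmap(page);
                        int ok = dump_write(file, kaddr, PAGE_SIZE);
                        kunmap(page);
                        page_cache_release(page);
                        if (!ok)
                                return -EIO;
                } else if (!dump_seek(file, PAGE_SIZE)) {
                        /* leave a hole, keeping header alignment */
                        return -EIO;
                }
        }
        return 0;
}
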
1500 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1501                         spinlock_t **ptl)
1502 {
1503         pgd_t *pgd = pgd_offset(mm, addr);
1504         pud_t *pud = pud_alloc(mm, pgd, addr);
1505         if (pud) {
1506                 pmd_t *pmd = pmd_alloc(mm, pud, addr);
1507                 if (pmd)
1508                         return pte_alloc_map_lock(mm, pmd, addr, ptl);
1509         }
1510         return NULL;
1511 }
1512
1513 /*
1514  * This is the old fallback for page remapping.
1515  *
1516  * For historical reasons, it only allows reserved pages. Only
1517  * old drivers should use this, and they needed to mark their
1518  * pages reserved for the old functions anyway.
1519  */
1520 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1521                         struct page *page, pgprot_t prot)
1522 {
1523         struct mm_struct *mm = vma->vm_mm;
1524         int retval;
1525         pte_t *pte;
1526         spinlock_t *ptl;
1527
1528         retval = -EINVAL;
1529         if (PageAnon(page))
1530                 goto out;
1531         retval = -ENOMEM;
1532         flush_dcache_page(page);
1533         pte = get_locked_pte(mm, addr, &ptl);
1534         if (!pte)
1535                 goto out;
1536         retval = -EBUSY;
1537         if (!pte_none(*pte))
1538                 goto out_unlock;
1539
1540         /* Ok, finally just insert the thing.. */
1541         get_page(page);
1542         inc_mm_counter(mm, MM_FILEPAGES);
1543         page_add_file_rmap(page);
1544         set_pte_at(mm, addr, pte, mk_pte(page, prot));
1545
1546         retval = 0;
1547         pte_unmap_unlock(pte, ptl);
1548         return retval;
1549 out_unlock:
1550         pte_unmap_unlock(pte, ptl);
1551 out:
1552         return retval;
1553 }
1554
1555 /**
1556  * vm_insert_page - insert single page into user vma
1557  * @vma: user vma to map to
1558  * @addr: target user address of this page
1559  * @page: source kernel page
1560  *
1561  * This allows drivers to insert individual pages they've allocated
1562  * into a user vma.
1563  *
1564  * The page has to be a nice clean _individual_ kernel allocation.
1565  * If you allocate a compound page, you need to have marked it as
1566  * such (__GFP_COMP), or manually just split the page up yourself
1567  * (see split_page()).
1568  *
1569  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1570  * took an arbitrary page protection parameter. This doesn't allow
1571  * that. Your vma protection will have to be set up correctly, which
1572  * means that if you want a shared writable mapping, you'd better
1573  * ask for a shared writable mapping!
1574  *
1575  * The page does not need to be reserved.
1576  */
1577 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1578                         struct page *page)
1579 {
1580         if (addr < vma->vm_start || addr >= vma->vm_end)
1581                 return -EFAULT;
1582         if (!page_count(page))
1583                 return -EINVAL;
1584         vma->vm_flags |= VM_INSERTPAGE;
1585         return insert_page(vma, addr, page, vma->vm_page_prot);
1586 }
1587 EXPORT_SYMBOL(vm_insert_page);
1588
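/*
 * Editorial example, not part of memory.c: a driver ->mmap method in the
 * style vm_insert_page() is meant for.  struct example_dev and its page
 * field are hypothetical; the page is assumed to come from alloc_page(),
 * i.e. a clean individual kernel allocation as required above, with no
 * PageReserved games.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_dev *dev = file->private_data;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;
        /* vma->vm_page_prot already reflects the mmap() prot and flags */
        return vm_insert_page(vma, vma->vm_start, dev->page);
}
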
1589 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1590                         unsigned long pfn, pgprot_t prot)
1591 {
1592         struct mm_struct *mm = vma->vm_mm;
1593         int retval;
1594         pte_t *pte, entry;
1595         spinlock_t *ptl;
1596
1597         retval = -ENOMEM;
1598         pte = get_locked_pte(mm, addr, &ptl);
1599         if (!pte)
1600                 goto out;
1601         retval = -EBUSY;
1602         if (!pte_none(*pte))
1603                 goto out_unlock;
1604
1605         /* Ok, finally just insert the thing.. */
1606         entry = pte_mkspecial(pfn_pte(pfn, prot));
1607         set_pte_at(mm, addr, pte, entry);
1608         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1609
1610         retval = 0;
1611 out_unlock:
1612         pte_unmap_unlock(pte, ptl);
1613 out:
1614         return retval;
1615 }
1616
1617 /**
1618  * vm_insert_pfn - insert single pfn into user vma
1619  * @vma: user vma to map to
1620  * @addr: target user address of this page
1621  * @pfn: source kernel pfn
1622  *
1623  * Similar to vm_insert_page, this allows drivers to insert individual pages
1624  * they've allocated into a user vma. Same comments apply.
1625  *
1626  * This function should only be called from a vm_ops->fault handler, and
1627  * in that case the handler should return VM_FAULT_NOPAGE on success.
1628  *
1629  * vma cannot be a COW mapping.
1630  *
1631  * As this is called only for pages that do not currently exist, we
1632  * do not need to flush old virtual caches or the TLB.
1633  */
1634 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1635                         unsigned long pfn)
1636 {
1637         int ret;
1638         pgprot_t pgprot = vma->vm_page_prot;
1639         /*
1640          * Technically, architectures with pte_special can avoid all these
1641          * restrictions (same for remap_pfn_range).  However, we would like
1642          * consistency in testing and feature parity among all, so we should
1643          * try to keep these invariants in place for everybody.
1644          */
1645         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1646         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1647                                                 (VM_PFNMAP|VM_MIXEDMAP));
1648         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1649         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1650
1651         if (addr < vma->vm_start || addr >= vma->vm_end)
1652                 return -EFAULT;
1653         if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
1654                 return -EINVAL;
1655
1656         ret = insert_pfn(vma, addr, pfn, pgprot);
1657
1658         if (ret)
1659                 untrack_pfn_vma(vma, pfn, PAGE_SIZE);
1660
1661         return ret;
1662 }
1663 EXPORT_SYMBOL(vm_insert_pfn);
1664
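/*
 * Editorial example, not part of memory.c: vm_insert_pfn() called from a
 * ->fault handler of a VM_PFNMAP vma, in the style of the GEM and
 * framebuffer drivers.  struct example_dev and its phys_base member are
 * hypothetical; the error mapping follows common driver practice.
 */
static int example_pfn_fault(struct vm_area_struct *vma,
                             struct vm_fault *vmf)
{
        struct example_dev *dev = vma->vm_private_data;
        unsigned long address = (unsigned long)vmf->virtual_address;
        unsigned long pfn = (dev->phys_base >> PAGE_SHIFT) +
                            ((address - vma->vm_start) >> PAGE_SHIFT);

        switch (vm_insert_pfn(vma, address, pfn)) {
        case 0:
        case -EBUSY:    /* a racing fault already installed the pte */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}
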
1665 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1666                         unsigned long pfn)
1667 {
1668         BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1669
1670         if (addr < vma->vm_start || addr >= vma->vm_end)
1671                 return -EFAULT;
1672
1673         /*
1674          * If we don't have pte special, then we have to use the pfn_valid()
1675          * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
1676          * refcount the page if pfn_valid is true (hence insert_page rather
1677          * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
1678  * without pte special, it would then be refcounted as a normal page.
1679          */
1680         if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
1681                 struct page *page;
1682
1683                 page = pfn_to_page(pfn);
1684                 return insert_page(vma, addr, page, vma->vm_page_prot);
1685         }
1686         return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
1687 }
1688 EXPORT_SYMBOL(vm_insert_mixed);
1689
1690 /*
1691  * Maps a range of physical memory into the requested pages. The old
1692  * mappings are removed. Any references to nonexistent pages result
1693  * in null mappings (currently treated as "copy-on-access").
1694  */
1695 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1696                         unsigned long addr, unsigned long end,
1697                         unsigned long pfn, pgprot_t prot)
1698 {
1699         pte_t *pte;
1700         spinlock_t *ptl;
1701
1702         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1703         if (!pte)
1704                 return -ENOMEM;
1705         arch_enter_lazy_mmu_mode();
1706         do {
1707                 BUG_ON(!pte_none(*pte));
1708                 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1709                 pfn++;
1710         } while (pte++, addr += PAGE_SIZE, addr != end);
1711         arch_leave_lazy_mmu_mode();
1712         pte_unmap_unlock(pte - 1, ptl);
1713         return 0;
1714 }
1715
1716 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1717                         unsigned long addr, unsigned long end,
1718                         unsigned long pfn, pgprot_t prot)
1719 {
1720         pmd_t *pmd;
1721         unsigned long next;
1722
1723         pfn -= addr >> PAGE_SHIFT;
1724         pmd = pmd_alloc(mm, pud, addr);
1725         if (!pmd)
1726                 return -ENOMEM;
1727         do {
1728                 next = pmd_addr_end(addr, end);
1729                 if (remap_pte_range(mm, pmd, addr, next,
1730                                 pfn + (addr >> PAGE_SHIFT), prot))
1731                         return -ENOMEM;
1732         } while (pmd++, addr = next, addr != end);
1733         return 0;
1734 }
1735
1736 static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1737                         unsigned long addr, unsigned long end,
1738                         unsigned long pfn, pgprot_t prot)
1739 {
1740         pud_t *pud;
1741         unsigned long next;
1742
1743         pfn -= addr >> PAGE_SHIFT;
1744         pud = pud_alloc(mm, pgd, addr);
1745         if (!pud)
1746                 return -ENOMEM;
1747         do {
1748                 next = pud_addr_end(addr, end);
1749                 if (remap_pmd_range(mm, pud, addr, next,
1750                                 pfn + (addr >> PAGE_SHIFT), prot))
1751                         return -ENOMEM;
1752         } while (pud++, addr = next, addr != end);
1753         return 0;
1754 }
1755
1756 /**
1757  * remap_pfn_range - remap kernel memory to userspace
1758  * @vma: user vma to map to
1759  * @addr: target user address to start at
1760  * @pfn: physical address of kernel memory
1761  * @size: size of map area
1762  * @prot: page protection flags for this mapping
1763  *
1764  *  Note: this is only safe if the mm semaphore is held when called.
1765  */
1766 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1767                     unsigned long pfn, unsigned long size, pgprot_t prot)
1768 {
1769         pgd_t *pgd;
1770         unsigned long next;
1771         unsigned long end = addr + PAGE_ALIGN(size);
1772         struct mm_struct *mm = vma->vm_mm;
1773         int err;
1774
1775         /*
1776          * Physically remapped pages are special. Tell the
1777          * rest of the world about it:
1778          *   VM_IO tells people not to look at these pages
1779          *      (accesses can have side effects).
1780          *   VM_RESERVED is specified all over the place, because
1781          *      in 2.4 it kept swapout's vma scan off this vma; but
1782          *      in 2.6 the LRU scan won't even find its pages, so this
1783          *      flag means no more than count its pages in reserved_vm,
1784  *      and omit it from core dump, even when VM_IO is turned off.
1785          *   VM_PFNMAP tells the core MM that the base pages are just
1786          *      raw PFN mappings, and do not have a "struct page" associated
1787          *      with them.
1788          *
1789          * There's a horrible special case to handle copy-on-write
1790          * behaviour that some programs depend on. We mark the "original"
1791          * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1792          */
1793         if (addr == vma->vm_start && end == vma->vm_end) {
1794                 vma->vm_pgoff = pfn;
1795                 vma->vm_flags |= VM_PFN_AT_MMAP;
1796         } else if (is_cow_mapping(vma->vm_flags))
1797                 return -EINVAL;
1798
1799         vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1800
1801         err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
1802         if (err) {
1803                 /*
1804                  * To indicate that track_pfn-related cleanup is not
1805                  * needed from the higher-level routine calling unmap_vmas().
1806                  */
1807                 vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
1808                 vma->vm_flags &= ~VM_PFN_AT_MMAP;
1809                 return -EINVAL;
1810         }
1811
1812         BUG_ON(addr >= end);
1813         pfn -= addr >> PAGE_SHIFT;
1814         pgd = pgd_offset(mm, addr);
1815         flush_cache_range(vma, addr, end);
1816         do {
1817                 next = pgd_addr_end(addr, end);
1818                 err = remap_pud_range(mm, pgd, addr, next,
1819                                 pfn + (addr >> PAGE_SHIFT), prot);
1820                 if (err)
1821                         break;
1822         } while (pgd++, addr = next, addr != end);
1823
1824         if (err)
1825                 untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
1826
1827         return err;
1828 }
1829 EXPORT_SYMBOL(remap_pfn_range);
1830
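/*
 * Editorial example, not part of memory.c: the classic ->mmap user of
 * remap_pfn_range(), mapping a whole physical region (say, a PCI BAR) in
 * one call.  EXAMPLE_PHYS_BASE is a made-up constant; a real driver would
 * take it from its probed resources.
 */
static int example_mmap_io(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* device memory: disable caching before installing the ptes */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start,
                               (EXAMPLE_PHYS_BASE >> PAGE_SHIFT) +
                               vma->vm_pgoff,
                               size, vma->vm_page_prot);
}
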
1831 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1832                                      unsigned long addr, unsigned long end,
1833                                      pte_fn_t fn, void *data)
1834 {
1835         pte_t *pte;
1836         int err;
1837         pgtable_t token;
1838         spinlock_t *uninitialized_var(ptl);
1839
1840         pte = (mm == &init_mm) ?
1841                 pte_alloc_kernel(pmd, addr) :
1842                 pte_alloc_map_lock(mm, pmd, addr, &ptl);
1843         if (!pte)
1844                 return -ENOMEM;
1845
1846         BUG_ON(pmd_huge(*pmd));
1847
1848         arch_enter_lazy_mmu_mode();
1849
1850         token = pmd_pgtable(*pmd);
1851
1852         do {
1853                 err = fn(pte++, token, addr, data);
1854                 if (err)
1855                         break;
1856         } while (addr += PAGE_SIZE, addr != end);
1857
1858         arch_leave_lazy_mmu_mode();
1859
1860         if (mm != &init_mm)
1861                 pte_unmap_unlock(pte-1, ptl);
1862         return err;
1863 }
1864
1865 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1866                                      unsigned long addr, unsigned long end,
1867                                      pte_fn_t fn, void *data)
1868 {
1869         pmd_t *pmd;
1870         unsigned long next;
1871         int err;
1872
1873         BUG_ON(pud_huge(*pud));
1874
1875         pmd = pmd_alloc(mm, pud, addr);
1876         if (!pmd)
1877                 return -ENOMEM;
1878         do {
1879                 next = pmd_addr_end(addr, end);
1880                 err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
1881                 if (err)
1882                         break;
1883         } while (pmd++, addr = next, addr != end);
1884         return err;
1885 }
1886
1887 static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
1888                                      unsigned long addr, unsigned long end,
1889                                      pte_fn_t fn, void *data)
1890 {
1891         pud_t *pud;
1892         unsigned long next;
1893         int err;
1894
1895         pud = pud_alloc(mm, pgd, addr);
1896         if (!pud)
1897                 return -ENOMEM;
1898         do {
1899                 next = pud_addr_end(addr, end);
1900                 err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
1901                 if (err)
1902                         break;
1903         } while (pud++, addr = next, addr != end);
1904         return err;
1905 }
1906
1907 /*
1908  * Scan a region of virtual memory, filling in page tables as necessary
1909  * and calling a provided function on each leaf page table.
1910  */
1911 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
1912                         unsigned long size, pte_fn_t fn, void *data)
1913 {
1914         pgd_t *pgd;
1915         unsigned long next;
1916         unsigned long start = addr, end = addr + size;
1917         int err;
1918
1919         BUG_ON(addr >= end);
1920         mmu_notifier_invalidate_range_start(mm, start, end);
1921         pgd = pgd_offset(mm, addr);
1922         do {
1923                 next = pgd_addr_end(addr, end);
1924                 err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
1925                 if (err)
1926                         break;
1927         } while (pgd++, addr = next, addr != end);
1928         mmu_notifier_invalidate_range_end(mm, start, end);
1929         return err;
1930 }
1931 EXPORT_SYMBOL_GPL(apply_to_page_range);
1932
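/*
 * Editorial example, not part of memory.c: a minimal pte_fn_t callback for
 * apply_to_page_range(), which merely counts present leaf ptes via the
 * counter passed in @data.  Both names are hypothetical; returning non-zero
 * from the callback aborts the walk, as the code above shows.
 */
static int example_count_present(pte_t *pte, pgtable_t token,
                                 unsigned long addr, void *data)
{
        unsigned long *count = data;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

/* usage: apply_to_page_range(mm, start, size, example_count_present, &n); */
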
1933 /*
1934  * handle_pte_fault chooses page fault handler according to an entry
1935  * which was read non-atomically.  Before making any commitment, on
1936  * those architectures or configurations (e.g. i386 with PAE) which
1937  * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
1938  * must check under lock before unmapping the pte and proceeding
1939  * (but do_wp_page is only called after already making such a check;
1940  * and do_anonymous_page and __do_fault can safely check later on).
1941  */
1942 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
1943                                 pte_t *page_table, pte_t orig_pte)
1944 {
1945         int same = 1;
1946 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1947         if (sizeof(pte_t) > sizeof(unsigned long)) {
1948                 spinlock_t *ptl = pte_lockptr(mm, pmd);
1949                 spin_lock(ptl);
1950                 same = pte_same(*page_table, orig_pte);
1951                 spin_unlock(ptl);
1952         }
1953 #endif
1954         pte_unmap(page_table);
1955         return same;
1956 }
1957
1958 /*
1959  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1960  * servicing faults for write access.  In the normal case, we always want
1961  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1962  * that do not have writing enabled, when used by access_process_vm.
1963  */
1964 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1965 {
1966         if (likely(vma->vm_flags & VM_WRITE))
1967                 pte = pte_mkwrite(pte);
1968         return pte;
1969 }
1970
1971 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
1972 {
1973         /*
1974          * If the source page was a PFN mapping, we don't have
1975          * a "struct page" for it. We do a best-effort copy by
1976          * just copying from the original user address. If that
1977          * fails, we just zero-fill it. Live with it.
1978          */
1979         if (unlikely(!src)) {
1980                 void *kaddr = kmap_atomic(dst, KM_USER0);
1981                 void __user *uaddr = (void __user *)(va & PAGE_MASK);
1982
1983                 /*
1984                  * This really shouldn't fail, because the page is there
1985                  * in the page tables. But it might just be unreadable,
1986                  * in which case we just give up and fill the result with
1987                  * zeroes.
1988                  */
1989                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
1990                         memset(kaddr, 0, PAGE_SIZE);
1991                 kunmap_atomic(kaddr, KM_USER0);
1992                 flush_dcache_page(dst);
1993         } else
1994                 copy_user_highpage(dst, src, va, vma);
1995 }
1996
1997 /*
1998  * This routine handles present pages, when users try to write
1999  * to a shared page. It is done by copying the page to a new address
2000  * and decrementing the shared-page counter for the old page.
2001  *
2002  * Note that this routine assumes that the protection checks have been
2003  * done by the caller (the low-level page fault routine in most cases).
2004  * Thus we can safely just mark it writable once we've done any necessary
2005  * COW.
2006  *
2007  * We also mark the page dirty at this point even though the page will
2008  * change only once the write actually happens. This avoids a few races,
2009  * and potentially makes it more efficient.
2010  *
2011  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2012  * but allow concurrent faults), with pte both mapped and locked.
2013  * We return with mmap_sem still held, but pte unmapped and unlocked.
2014  */
2015 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2016                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2017                 spinlock_t *ptl, pte_t orig_pte)
2018 {
2019         struct page *old_page, *new_page;
2020         pte_t entry;
2021         int reuse = 0, ret = 0;
2022         int page_mkwrite = 0;
2023         struct page *dirty_page = NULL;
2024
2025         old_page = vm_normal_page(vma, address, orig_pte);
2026         if (!old_page) {
2027                 /*
2028                  * VM_MIXEDMAP !pfn_valid() case
2029                  *
2030                  * We should not cow pages in a shared writeable mapping.
2031                  * Just mark the pages writable as we can't do any dirty
2032                  * accounting on raw pfn maps.
2033                  */
2034                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2035                                      (VM_WRITE|VM_SHARED))
2036                         goto reuse;
2037                 goto gotten;
2038         }
2039
2040         /*
2041          * Take out anonymous pages first; anonymous shared vmas are
2042          * not dirty accountable.
2043          */
2044         if (PageAnon(old_page) && !PageKsm(old_page)) {
2045                 if (!trylock_page(old_page)) {
2046                         page_cache_get(old_page);
2047                         pte_unmap_unlock(page_table, ptl);
2048                         lock_page(old_page);
2049                         page_table = pte_offset_map_lock(mm, pmd, address,
2050                                                          &ptl);
2051                         if (!pte_same(*page_table, orig_pte)) {
2052                                 unlock_page(old_page);
2053                                 page_cache_release(old_page);
2054                                 goto unlock;
2055                         }
2056                         page_cache_release(old_page);
2057                 }
2058                 reuse = reuse_swap_page(old_page);
2059                 unlock_page(old_page);
2060         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2061                                         (VM_WRITE|VM_SHARED))) {
2062                 /*
2063                  * Only catch write-faults on shared writable pages;
2064                  * read-only shared pages can get COWed by
2065                  * get_user_pages(.write=1, .force=1).
2066                  */
2067                 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2068                         struct vm_fault vmf;
2069                         int tmp;
2070
2071                         vmf.virtual_address = (void __user *)(address &
2072                                                                 PAGE_MASK);
2073                         vmf.pgoff = old_page->index;
2074                         vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2075                         vmf.page = old_page;
2076
2077                         /*
2078                          * Notify the address space that the page is about to
2079                          * become writable so that it can prohibit this or wait
2080                          * for the page to get into an appropriate state.
2081                          *
2082                          * We do this without the lock held, so that it can
2083                          * sleep if it needs to.
2084                          */
2085                         page_cache_get(old_page);
2086                         pte_unmap_unlock(page_table, ptl);
2087
2088                         tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
2089                         if (unlikely(tmp &
2090                                         (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2091                                 ret = tmp;
2092                                 goto unwritable_page;
2093                         }
2094                         if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
2095                                 lock_page(old_page);
2096                                 if (!old_page->mapping) {
2097                                         ret = 0; /* retry the fault */
2098                                         unlock_page(old_page);
2099                                         goto unwritable_page;
2100                                 }
2101                         } else
2102                                 VM_BUG_ON(!PageLocked(old_page));
2103
2104                         /*
2105                          * Since we dropped the lock we need to revalidate
2106                          * the PTE as someone else may have changed it.  If
2107                          * they did, we just return, as we can count on the
2108                          * MMU to tell us if they didn't also make it writable.
2109                          */
2110                         page_table = pte_offset_map_lock(mm, pmd, address,
2111                                                          &ptl);
2112                         if (!pte_same(*page_table, orig_pte)) {
2113                                 unlock_page(old_page);
2114                                 page_cache_release(old_page);
2115                                 goto unlock;
2116                         }
2117
2118                         page_mkwrite = 1;
2119                 }
2120                 dirty_page = old_page;
2121                 get_page(dirty_page);
2122                 reuse = 1;
2123         }
2124
2125         if (reuse) {
2126 reuse:
2127                 flush_cache_page(vma, address, pte_pfn(orig_pte));
2128                 entry = pte_mkyoung(orig_pte);
2129                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2130                 if (ptep_set_access_flags(vma, address, page_table, entry, 1))
2131                         update_mmu_cache(vma, address, page_table);
2132                 ret |= VM_FAULT_WRITE;
2133                 goto unlock;
2134         }
2135
2136         /*
2137          * Ok, we need to copy. Oh, well..
2138          */
2139         page_cache_get(old_page);
2140 gotten:
2141         pte_unmap_unlock(page_table, ptl);
2142
2143         if (unlikely(anon_vma_prepare(vma)))
2144                 goto oom;
2145
2146         if (is_zero_pfn(pte_pfn(orig_pte))) {
2147                 new_page = alloc_zeroed_user_highpage_movable(vma, address);
2148                 if (!new_page)
2149                         goto oom;
2150         } else {
2151                 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2152                 if (!new_page)
2153                         goto oom;
2154                 cow_user_page(new_page, old_page, address, vma);
2155         }
2156         __SetPageUptodate(new_page);
2157
2158         /*
2159          * Don't let another task, with possibly unlocked vma,
2160          * keep the mlocked page.
2161          */
2162         if ((vma->vm_flags & VM_LOCKED) && old_page) {
2163                 lock_page(old_page);    /* for LRU manipulation */
2164                 clear_page_mlock(old_page);
2165                 unlock_page(old_page);
2166         }
2167
2168         if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
2169                 goto oom_free_new;
2170
2171         /*
2172          * Re-check the pte - we dropped the lock
2173          */
2174         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2175         if (likely(pte_same(*page_table, orig_pte))) {
2176                 if (old_page) {
2177                         if (!PageAnon(old_page)) {
2178                                 dec_mm_counter(mm, MM_FILEPAGES);
2179                                 inc_mm_counter(mm, MM_ANONPAGES);
2180                         }
2181                 } else
2182                         inc_mm_counter(mm, MM_ANONPAGES);
2183                 flush_cache_page(vma, address, pte_pfn(orig_pte));
2184                 entry = mk_pte(new_page, vma->vm_page_prot);
2185                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2186                 /*
2187                  * Clear the pte entry and flush it first, before updating the
2188                  * pte with the new entry. This will avoid a race condition
2189                  * seen in the presence of one thread doing SMC and another
2190                  * thread doing COW.
2191                  */
2192                 ptep_clear_flush(vma, address, page_table);
2193                 page_add_new_anon_rmap(new_page, vma, address);
2194                 /*
2195                  * We call the notify macro here because, when using secondary
2196                  * mmu page tables (such as kvm shadow page tables), we want the
2197                  * new page to be mapped directly into the secondary page table.
2198                  */
2199                 set_pte_at_notify(mm, address, page_table, entry);
2200                 update_mmu_cache(vma, address, page_table);
2201                 if (old_page) {
2202                         /*
2203                          * Only after switching the pte to the new page may
2204                          * we remove the mapcount here. Otherwise another
2205                          * process may come and find the rmap count decremented
2206                          * before the pte is switched to the new page, and
2207                          * "reuse" the old page writing into it while our pte
2208                          * here still points into it and can be read by other
2209                          * threads.
2210                          *
2211                          * The critical issue is to order this
2212                          * page_remove_rmap with the ptep_clear_flush above.
2213                          * Those stores are ordered by (if nothing else,)
2214                          * the barrier present in the atomic_add_negative
2215                          * in page_remove_rmap.
2216                          *
2217                          * Then the TLB flush in ptep_clear_flush ensures that
2218                          * no process can access the old page before the
2219                          * decremented mapcount is visible. And the old page
2220                          * cannot be reused until after the decremented
2221                          * mapcount is visible. So transitively, TLBs to
2222                          * old page will be flushed before it can be reused.
2223                          */
2224                         page_remove_rmap(old_page);
2225                 }
2226
2227                 /* Free the old page.. */
2228                 new_page = old_page;
2229                 ret |= VM_FAULT_WRITE;
2230         } else
2231                 mem_cgroup_uncharge_page(new_page);
2232
2233         if (new_page)
2234                 page_cache_release(new_page);
2235         if (old_page)
2236                 page_cache_release(old_page);
2237 unlock:
2238         pte_unmap_unlock(page_table, ptl);
2239         if (dirty_page) {
2240                 /*
2241                  * Yes, Virginia, this is actually required to prevent a race
2242                  * with clear_page_dirty_for_io() from clearing the page dirty
2243                  * bit after it clears all dirty ptes, but before a racing
2244                  * do_wp_page installs a dirty pte.
2245                  *
2246                  * __do_fault is protected similarly.
2247                  */
2248                 if (!page_mkwrite) {
2249                         wait_on_page_locked(dirty_page);
2250                         set_page_dirty_balance(dirty_page, page_mkwrite);
2251                 }
2252                 put_page(dirty_page);
2253                 if (page_mkwrite) {
2254                         struct address_space *mapping = dirty_page->mapping;
2255
2256                         set_page_dirty(dirty_page);
2257                         unlock_page(dirty_page);
2258                         page_cache_release(dirty_page);
2259                         if (mapping)    {
2260                                 /*
2261                  * Some device drivers do not set page->mapping
2262                                  * but still dirty their pages
2263                                  */
2264                                 balance_dirty_pages_ratelimited(mapping);
2265                         }
2266                 }
2267
2268                 /* file_update_time outside page_lock */
2269                 if (vma->vm_file)
2270                         file_update_time(vma->vm_file);
2271         }
2272         return ret;
2273 oom_free_new:
2274         page_cache_release(new_page);
2275 oom:
2276         if (old_page) {
2277                 if (page_mkwrite) {
2278                         unlock_page(old_page);
2279                         page_cache_release(old_page);
2280                 }
2281                 page_cache_release(old_page);
2282         }
2283         return VM_FAULT_OOM;
2284
2285 unwritable_page:
2286         page_cache_release(old_page);
2287         return ret;
2288 }
2289
2290 /*
2291  * Helper functions for unmap_mapping_range().
2292  *
2293  * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
2294  *
2295  * We have to restart searching the prio_tree whenever we drop the lock,
2296  * since the iterator is only valid while the lock is held, and anyway
2297  * a later vma might be split and reinserted earlier while lock dropped.
2298  *
2299  * The list of nonlinear vmas could be handled more efficiently, using
2300  * a placeholder, but handle it in the same way until a need is shown.
2301  * It is important to search the prio_tree before nonlinear list: a vma
2302  * may become nonlinear and be shifted from prio_tree to nonlinear list
2303  * while the lock is dropped; but never shifted from list to prio_tree.
2304  *
2305  * In order to make forward progress despite restarting the search,
2306  * vm_truncate_count is used to mark a vma as now dealt with, so we can
2307  * quickly skip it next time around.  Since the prio_tree search only
2308  * shows us those vmas affected by unmapping the range in question, we
2309  * can't efficiently keep all vmas in step with mapping->truncate_count:
2310  * so instead reset them all whenever it wraps back to 0 (then go to 1).
2311  * mapping->truncate_count and vma->vm_truncate_count are protected by
2312  * i_mmap_lock.
2313  *
2314  * In order to make forward progress despite repeatedly restarting some
2315  * large vma, note the restart_addr from unmap_vmas when it breaks out:
2316  * and restart from that address when we reach that vma again.  It might
2317  * have been split or merged, shrunk or extended, but never shifted: so
2318  * restart_addr remains valid so long as it remains in the vma's range.
2319  * unmap_mapping_range forces truncate_count to leap over page-aligned
2320  * values so we can save vma's restart_addr in its truncate_count field.
2321  */
2322 #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
2323
2324 static void reset_vma_truncate_counts(struct address_space *mapping)
2325 {
2326         struct vm_area_struct *vma;
2327         struct prio_tree_iter iter;
2328
2329         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
2330                 vma->vm_truncate_count = 0;
2331         list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
2332                 vma->vm_truncate_count = 0;
2333 }
2334
2335 static int unmap_mapping_range_vma(struct vm_area_struct *vma,
2336                 unsigned long start_addr, unsigned long end_addr,
2337                 struct zap_details *details)
2338 {
2339         unsigned long restart_addr;
2340         int need_break;
2341
2342         /*
2343          * Files that support invalidating or truncating portions of the
2344          * file from under mmapped areas must have their ->fault function
2345          * return a locked page (and set VM_FAULT_LOCKED in the return).
2346          * This provides synchronisation against concurrent unmapping here.
2347          */
2348
2349 again:
2350         restart_addr = vma->vm_truncate_count;
2351         if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
2352                 start_addr = restart_addr;
2353                 if (start_addr >= end_addr) {
2354                         /* Top of vma has been split off since last time */
2355                         vma->vm_truncate_count = details->truncate_count;
2356                         return 0;
2357                 }
2358         }
2359
2360         restart_addr = zap_page_range(vma, start_addr,
2361                                         end_addr - start_addr, details);
2362         need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
2363
2364         if (restart_addr >= end_addr) {
2365                 /* We have now completed this vma: mark it so */
2366                 vma->vm_truncate_count = details->truncate_count;
2367                 if (!need_break)
2368                         return 0;
2369         } else {
2370                 /* Note restart_addr in vma's truncate_count field */
2371                 vma->vm_truncate_count = restart_addr;
2372                 if (!need_break)
2373                         goto again;
2374         }
2375
2376         spin_unlock(details->i_mmap_lock);
2377         cond_resched();
2378         spin_lock(details->i_mmap_lock);
2379         return -EINTR;
2380 }
2381
2382 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
2383                                             struct zap_details *details)
2384 {
2385         struct vm_area_struct *vma;
2386         struct prio_tree_iter iter;
2387         pgoff_t vba, vea, zba, zea;
2388
2389 restart:
2390         vma_prio_tree_foreach(vma, &iter, root,
2391                         details->first_index, details->last_index) {
2392                 /* Skip quickly over those we have already dealt with */
2393                 if (vma->vm_truncate_count == details->truncate_count)
2394                         continue;
2395
2396                 vba = vma->vm_pgoff;
2397                 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
2398                 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
2399                 zba = details->first_index;
2400                 if (zba < vba)
2401                         zba = vba;
2402                 zea = details->last_index;
2403                 if (zea > vea)
2404                         zea = vea;
2405
2406                 if (unmap_mapping_range_vma(vma,
2407                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2408                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2409                                 details) < 0)
2410                         goto restart;
2411         }
2412 }
2413
2414 static inline void unmap_mapping_range_list(struct list_head *head,
2415                                             struct zap_details *details)
2416 {
2417         struct vm_area_struct *vma;
2418
2419         /*
2420          * In nonlinear VMAs there is no correspondence between virtual address
2421          * offset and file offset.  So we must perform an exhaustive search
2422          * across *all* the pages in each nonlinear VMA, not just the pages
2423          * whose virtual address lies outside the file truncation point.
2424          */
2425 restart:
2426         list_for_each_entry(vma, head, shared.vm_set.list) {
2427                 /* Skip quickly over those we have already dealt with */
2428                 if (vma->vm_truncate_count == details->truncate_count)
2429                         continue;
2430                 details->nonlinear_vma = vma;
2431                 if (unmap_mapping_range_vma(vma, vma->vm_start,
2432                                         vma->vm_end, details) < 0)
2433                         goto restart;
2434         }
2435 }
2436
2437 /**
2438  * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
2439  * @mapping: the address space containing mmaps to be unmapped.
2440  * @holebegin: byte in first page to unmap, relative to the start of
2441  * the underlying file.  This will be rounded down to a PAGE_SIZE
2442  * boundary.  Note that this is different from truncate_pagecache(), which
2443  * must keep the partial page.  In contrast, we must get rid of
2444  * partial pages.
2445  * @holelen: size of prospective hole in bytes.  This will be rounded
2446  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
2447  * end of the file.
2448  * @even_cows: 1 when truncating a file: unmap even private COWed pages;
2449  * 0 when invalidating pagecache: don't throw away private data.
2450  */
2451 void unmap_mapping_range(struct address_space *mapping,
2452                 loff_t const holebegin, loff_t const holelen, int even_cows)
2453 {
2454         struct zap_details details;
2455         pgoff_t hba = holebegin >> PAGE_SHIFT;
2456         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2457
2458         /* Check for overflow. */
2459         if (sizeof(holelen) > sizeof(hlen)) {
2460                 long long holeend =
2461                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2462                 if (holeend & ~(long long)ULONG_MAX)
2463                         hlen = ULONG_MAX - hba + 1;
2464         }
2465
2466         details.check_mapping = even_cows ? NULL : mapping;
2467         details.nonlinear_vma = NULL;
2468         details.first_index = hba;
2469         details.last_index = hba + hlen - 1;
2470         if (details.last_index < details.first_index)
2471                 details.last_index = ULONG_MAX;
2472         details.i_mmap_lock = &mapping->i_mmap_lock;
2473
2474         spin_lock(&mapping->i_mmap_lock);
2475
2476         /* Protect against endless unmapping loops */
2477         mapping->truncate_count++;
2478         if (unlikely(is_restart_addr(mapping->truncate_count))) {
2479                 if (mapping->truncate_count == 0)
2480                         reset_vma_truncate_counts(mapping);
2481                 mapping->truncate_count++;
2482         }
2483         details.truncate_count = mapping->truncate_count;
2484
2485         if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
2486                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2487         if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2488                 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2489         spin_unlock(&mapping->i_mmap_lock);
2490 }
2491 EXPORT_SYMBOL(unmap_mapping_range);
2492
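/*
 * Editorial example, not part of memory.c: the usual truncate-time calling
 * pattern for unmap_mapping_range(), mirroring what truncate_pagecache()
 * does.  Passing newsize + PAGE_SIZE - 1 keeps the partially truncated
 * page mapped (holebegin is rounded down); the second unmap catches ptes
 * re-faulted in during the truncate.
 */
static void example_truncate_pagecache(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;

        unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(mapping, newsize);
        unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
}
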
2493 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
2494 {
2495         struct address_space *mapping = inode->i_mapping;
2496
2497         /*
2498          * If the underlying filesystem is not going to provide
2499          * a way to truncate a range of blocks (punch a hole),
2500          * we should return failure right now.
2501          */
2502         if (!inode->i_op->truncate_range)
2503                 return -ENOSYS;
2504
2505         mutex_lock(&inode->i_mutex);
2506         down_write(&inode->i_alloc_sem);
2507         unmap_mapping_range(mapping, offset, (end - offset), 1);
2508         truncate_inode_pages_range(mapping, offset, end);
2509         unmap_mapping_range(mapping, offset, (end - offset), 1);
2510         inode->i_op->truncate_range(inode, offset, end);
2511         up_write(&inode->i_alloc_sem);
2512         mutex_unlock(&inode->i_mutex);
2513
2514         return 0;
2515 }
2516
2517 /*
2518  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2519  * but allow concurrent faults), and pte mapped but not yet locked.
2520  * We return with mmap_sem still held, but pte unmapped and unlocked.
2521  */
2522 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2523                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2524                 unsigned int flags, pte_t orig_pte)
2525 {
2526         spinlock_t *ptl;
2527         struct page *page;
2528         swp_entry_t entry;
2529         pte_t pte;
2530         struct mem_cgroup *ptr = NULL;
2531         int ret = 0;
2532
2533         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2534                 goto out;
2535
2536         entry = pte_to_swp_entry(orig_pte);
2537         if (unlikely(non_swap_entry(entry))) {
2538                 if (is_migration_entry(entry)) {
2539                         migration_entry_wait(mm, pmd, address);
2540                 } else if (is_hwpoison_entry(entry)) {
2541                         ret = VM_FAULT_HWPOISON;
2542                 } else {
2543                         print_bad_pte(vma, address, orig_pte, NULL);
2544                         ret = VM_FAULT_SIGBUS;
2545                 }
2546                 goto out;
2547         }
2548         delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2549         page = lookup_swap_cache(entry);
2550         if (!page) {
2551                 grab_swap_token(mm); /* Contend for token _before_ read-in */
2552                 page = swapin_readahead(entry,
2553                                         GFP_HIGHUSER_MOVABLE, vma, address);
2554                 if (!page) {
2555                         /*
2556                          * Back out if somebody else faulted in this pte
2557                          * while we released the pte lock.
2558                          */
2559                         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2560                         if (likely(pte_same(*page_table, orig_pte)))
2561                                 ret = VM_FAULT_OOM;
2562                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2563                         goto unlock;
2564                 }
2565
2566                 /* Had to read the page from swap area: Major fault */
2567                 ret = VM_FAULT_MAJOR;
2568                 count_vm_event(PGMAJFAULT);
2569         } else if (PageHWPoison(page)) {
2570                 /*
2571                  * hwpoisoned dirty swapcache pages are kept for killing
2572                  * owner processes (which may be unknown at hwpoison time)
2573                  */
2574                 ret = VM_FAULT_HWPOISON;
2575                 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2576                 goto out_release;
2577         }
2578
2579         lock_page(page);
2580         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2581
2582         page = ksm_might_need_to_copy(page, vma, address);
2583         if (!page) {
2584                 ret = VM_FAULT_OOM;
2585                 goto out;
2586         }
2587
2588         if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
2589                 ret = VM_FAULT_OOM;
2590                 goto out_page;
2591         }
2592
2593         /*
2594          * Back out if somebody else already faulted in this pte.
2595          */
2596         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2597         if (unlikely(!pte_same(*page_table, orig_pte)))
2598                 goto out_nomap;
2599
2600         if (unlikely(!PageUptodate(page))) {
2601                 ret = VM_FAULT_SIGBUS;
2602                 goto out_nomap;
2603         }
2604
2605         /*
2606          * The page isn't present yet, go ahead with the fault.
2607          *
2608          * Be careful about the sequence of operations here.
2609          * To get its accounting right, reuse_swap_page() must be called
2610          * while the page is counted on swap but not yet in mapcount i.e.
2611          * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
2612          * must be called after the swap_free(), or it will never succeed.
2613          * Because delete_from_swap_cache() may be called by reuse_swap_page(),
2614          * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
2615          * in page->private. In this case, a record in swap_cgroup is silently
2616          * discarded at swap_free().
2617          */
2618
2619         inc_mm_counter(mm, MM_ANONPAGES);
2620         pte = mk_pte(page, vma->vm_page_prot);
2621         if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
2622                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2623                 flags &= ~FAULT_FLAG_WRITE;
2624         }
2625         flush_icache_page(vma, page);
2626         set_pte_at(mm, address, page_table, pte);
2627         page_add_anon_rmap(page, vma, address);
2628         /* It's better to call commit-charge after rmap is established */
2629         mem_cgroup_commit_charge_swapin(page, ptr);
2630
2631         swap_free(entry);
2632         if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2633                 try_to_free_swap(page);
2634         unlock_page(page);
2635
2636         if (flags & FAULT_FLAG_WRITE) {
2637                 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
2638                 if (ret & VM_FAULT_ERROR)
2639                         ret &= VM_FAULT_ERROR;
2640                 goto out;
2641         }
2642
2643         /* No need to invalidate - it was non-present before */
2644         update_mmu_cache(vma, address, page_table);
2645 unlock:
2646         pte_unmap_unlock(page_table, ptl);
2647 out:
2648         return ret;
2649 out_nomap:
2650         mem_cgroup_cancel_charge_swapin(ptr);
2651         pte_unmap_unlock(page_table, ptl);
2652 out_page:
2653         unlock_page(page);
2654 out_release:
2655         page_cache_release(page);
2656         return ret;
2657 }
2658
2659 /*
2660  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2661  * but allow concurrent faults), and pte mapped but not yet locked.
2662  * We return with mmap_sem still held, but pte unmapped and unlocked.
2663  */
2664 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2665                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2666                 unsigned int flags)
2667 {
2668         struct page *page;
2669         spinlock_t *ptl;
2670         pte_t entry;
2671
2672         if (!(flags & FAULT_FLAG_WRITE)) {
2673                 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
2674                                                 vma->vm_page_prot));
2675                 ptl = pte_lockptr(mm, pmd);
2676                 spin_lock(ptl);
2677                 if (!pte_none(*page_table))
2678                         goto unlock;
2679                 goto setpte;
2680         }
2681
2682         /* Allocate our own private page. */
2683         pte_unmap(page_table);
2684
2685         if (unlikely(anon_vma_prepare(vma)))
2686                 goto oom;
2687         page = alloc_zeroed_user_highpage_movable(vma, address);
2688         if (!page)
2689                 goto oom;
2690         __SetPageUptodate(page);
2691
2692         if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
2693                 goto oom_free_page;
2694
2695         entry = mk_pte(page, vma->vm_page_prot);
2696         if (vma->vm_flags & VM_WRITE)
2697                 entry = pte_mkwrite(pte_mkdirty(entry));
2698
2699         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2700         if (!pte_none(*page_table))
2701                 goto release;
2702
2703         inc_mm_counter(mm, MM_ANONPAGES);
2704         page_add_new_anon_rmap(page, vma, address);
2705 setpte:
2706         set_pte_at(mm, address, page_table, entry);
2707
2708         /* No need to invalidate - it was non-present before */
2709         update_mmu_cache(vma, address, page_table);
2710 unlock:
2711         pte_unmap_unlock(page_table, ptl);
2712         return 0;
2713 release:
2714         mem_cgroup_uncharge_page(page);
2715         page_cache_release(page);
2716         goto unlock;
2717 oom_free_page:
2718         page_cache_release(page);
2719 oom:
2720         return VM_FAULT_OOM;
2721 }
2722
2723 /*
2724  * __do_fault() tries to create a new page mapping. It aggressively
2725  * tries to share with existing pages, but makes a separate copy if
2726  * FAULT_FLAG_WRITE is set in the flags parameter, in order to avoid
2727  * the next page fault.
2728  *
2729  * As this is called only for pages that do not currently exist, we
2730  * do not need to flush old virtual caches or the TLB.
2731  *
2732  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2733  * but allow concurrent faults), and pte neither mapped nor locked.
2734  * We return with mmap_sem still held, but pte unmapped and unlocked.
2735  */
2736 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2737                 unsigned long address, pmd_t *pmd,
2738                 pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
2739 {
2740         pte_t *page_table;
2741         spinlock_t *ptl;
2742         struct page *page;
2743         pte_t entry;
2744         int anon = 0;
2745         int charged = 0;
2746         struct page *dirty_page = NULL;
2747         struct vm_fault vmf;
2748         int ret;
2749         int page_mkwrite = 0;
2750
2751         vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2752         vmf.pgoff = pgoff;
2753         vmf.flags = flags;
2754         vmf.page = NULL;
2755
2756         ret = vma->vm_ops->fault(vma, &vmf);
2757         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2758                 return ret;
2759
2760         if (unlikely(PageHWPoison(vmf.page))) {
2761                 if (ret & VM_FAULT_LOCKED)
2762                         unlock_page(vmf.page);
2763                 return VM_FAULT_HWPOISON;
2764         }
2765
2766         /*
2767          * For consistency in subsequent calls, make the faulted page always
2768          * locked.
2769          */
2770         if (unlikely(!(ret & VM_FAULT_LOCKED)))
2771                 lock_page(vmf.page);
2772         else
2773                 VM_BUG_ON(!PageLocked(vmf.page));
2774
2775         /*
2776          * Should we do an early C-O-W break?
2777          */
2778         page = vmf.page;
2779         if (flags & FAULT_FLAG_WRITE) {
2780                 if (!(vma->vm_flags & VM_SHARED)) {
2781                         anon = 1;
2782                         if (unlikely(anon_vma_prepare(vma))) {
2783                                 ret = VM_FAULT_OOM;
2784                                 goto out;
2785                         }
2786                         page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
2787                                                 vma, address);
2788                         if (!page) {
2789                                 ret = VM_FAULT_OOM;
2790                                 goto out;
2791                         }
2792                         if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
2793                                 ret = VM_FAULT_OOM;
2794                                 page_cache_release(page);
2795                                 goto out;
2796                         }
2797                         charged = 1;
2798                         /*
2799                          * Don't let another task, with possibly unlocked vma,
2800                          * keep the mlocked page.
2801                          */
2802                         if (vma->vm_flags & VM_LOCKED)
2803                                 clear_page_mlock(vmf.page);
2804                         copy_user_highpage(page, vmf.page, address, vma);
2805                         __SetPageUptodate(page);
2806                 } else {
2807                         /*
2808                          * If the page will be shareable, see if the backing
2809                          * address space wants to know that the page is about
2810                          * to become writable
2811                          */
2812                         if (vma->vm_ops->page_mkwrite) {
2813                                 int tmp;
2814
2815                                 unlock_page(page);
2816                                 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2817                                 tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
2818                                 if (unlikely(tmp &
2819                                           (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2820                                         ret = tmp;
2821                                         goto unwritable_page;
2822                                 }
2823                                 if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
2824                                         lock_page(page);
2825                                         if (!page->mapping) {
2826                                                 ret = 0; /* retry the fault */
2827                                                 unlock_page(page);
2828                                                 goto unwritable_page;
2829                                         }
2830                                 } else
2831                                         VM_BUG_ON(!PageLocked(page));
2832                                 page_mkwrite = 1;
2833                         }
2834                 }
2835
2836         }
2837
2838         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2839
2840         /*
2841          * This silly early PAGE_DIRTY setting removes a race
2842          * due to the bad i386 page protection. But it's valid
2843          * for other architectures too.
2844          *
2845          * Note that if FAULT_FLAG_WRITE is set, we either now have
2846          * an exclusive copy of the page, or this is a shared mapping,
2847          * so we can make it writable and dirty to avoid having to
2848          * handle that later.
2849          */
2850         /* Only go through if we didn't race with anybody else... */
2851         if (likely(pte_same(*page_table, orig_pte))) {
2852                 flush_icache_page(vma, page);
2853                 entry = mk_pte(page, vma->vm_page_prot);
2854                 if (flags & FAULT_FLAG_WRITE)
2855                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2856                 if (anon) {
2857                         inc_mm_counter(mm, MM_ANONPAGES);
2858                         page_add_new_anon_rmap(page, vma, address);
2859                 } else {
2860                         inc_mm_counter(mm, MM_FILEPAGES);
2861                         page_add_file_rmap(page);
2862                         if (flags & FAULT_FLAG_WRITE) {
2863                                 dirty_page = page;
2864                                 get_page(dirty_page);
2865                         }
2866                 }
2867                 set_pte_at(mm, address, page_table, entry);
2868
2869                 /* no need to invalidate: a not-present page won't be cached */
2870                 update_mmu_cache(vma, address, page_table);
2871         } else {
2872                 if (charged)
2873                         mem_cgroup_uncharge_page(page);
2874                 if (anon)
2875                         page_cache_release(page);
2876                 else
2877                         anon = 1; /* not anon, but release the faulted page below */
2878         }
2879
2880         pte_unmap_unlock(page_table, ptl);
2881
2882 out:
2883         if (dirty_page) {
2884                 struct address_space *mapping = page->mapping;
2885
2886                 if (set_page_dirty(dirty_page))
2887                         page_mkwrite = 1;
2888                 unlock_page(dirty_page);
2889                 put_page(dirty_page);
2890                 if (page_mkwrite && mapping) {
2891                         /*
2892                          * Some device drivers do not set page->mapping but
2893                          * still dirty their pages.
2894                          */
2895                         balance_dirty_pages_ratelimited(mapping);
2896                 }
2897
2898                 /* file_update_time outside page_lock */
2899                 if (vma->vm_file)
2900                         file_update_time(vma->vm_file);
2901         } else {
2902                 unlock_page(vmf.page);
2903                 if (anon)
2904                         page_cache_release(vmf.page);
2905         }
2906
2907         return ret;
2908
2909 unwritable_page:
2910         page_cache_release(page);
2911         return ret;
2912 }
2913
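/*
 * A "linear" fault: the faulting address corresponds to the file offset
 * implied by the vma (vm_pgoff plus the page offset into the vma), so
 * compute that pgoff and let __do_fault() do the real work.
 */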
2914 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2915                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2916                 unsigned int flags, pte_t orig_pte)
2917 {
2918         pgoff_t pgoff = (((address & PAGE_MASK)
2919                         - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2920
2921         pte_unmap(page_table);
2922         return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2923 }
2924
2925 /*
2926  * Fault of a previously existing named mapping. Repopulate the pte
2927  * from the encoded file_pte if possible. This enables swappable
2928  * nonlinear vmas.
2929  *
2930  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2931  * but allow concurrent faults), and pte mapped but not yet locked.
2932  * We return with mmap_sem still held, but pte unmapped and unlocked.
2933  */
2934 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2935                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2936                 unsigned int flags, pte_t orig_pte)
2937 {
2938         pgoff_t pgoff;
2939
2940         flags |= FAULT_FLAG_NONLINEAR;
2941
2942         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2943                 return 0;
2944
2945         if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
2946                 /*
2947                  * Page table corrupted: show pte and kill process.
2948                  */
2949                 print_bad_pte(vma, address, orig_pte, NULL);
2950                 return VM_FAULT_SIGBUS;
2951         }
2952
2953         pgoff = pte_to_pgoff(orig_pte);
2954         return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2955 }
2956
2957 /*
2958  * These routines also need to handle stuff like marking pages dirty
2959  * and/or accessed for architectures that don't do it in hardware (most
2960  * RISC architectures).  The early dirtying is also good on the i386.
2961  *
2962  * There is also a hook called "update_mmu_cache()" that architectures
2963  * with external mmu caches can use to update those (i.e. the Sparc or
2964  * PowerPC hashed page tables that act as extended TLBs).
2965  *
2966  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2967  * but allow concurrent faults), and pte mapped but not yet locked.
2968  * We return with mmap_sem still held, but pte unmapped and unlocked.
2969  */
2970 static inline int handle_pte_fault(struct mm_struct *mm,
2971                 struct vm_area_struct *vma, unsigned long address,
2972                 pte_t *pte, pmd_t *pmd, unsigned int flags)
2973 {
2974         pte_t entry;
2975         spinlock_t *ptl;
2976
2977         entry = *pte;
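        /*
         * A non-present pte is dispatched on its encoding: none means a
         * first-touch fault (file-backed or anonymous), a file pte is a
         * nonlinear mapping, anything else is a swap entry.
         */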
2978         if (!pte_present(entry)) {
2979                 if (pte_none(entry)) {
2980                         if (vma->vm_ops) {
2981                                 if (likely(vma->vm_ops->fault))
2982                                         return do_linear_fault(mm, vma, address,
2983                                                 pte, pmd, flags, entry);
2984                         }
2985                         return do_anonymous_page(mm, vma, address,
2986                                                  pte, pmd, flags);
2987                 }
2988                 if (pte_file(entry))
2989                         return do_nonlinear_fault(mm, vma, address,
2990                                         pte, pmd, flags, entry);
2991                 return do_swap_page(mm, vma, address,
2992                                         pte, pmd, flags, entry);
2993         }
2994
2995         ptl = pte_lockptr(mm, pmd);
2996         spin_lock(ptl);
2997         if (unlikely(!pte_same(*pte, entry)))
2998                 goto unlock;
2999         if (flags & FAULT_FLAG_WRITE) {
3000                 if (!pte_write(entry))
3001                         return do_wp_page(mm, vma, address,
3002                                         pte, pmd, ptl, entry);
3003                 entry = pte_mkdirty(entry);
3004         }
3005         entry = pte_mkyoung(entry);
3006         if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
3007                 update_mmu_cache(vma, address, pte);
3008         } else {
3009                 /*
3010                  * This is needed only for protection faults but the arch code
3011                  * is not yet telling us if this is a protection fault or not.
3012                  * This still avoids useless tlb flushes for .text page faults
3013                  * with threads.
3014                  */
3015                 if (flags & FAULT_FLAG_WRITE)
3016                         flush_tlb_page(vma, address);
3017         }
3018 unlock:
3019         pte_unmap_unlock(pte, ptl);
3020         return 0;
3021 }
3022
3023 /*
3024  * By the time we get here, we already hold the mm semaphore
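 * Returns a VM_FAULT_xxx bitmask; the VM_FAULT_ERROR bits indicate
 * failure (e.g. VM_FAULT_OOM, VM_FAULT_SIGBUS).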
3025  */
3026 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3027                 unsigned long address, unsigned int flags)
3028 {
3029         pgd_t *pgd;
3030         pud_t *pud;
3031         pmd_t *pmd;
3032         pte_t *pte;
3033
3034         __set_current_state(TASK_RUNNING);
3035
3036         count_vm_event(PGFAULT);
3037
3038         if (unlikely(is_vm_hugetlb_page(vma)))
3039                 return hugetlb_fault(mm, vma, address, flags);
3040
3041         pgd = pgd_offset(mm, address);
3042         pud = pud_alloc(mm, pgd, address);
3043         if (!pud)
3044                 return VM_FAULT_OOM;
3045         pmd = pmd_alloc(mm, pud, address);
3046         if (!pmd)
3047                 return VM_FAULT_OOM;
3048         pte = pte_alloc_map(mm, pmd, address);
3049         if (!pte)
3050                 return VM_FAULT_OOM;
3051
3052         return handle_pte_fault(mm, vma, address, pte, pmd, flags);
3053 }
3054
3055 #ifndef __PAGETABLE_PUD_FOLDED
3056 /*
3057  * Allocate page upper directory.
3058  * We've already handled the fast-path in-line.
3059  */
3060 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
3061 {
3062         pud_t *new = pud_alloc_one(mm, address);
3063         if (!new)
3064                 return -ENOMEM;
3065
3066         smp_wmb(); /* See comment in __pte_alloc */
3067
3068         spin_lock(&mm->page_table_lock);
3069         if (pgd_present(*pgd))          /* Another has populated it */
3070                 pud_free(mm, new);
3071         else
3072                 pgd_populate(mm, pgd, new);
3073         spin_unlock(&mm->page_table_lock);
3074         return 0;
3075 }
3076 #endif /* __PAGETABLE_PUD_FOLDED */
3077
3078 #ifndef __PAGETABLE_PMD_FOLDED
3079 /*
3080  * Allocate page middle directory.
3081  * We've already handled the fast-path in-line.
3082  */
3083 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3084 {
3085         pmd_t *new = pmd_alloc_one(mm, address);
3086         if (!new)
3087                 return -ENOMEM;
3088
3089         smp_wmb(); /* See comment in __pte_alloc */
3090
3091         spin_lock(&mm->page_table_lock);
3092 #ifndef __ARCH_HAS_4LEVEL_HACK
3093         if (pud_present(*pud))          /* Another has populated it */
3094                 pmd_free(mm, new);
3095         else
3096                 pud_populate(mm, pud, new);
3097 #else
3098         if (pgd_present(*pud))          /* Another has populated it */
3099                 pmd_free(mm, new);
3100         else
3101                 pgd_populate(mm, pud, new);
3102 #endif /* __ARCH_HAS_4LEVEL_HACK */
3103         spin_unlock(&mm->page_table_lock);
3104         return 0;
3105 }
3106 #endif /* __PAGETABLE_PMD_FOLDED */
3107
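/*
 * Fault in every page in [addr, end) of the current task's mm, e.g. on
 * behalf of mlock().  Returns 0 when all pages were made present.
 */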
3108 int make_pages_present(unsigned long addr, unsigned long end)
3109 {
3110         int ret, len, write;
3111         struct vm_area_struct * vma;
3112
3113         vma = find_vma(current->mm, addr);
3114         if (!vma)
3115                 return -ENOMEM;
3116         write = (vma->vm_flags & VM_WRITE) != 0;
3117         BUG_ON(addr >= end);
3118         BUG_ON(end > vma->vm_end);
3119         len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
3120         ret = get_user_pages(current, current->mm, addr,
3121                         len, write, 0, NULL, NULL);
3122         if (ret < 0)
3123                 return ret;
3124         return ret == len ? 0 : -EFAULT;
3125 }
3126
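/*
 * The "gate" vma describes a kernel-provided user mapping (such as the
 * vsyscall/vDSO page) that is present in every process but has no
 * entry in the mm's vma list.
 */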
3127 #if !defined(__HAVE_ARCH_GATE_AREA)
3128
3129 #if defined(AT_SYSINFO_EHDR)
3130 static struct vm_area_struct gate_vma;
3131
3132 static int __init gate_vma_init(void)
3133 {
3134         gate_vma.vm_mm = NULL;
3135         gate_vma.vm_start = FIXADDR_USER_START;
3136         gate_vma.vm_end = FIXADDR_USER_END;
3137         gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
3138         gate_vma.vm_page_prot = __P101;
3139         /*
3140          * Make sure the vDSO gets into every core dump.
3141          * Dumping its contents makes post-mortem debugging fully
3142          * interpretable later, without having to match up the same kernel
3143          * and hardware config to see what the PC values meant.
3144          */
3145         gate_vma.vm_flags |= VM_ALWAYSDUMP;
3146         return 0;
3147 }
3148 __initcall(gate_vma_init);
3149 #endif
3150
3151 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
3152 {
3153 #ifdef AT_SYSINFO_EHDR
3154         return &gate_vma;
3155 #else
3156         return NULL;
3157 #endif
3158 }
3159
3160 int in_gate_area_no_task(unsigned long addr)
3161 {
3162 #ifdef AT_SYSINFO_EHDR
3163         if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
3164                 return 1;
3165 #endif
3166         return 0;
3167 }
3168
3169 #endif  /* __HAVE_ARCH_GATE_AREA */
3170
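/*
 * Walk the page tables for @address and, if a present pte exists,
 * return it mapped and locked through @ptepp/@ptlp; the caller must
 * pte_unmap_unlock() it.  Returns -EINVAL if no present pte is found.
 */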
3171 static int follow_pte(struct mm_struct *mm, unsigned long address,
3172                 pte_t **ptepp, spinlock_t **ptlp)
3173 {
3174         pgd_t *pgd;
3175         pud_t *pud;
3176         pmd_t *pmd;
3177         pte_t *ptep;
3178
3179         pgd = pgd_offset(mm, address);
3180         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
3181                 goto out;
3182
3183         pud = pud_offset(pgd, address);
3184         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
3185                 goto out;
3186
3187         pmd = pmd_offset(pud, address);
3188         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
3189                 goto out;
3190
3191         /* We cannot handle huge page PFN maps. Luckily they don't exist. */
3192         if (pmd_huge(*pmd))
3193                 goto out;
3194
3195         ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3196         if (!ptep)
3197                 goto out;
3198         if (!pte_present(*ptep))
3199                 goto unlock;
3200         *ptepp = ptep;
3201         return 0;
3202 unlock:
3203         pte_unmap_unlock(ptep, *ptlp);
3204 out:
3205         return -EINVAL;
3206 }
3207
3208 /**
3209  * follow_pfn - look up PFN at a user virtual address
3210  * @vma: memory mapping
3211  * @address: user virtual address
3212  * @pfn: location to store found PFN
3213  *
3214  * Only IO mappings and raw PFN mappings are allowed.
3215  *
3216  * Returns zero and the pfn at @pfn on success, or a negative errno otherwise.
3217  */
3218 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
3219         unsigned long *pfn)
3220 {
3221         int ret = -EINVAL;
3222         spinlock_t *ptl;
3223         pte_t *ptep;
3224
3225         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3226                 return ret;
3227
3228         ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3229         if (ret)
3230                 return ret;
3231         *pfn = pte_pfn(*ptep);
3232         pte_unmap_unlock(ptep, ptl);
3233         return 0;
3234 }
3235 EXPORT_SYMBOL(follow_pfn);
3236
3237 #ifdef CONFIG_HAVE_IOREMAP_PROT
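/*
 * Resolve a user virtual address in a VM_IO/VM_PFNMAP mapping to its
 * physical address and page protection bits.
 */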
3238 int follow_phys(struct vm_area_struct *vma,
3239                 unsigned long address, unsigned int flags,
3240                 unsigned long *prot, resource_size_t *phys)
3241 {
3242         int ret = -EINVAL;
3243         pte_t *ptep, pte;
3244         spinlock_t *ptl;
3245
3246         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
3247                 goto out;
3248
3249         if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
3250                 goto out;
3251         pte = *ptep;
3252
3253         if ((flags & FOLL_WRITE) && !pte_write(pte))
3254                 goto unlock;
3255
3256         *prot = pgprot_val(pte_pgprot(pte));
3257         *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
3258
3259         ret = 0;
3260 unlock:
3261         pte_unmap_unlock(ptep, ptl);
3262 out:
3263         return ret;
3264 }
3265
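/*
 * Access IO-mapped memory through its physical address; this is what
 * allows e.g. ptrace to poke at VM_IO mappings.  A driver can wire it
 * up as its ->access method (hypothetical example):
 *
 *	static const struct vm_operations_struct foo_vm_ops = {
 *		.access	= generic_access_phys,
 *	};
 */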
3266 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
3267                         void *buf, int len, int write)
3268 {
3269         resource_size_t phys_addr;
3270         unsigned long prot = 0;
3271         void __iomem *maddr;
3272         int offset = addr & (PAGE_SIZE-1);
3273
3274         if (follow_phys(vma, addr, write, &prot, &phys_addr))
3275                 return -EINVAL;
3276
3277         maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
3278         if (write)
3279                 memcpy_toio(maddr + offset, buf, len);
3280         else
3281                 memcpy_fromio(buf, maddr + offset, len);
3282         iounmap(maddr);
3283
3284         return len;
3285 }
3286 #endif
3287
3288 /*
3289  * Access another process' address space.
3290  * The source/target buffer must be in kernel space.
3291  * Do not walk the page tables directly; use get_user_pages().
3292  */
3293 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
3294 {
3295         struct mm_struct *mm;
3296         struct vm_area_struct *vma;
3297         void *old_buf = buf;
3298
3299         mm = get_task_mm(tsk);
3300         if (!mm)
3301                 return 0;
3302
3303         down_read(&mm->mmap_sem);
3304         /* ignore errors, just check how much was successfully transferred */
3305         while (len) {
3306                 int bytes, ret, offset;
3307                 void *maddr;
3308                 struct page *page = NULL;
3309
3310                 ret = get_user_pages(tsk, mm, addr, 1,
3311                                 write, 1, &page, &vma);
3312                 if (ret <= 0) {
3313                         /*
3314                          * Check if this is a VM_IO | VM_PFNMAP VMA, which
3315                          * we can access using slightly different code.
3316                          */
3317 #ifdef CONFIG_HAVE_IOREMAP_PROT
3318                         vma = find_vma(mm, addr);
3319                         if (!vma)
3320                                 break;
3321                         if (vma->vm_ops && vma->vm_ops->access)
3322                                 ret = vma->vm_ops->access(vma, addr, buf,
3323                                                           len, write);
3324                         if (ret <= 0)
3325 #endif
3326                                 break;
3327                         bytes = ret;
3328                 } else {
3329                         bytes = len;
3330                         offset = addr & (PAGE_SIZE-1);
3331                         if (bytes > PAGE_SIZE-offset)
3332                                 bytes = PAGE_SIZE-offset;
3333
3334                         maddr = kmap(page);
3335                         if (write) {
3336                                 copy_to_user_page(vma, page, addr,
3337                                                   maddr + offset, buf, bytes);
3338                                 set_page_dirty_lock(page);
3339                         } else {
3340                                 copy_from_user_page(vma, page, addr,
3341                                                     buf, maddr + offset, bytes);
3342                         }
3343                         kunmap(page);
3344                         page_cache_release(page);
3345                 }
3346                 len -= bytes;
3347                 buf += bytes;
3348                 addr += bytes;
3349         }
3350         up_read(&mm->mmap_sem);
3351         mmput(mm);
3352
3353         return buf - old_buf;
3354 }
3355
3356 /*
3357  * Print the file name and address range of the vma that contains @ip.
3358  */
3359 void print_vma_addr(char *prefix, unsigned long ip)
3360 {
3361         struct mm_struct *mm = current->mm;
3362         struct vm_area_struct *vma;
3363
3364         /*
3365          * Do not print if we are in an atomic
3366          * context (exception stacks, etc.):
3367          */
3368         if (preempt_count())
3369                 return;
3370
3371         down_read(&mm->mmap_sem);
3372         vma = find_vma(mm, ip);
3373         if (vma && vma->vm_file) {
3374                 struct file *f = vma->vm_file;
3375                 char *buf = (char *)__get_free_page(GFP_KERNEL);
3376                 if (buf) {
3377                         char *p, *s;
3378
3379                         p = d_path(&f->f_path, buf, PAGE_SIZE);
3380                         if (IS_ERR(p))
3381                                 p = "?";
3382                         s = strrchr(p, '/');
3383                         if (s)
3384                                 p = s+1;
3385                         printk("%s%s[%lx+%lx]", prefix, p,
3386                                         vma->vm_start,
3387                                         vma->vm_end - vma->vm_start);
3388                         free_page((unsigned long)buf);
3389                 }
3390         }
3391         up_read(&mm->mmap_sem);
3392 }
3393
3394 #ifdef CONFIG_PROVE_LOCKING
3395 void might_fault(void)
3396 {
3397         /*
3398          * Some code (nfs/sunrpc) uses socket ops on kernel memory while
3399          * holding the mmap_sem; this is safe because kernel memory doesn't
3400          * get paged out, so we'll never actually fault, and the
3401          * annotations below would only generate false positives.
3402          */
3403         if (segment_eq(get_fs(), KERNEL_DS))
3404                 return;
3405
3406         might_sleep();
3407         /*
3408          * It would be nicer to annotate only the paths that are not under
3409          * pagefault_disable(); however, that requires a larger audit and
3410          * helpers like get_user_atomic.
3411          */
3412         if (!in_atomic() && current->mm)
3413                 might_lock_read(&current->mm->mmap_sem);
3414 }
3415 EXPORT_SYMBOL(might_fault);
3416 #endif