/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

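/*
 * Clear the pte at @addr, releasing whatever it mapped: drop the rmap
 * and page cache reference of a present page (marking it dirty if the
 * pte was dirty), drop the swap reference of a swap entry, or simply
 * clear a file pte.  Returns 1 if a present page was unmapped, which
 * lets callers keep the file_rss counter balanced.
 */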
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page = NULL;

	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		flush_cache_page(vma, addr, pfn);
		pte = ptep_clear_flush(vma, addr, ptep);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, pte, addr);
			goto out;
		}
		page = pfn_to_page(pfn);
		if (pte_dirty(pte))
			set_page_dirty(page);
		page_remove_rmap(page);
		page_cache_release(page);
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
out:
	return !!page;
}

/*
 * Install a file page at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto err_unlock;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto err_unlock;
	err = -ENOMEM;
	if (page_mapcount(page) > INT_MAX/2)
		goto err_unlock;

	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
		inc_mm_counter(mm, file_rss);

	flush_icache_page(vma, page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));
	page_add_file_rmap(page);
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);

	err = 0;
err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
EXPORT_SYMBOL(install_page);
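
/*
 * Illustrative sketch (not from this file): install_page() and
 * install_file_pte() below are meant to be called from a filesystem's
 * vm_ops->populate method.  Roughly, per page of the request --
 * get_one_page() here is a hypothetical stand-in for the filesystem's
 * own page lookup:
 *
 *	for (; len > 0; len -= PAGE_SIZE, addr += PAGE_SIZE, ++pgoff) {
 *		struct page *page = get_one_page(vma->vm_file, pgoff,
 *						 nonblock);
 *		if (page)
 *			err = install_page(mm, vma, addr, page, prot);
 *		else if (nonblock)
 *			err = install_file_pte(mm, vma, addr, pgoff, prot);
 *		if (err)
 *			return err;
 *	}
 */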


/*
 * Install a file pte at a given virtual memory address, releasing any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto err_unlock;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
		update_hiwater_rss(mm);
		dec_mm_counter(mm, file_rss);
	}

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);
	spin_unlock(&mm->page_table_lock);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
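
/*
 * Note on the pte written above: pgoff_to_pte() encodes the file page
 * offset directly into a not-present pte for which pte_file() is true.
 * No page is mapped here; at fault time the handler recognizes the
 * file pte, recovers the offset with pte_to_pgoff(), and calls
 * ->populate again to bring the page in.  That round trip is what
 * makes nonlinear mappings safe across swapout.
 */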


/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
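/*
 * Illustrative userspace sketch (not part of the kernel source): map a
 * window of a file once with mmap(), then shuffle which file pages
 * appear where, without creating extra vmas:
 *
 *	size_t pg = sysconf(_SC_PAGESIZE);
 *	int fd = open("/some/file", O_RDWR);
 *	char *win = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// Make file page 3 appear at the start of the window;
 *	// prot must be 0, as noted above, and pgoff is in pages.
 *	remap_file_pages(win, pg, 0, 3, 0);
 */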
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif

	/*
	 * We need down_write() to change vma->vm_flags, so start with
	 * down_read() and upgrade to down_write() only if we must set
	 * VM_NONLINEAR below.
	 */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data ||
			(vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
			end <= vma->vm_end) {

		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
		    !(vma->vm_flags & VM_NONLINEAR)) {
			if (!has_write_lock) {
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				has_write_lock = 1;
				goto retry;
			}
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock. (Locks can't be upgraded).
		 */
	}
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}