/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
static struct page *__xip_sparse_page;

static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page) {
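			/*
			 * Re-check under the lock: another task may have
			 * installed the sparse page while we were allocating
			 * ours; if so, drop our copy and reuse the winner's.
			 */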
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = page;
			else
				__free_page(page);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all. It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;
		if (nr > len)
			nr = len;

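		/*
		 * Look up the kernel address for this page; create=0, so
		 * a hole is reported as -ENODATA rather than allocated.
		 */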
		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used.
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

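		/*
		 * left is zero at this point, so all nr bytes were
		 * transferred; advance, letting offset carry over into
		 * the next page index.
		 */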
		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping,
	    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
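		/*
		 * page_check_address() returns with the pte mapped and
		 * its spinlock held if the sparse page is mapped at this
		 * address, or NULL otherwise.
		 */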
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
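			/*
			 * The sparse page is only ever mapped read-only;
			 * writes fault and allocate a real block instead,
			 * so a dirty pte here would indicate a bug.
			 */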
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
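		/*
		 * Map the pfn straight into the vma; VM_MIXEDMAP (set in
		 * xip_file_mmap) allows this vma to mix raw pfn mappings
		 * with normal struct-page-backed pages.
		 */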
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return VM_FAULT_OOM;

		page_cache_get(page);
		vmf->page = page;
		return 0;
	}
}

static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
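	/*
	 * VM_MIXEDMAP tells the core VM that ptes in this vma may point
	 * at raw pfns with no struct page behind them; vm_insert_mixed()
	 * in the fault handler depends on it.
	 */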
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block, then unmap the
			 * sparse page */
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

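		/*
		 * Copy the user data straight into the XIP backing store;
		 * the _nocache variant avoids polluting the CPU cache
		 * with data that will not be read back here.
		 */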
		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

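	/*
	 * generic_write_checks() adjusts pos for O_APPEND and clamps
	 * count against resource limits and the maximum file size.
	 */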
	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page, but uses
 * get_xip_mem to get the memory instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
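	/* zero the tail of the block, from the new EOF to the block end */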
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);