mm: merge populate and nopage into fault (fixes nonlinear)
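This patch replaces the two vm_operations callbacks, ->nopage (linear faults) and ->populate (nonlinear, remap_file_pages mappings), with a single ->fault callback. A minimal sketch of the interface change follows; the old signatures are the 2.6-era ones, and the fault_data field types are inferred from how the diff below uses them, so treat this as illustration rather than the patch's literal header changes (which are not part of this diff):

	/* Old interface: two callbacks, which forced filesystems to
	 * duplicate fault handling for linear and nonlinear mappings. */
	struct page *(*nopage)(struct vm_area_struct *area,
				unsigned long address, int *type);
	int (*populate)(struct vm_area_struct *area, unsigned long address,
			unsigned long len, pgprot_t prot,
			unsigned long pgoff, int nonblock);

	/* New interface: one callback. The MM core computes the page
	 * offset and passes it in, so the handler no longer cares
	 * whether the mapping is linear or nonlinear. */
	struct fault_data {
		unsigned long address;	/* faulting virtual address */
		pgoff_t pgoff;		/* fault offset in the file, in pages */
		unsigned int flags;
		int type;	/* out: VM_FAULT_{MINOR,MAJOR,SIGBUS,OOM} */
	};
	struct page *(*fault)(struct vm_area_struct *vma,
				struct fault_data *fdata);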
diff --git a/mm/filemap.c b/mm/filemap.c
index 462cda58a18e0f1863d73637c2165e722ff3f531..26b992d169e5dcd87af82f6912fe450dcd7b519d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1301,40 +1301,38 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)
 #define MMAP_LOTSAMISS  (100)
 
 /**
- * filemap_nopage - read in file data for page fault handling
- * @area:      the applicable vm_area
- * @address:   target address to read in
- * @type:      returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
+ * filemap_fault - read in file data for page fault handling
+ * @vma:       vma in which the fault occurred
+ * @fdata:     the applicable fault_data
  *
- * filemap_nopage() is invoked via the vma operations vector for a
+ * filemap_fault() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
  * The goto's are kind of ugly, but this streamlines the normal case of having
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  */
-struct page *filemap_nopage(struct vm_area_struct *area,
-                               unsigned long address, int *type)
+struct page *filemap_fault(struct vm_area_struct *vma, struct fault_data *fdata)
 {
        int error;
-       struct file *file = area->vm_file;
+       struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
        struct page *page;
-       unsigned long size, pgoff;
-       int did_readaround = 0, majmin = VM_FAULT_MINOR;
+       unsigned long size;
+       int did_readaround = 0;
 
-       BUG_ON(!(area->vm_flags & VM_CAN_INVALIDATE));
+       fdata->type = VM_FAULT_MINOR;
 
-       pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
+       BUG_ON(!(vma->vm_flags & VM_CAN_INVALIDATE));
 
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (pgoff >= size)
+       if (fdata->pgoff >= size)
                goto outside_data_content;
 
        /* If we don't want any read-ahead, don't bother */
-       if (VM_RandomReadHint(area))
+       if (VM_RandomReadHint(vma))
                goto no_cached_page;
 
        /*
@@ -1343,19 +1341,19 @@ struct page *filemap_nopage(struct vm_area_struct *area,
         *
         * For sequential accesses, we use the generic readahead logic.
         */
-       if (VM_SequentialReadHint(area))
-               page_cache_readahead(mapping, ra, file, pgoff, 1);
+       if (VM_SequentialReadHint(vma))
+               page_cache_readahead(mapping, ra, file, fdata->pgoff, 1);
 
        /*
         * Do we have something in the page cache already?
         */
 retry_find:
-       page = find_lock_page(mapping, pgoff);
+       page = find_lock_page(mapping, fdata->pgoff);
        if (!page) {
                unsigned long ra_pages;
 
-               if (VM_SequentialReadHint(area)) {
-                       handle_ra_miss(mapping, ra, pgoff);
+               if (VM_SequentialReadHint(vma)) {
+                       handle_ra_miss(mapping, ra, fdata->pgoff);
                        goto no_cached_page;
                }
                ra->mmap_miss++;
@@ -1372,7 +1370,7 @@ retry_find:
                 * check did_readaround, as this is an inner loop.
                 */
                if (!did_readaround) {
-                       majmin = VM_FAULT_MAJOR;
+                       fdata->type = VM_FAULT_MAJOR;
                        count_vm_event(PGMAJFAULT);
                }
                did_readaround = 1;
@@ -1380,11 +1378,11 @@ retry_find:
                if (ra_pages) {
                        pgoff_t start = 0;
 
-                       if (pgoff > ra_pages / 2)
-                               start = pgoff - ra_pages / 2;
+                       if (fdata->pgoff > ra_pages / 2)
+                               start = fdata->pgoff - ra_pages / 2;
                        do_page_cache_readahead(mapping, file, start, ra_pages);
                }
-               page = find_lock_page(mapping, pgoff);
+               page = find_lock_page(mapping, fdata->pgoff);
                if (!page)
                        goto no_cached_page;
        }
@@ -1401,7 +1399,7 @@ retry_find:
 
        /* Must recheck i_size under page lock */
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (unlikely(pgoff >= size)) {
+       if (unlikely(fdata->pgoff >= size)) {
                unlock_page(page);
                goto outside_data_content;
        }
@@ -1410,8 +1408,6 @@ retry_find:
         * Found the page and have a reference on it.
         */
        mark_page_accessed(page);
-       if (type)
-               *type = majmin;
        return page;
 
 outside_data_content:
@@ -1419,15 +1415,17 @@ outside_data_content:
         * An external ptracer can access pages that normally aren't
         * accessible..
         */
-       if (area->vm_mm == current->mm)
-               return NOPAGE_SIGBUS;
+       if (vma->vm_mm == current->mm) {
+               fdata->type = VM_FAULT_SIGBUS;
+               return NULL;
+       }
        /* Fall through to the non-read-ahead case */
 no_cached_page:
        /*
         * We're only likely to ever get here if MADV_RANDOM is in
         * effect.
         */
-       error = page_cache_read(file, pgoff);
+       error = page_cache_read(file, fdata->pgoff);
 
        /*
         * The page we want has now been added to the page cache.
@@ -1443,13 +1441,15 @@ no_cached_page:
         * to schedule I/O.
         */
        if (error == -ENOMEM)
-               return NOPAGE_OOM;
-       return NOPAGE_SIGBUS;
+               fdata->type = VM_FAULT_OOM;
+       else
+               fdata->type = VM_FAULT_SIGBUS;
+       return NULL;
 
 page_not_uptodate:
        /* IO error path */
        if (!did_readaround) {
-               majmin = VM_FAULT_MAJOR;
+               fdata->type = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
        }
 
@@ -1468,7 +1468,30 @@ page_not_uptodate:
 
        /* Things didn't work out. Return zero to tell the mm layer so. */
        shrink_readahead_size_eio(file, ra);
-       return NOPAGE_SIGBUS;
+       fdata->type = VM_FAULT_SIGBUS;
+       return NULL;
+}
+EXPORT_SYMBOL(filemap_fault);
+
+/*
+ * filemap_nopage and filemap_populate are legacy exports that are not used
+ * in tree. Scheduled for removal.
+ */
+struct page *filemap_nopage(struct vm_area_struct *area,
+                               unsigned long address, int *type)
+{
+       struct page *page;
+       struct fault_data fdata;
+       fdata.address = address;
+       fdata.pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
+                       + area->vm_pgoff;
+       fdata.flags = 0;
+
+       page = filemap_fault(area, &fdata);
+       if (type)
+               *type = fdata.type;
+
+       return page;
 }
 EXPORT_SYMBOL(filemap_nopage);
 
@@ -1646,8 +1669,7 @@ repeat:
 EXPORT_SYMBOL(filemap_populate);
 
 struct vm_operations_struct generic_file_vm_ops = {
-       .nopage         = filemap_nopage,
-       .populate       = filemap_populate,
+       .fault          = filemap_fault,
 };
 
 /* This is used for a general mmap of a disk file */
@@ -1660,7 +1682,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
-       vma->vm_flags |= VM_CAN_INVALIDATE;
+       vma->vm_flags |= VM_CAN_INVALIDATE | VM_CAN_NONLINEAR;
        return 0;
 }