1/*
2 * linux/mm/filemap.c
3 *
4 * Copyright (C) 1994-1999 Linus Torvalds
5 */
6
7/*
8 * This file handles the generic file mmap semantics used by
9 * most "normal" filesystems (but you don't /have/ to use this:
10 * the NFS filesystem used to do this differently, for example)
11 */
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/compiler.h>
15#include <linux/fs.h>
16#include <linux/uaccess.h>
17#include <linux/aio.h>
18#include <linux/capability.h>
19#include <linux/kernel_stat.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/mman.h>
23#include <linux/pagemap.h>
24#include <linux/file.h>
25#include <linux/uio.h>
26#include <linux/hash.h>
27#include <linux/writeback.h>
28#include <linux/backing-dev.h>
29#include <linux/pagevec.h>
30#include <linux/blkdev.h>
31#include <linux/security.h>
32#include <linux/syscalls.h>
33#include <linux/cpuset.h>
34#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
35#include <linux/memcontrol.h>
36#include "internal.h"
37
38/*
39 * FIXME: remove all knowledge of the buffer layer from the core VM
40 */
41#include <linux/buffer_head.h> /* for generic_osync_inode */
42
43#include <asm/mman.h>
44
45
46/*
47 * Shared mappings implemented 30.11.1994. It's not fully working yet,
48 * though.
49 *
50 * Shared mappings now work. 15.8.1995 Bruno.
51 *
52 * finished 'unifying' the page and buffer cache and SMP-threaded the
53 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
54 *
55 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
56 */
57
58/*
59 * Lock ordering:
60 *
61 * ->i_mmap_lock (vmtruncate)
62 * ->private_lock (__free_pte->__set_page_dirty_buffers)
63 * ->swap_lock (exclusive_swap_page, others)
64 * ->mapping->tree_lock
65 *
66 * ->i_mutex
67 * ->i_mmap_lock (truncate->unmap_mapping_range)
68 *
69 * ->mmap_sem
70 * ->i_mmap_lock
71 * ->page_table_lock or pte_lock (various, mainly in memory.c)
72 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
73 *
74 * ->mmap_sem
75 * ->lock_page (access_process_vm)
76 *
77 * ->i_mutex (generic_file_buffered_write)
78 * ->mmap_sem (fault_in_pages_readable->do_page_fault)
79 *
80 * ->i_mutex
81 * ->i_alloc_sem (various)
82 *
83 * ->inode_lock
84 * ->sb_lock (fs/fs-writeback.c)
85 * ->mapping->tree_lock (__sync_single_inode)
86 *
87 * ->i_mmap_lock
88 * ->anon_vma.lock (vma_adjust)
89 *
90 * ->anon_vma.lock
91 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
92 *
93 * ->page_table_lock or pte_lock
94 * ->swap_lock (try_to_unmap_one)
95 * ->private_lock (try_to_unmap_one)
96 * ->tree_lock (try_to_unmap_one)
97 * ->zone.lru_lock (follow_page->mark_page_accessed)
98 * ->zone.lru_lock (check_pte_range->isolate_lru_page)
99 * ->private_lock (page_remove_rmap->set_page_dirty)
100 * ->tree_lock (page_remove_rmap->set_page_dirty)
101 * ->inode_lock (page_remove_rmap->set_page_dirty)
102 * ->inode_lock (zap_pte_range->set_page_dirty)
103 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
104 *
105 * ->task->proc_lock
106 * ->dcache_lock (proc_pid_lookup)
107 */
108
109/*
110 * Remove a page from the page cache and free it. Caller has to make
111 * sure the page is locked and that nobody else uses it - or that usage
112 * is safe. The caller must hold a write_lock on the mapping's tree_lock.
113 */
114void __remove_from_page_cache(struct page *page)
115{
116 struct address_space *mapping = page->mapping;
117
118 mem_cgroup_uncharge_page(page);
119 radix_tree_delete(&mapping->page_tree, page->index);
120 page->mapping = NULL;
121 mapping->nrpages--;
122 __dec_zone_page_state(page, NR_FILE_PAGES);
123 BUG_ON(page_mapped(page));
124
125 /*
126 * Some filesystems seem to re-dirty the page even after
127 * the VM has canceled the dirty bit (eg ext3 journaling).
128 *
129 * Fix it up by doing a final dirty accounting check after
130 * having removed the page entirely.
131 */
132 if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
133 dec_zone_page_state(page, NR_FILE_DIRTY);
134 dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
135 }
136}
137
138void remove_from_page_cache(struct page *page)
139{
140 struct address_space *mapping = page->mapping;
141
142 BUG_ON(!PageLocked(page));
143
144 write_lock_irq(&mapping->tree_lock);
145 __remove_from_page_cache(page);
146 write_unlock_irq(&mapping->tree_lock);
147}
148
149static int sync_page(void *word)
150{
151 struct address_space *mapping;
152 struct page *page;
153
154 page = container_of((unsigned long *)word, struct page, flags);
155
156 /*
157 * page_mapping() is being called without PG_locked held.
158 * Some knowledge of the state and use of the page is used to
159 * reduce the requirements down to a memory barrier.
160 * The danger here is of a stale page_mapping() return value
161 * indicating a struct address_space different from the one it's
162 * associated with when it is associated with one.
163 * After smp_mb(), it's either the correct page_mapping() for
164 * the page, or an old page_mapping() and the page's own
165 * page_mapping() has gone NULL.
166 * The ->sync_page() address_space operation must tolerate
167 * page_mapping() going NULL. By an amazing coincidence,
168 * this comes about because none of the users of the page
169 * in the ->sync_page() methods make essential use of the
170 * page_mapping(), merely passing the page down to the backing
171 * device's unplug functions when it's non-NULL, which in turn
172 * ignore it for all cases but swap, where only page_private(page) is
173 * of interest. When page_mapping() does go NULL, the entire
174 * call stack gracefully ignores the page and returns.
175 * -- wli
176 */
177 smp_mb();
178 mapping = page_mapping(page);
179 if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
180 mapping->a_ops->sync_page(page);
181 io_schedule();
182 return 0;
183}
184
185static int sync_page_killable(void *word)
186{
187 sync_page(word);
188 return fatal_signal_pending(current) ? -EINTR : 0;
189}
190
191/**
192 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
193 * @mapping: address space structure to write
194 * @start: offset in bytes where the range starts
195 * @end: offset in bytes where the range ends (inclusive)
196 * @sync_mode: enable synchronous operation
197 *
198 * Start writeback against all of a mapping's dirty pages that lie
199 * within the byte offsets <start, end> inclusive.
200 *
201 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
202 * opposed to a regular memory cleansing writeback. The difference between
203 * these two operations is that if a dirty page/buffer is encountered, it must
204 * be waited upon, and not just skipped over.
205 */
206int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
207 loff_t end, int sync_mode)
208{
209 int ret;
210 struct writeback_control wbc = {
211 .sync_mode = sync_mode,
212 .nr_to_write = mapping->nrpages * 2,
213 .range_start = start,
214 .range_end = end,
215 };
216
217 if (!mapping_cap_writeback_dirty(mapping))
218 return 0;
219
220 ret = do_writepages(mapping, &wbc);
221 return ret;
222}
223
224static inline int __filemap_fdatawrite(struct address_space *mapping,
225 int sync_mode)
226{
227 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
228}
229
230int filemap_fdatawrite(struct address_space *mapping)
231{
232 return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
233}
234EXPORT_SYMBOL(filemap_fdatawrite);
235
236int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
237 loff_t end)
238{
239 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
240}
241EXPORT_SYMBOL(filemap_fdatawrite_range);
242
243/**
244 * filemap_flush - mostly a non-blocking flush
245 * @mapping: target address_space
246 *
247 * This is a mostly non-blocking flush. Not suitable for data-integrity
248 * purposes - I/O may not be started against all dirty pages.
249 */
250int filemap_flush(struct address_space *mapping)
251{
252 return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
253}
254EXPORT_SYMBOL(filemap_flush);
255
256/**
257 * wait_on_page_writeback_range - wait for writeback to complete
258 * @mapping: target address_space
259 * @start: beginning page index
260 * @end: ending page index
261 *
262 * Wait for writeback to complete against pages indexed by start->end
263 * inclusive
264 */
265int wait_on_page_writeback_range(struct address_space *mapping,
266 pgoff_t start, pgoff_t end)
267{
268 struct pagevec pvec;
269 int nr_pages;
270 int ret = 0;
271 pgoff_t index;
272
273 if (end < start)
274 return 0;
275
276 pagevec_init(&pvec, 0);
277 index = start;
278 while ((index <= end) &&
279 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
280 PAGECACHE_TAG_WRITEBACK,
281 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
282 unsigned i;
283
284 for (i = 0; i < nr_pages; i++) {
285 struct page *page = pvec.pages[i];
286
287 /* until radix tree lookup accepts end_index */
288 if (page->index > end)
289 continue;
290
291 wait_on_page_writeback(page);
292 if (PageError(page))
293 ret = -EIO;
294 }
295 pagevec_release(&pvec);
296 cond_resched();
297 }
298
299 /* Check for outstanding write errors */
300 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
301 ret = -ENOSPC;
302 if (test_and_clear_bit(AS_EIO, &mapping->flags))
303 ret = -EIO;
304
305 return ret;
306}
307
308/**
309 * sync_page_range - write and wait on all pages in the passed range
310 * @inode: target inode
311 * @mapping: target address_space
312 * @pos: beginning offset in pages to write
313 * @count: number of bytes to write
314 *
315 * Write and wait upon all the pages in the passed range. This is a "data
316 * integrity" operation. It waits upon in-flight writeout before starting and
317 * waiting upon new writeout. If there was an IO error, return it.
318 *
319 * We need to re-take i_mutex during the generic_osync_inode list walk because
320 * it is otherwise livelockable.
321 */
322int sync_page_range(struct inode *inode, struct address_space *mapping,
323 loff_t pos, loff_t count)
324{
325 pgoff_t start = pos >> PAGE_CACHE_SHIFT;
326 pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
327 int ret;
328
329 if (!mapping_cap_writeback_dirty(mapping) || !count)
330 return 0;
331 ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
332 if (ret == 0) {
333 mutex_lock(&inode->i_mutex);
334 ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
335 mutex_unlock(&inode->i_mutex);
336 }
337 if (ret == 0)
338 ret = wait_on_page_writeback_range(mapping, start, end);
339 return ret;
340}
341EXPORT_SYMBOL(sync_page_range);
342
343/**
344 * sync_page_range_nolock - write & wait on all pages in the passed range without locking
345 * @inode: target inode
346 * @mapping: target address_space
347 * @pos: beginning offset in pages to write
348 * @count: number of bytes to write
349 *
350 * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
351 * as it forces O_SYNC writers to different parts of the same file
352 * to be serialised right until io completion.
353 */
354int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
355 loff_t pos, loff_t count)
356{
357 pgoff_t start = pos >> PAGE_CACHE_SHIFT;
358 pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
359 int ret;
360
361 if (!mapping_cap_writeback_dirty(mapping) || !count)
362 return 0;
363 ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
364 if (ret == 0)
365 ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
366 if (ret == 0)
367 ret = wait_on_page_writeback_range(mapping, start, end);
368 return ret;
369}
370EXPORT_SYMBOL(sync_page_range_nolock);
371
372/**
373 * filemap_fdatawait - wait for all under-writeback pages to complete
374 * @mapping: address space structure to wait for
375 *
376 * Walk the list of under-writeback pages of the given address space
377 * and wait for all of them.
378 */
379int filemap_fdatawait(struct address_space *mapping)
380{
381 loff_t i_size = i_size_read(mapping->host);
382
383 if (i_size == 0)
384 return 0;
385
386 return wait_on_page_writeback_range(mapping, 0,
387 (i_size - 1) >> PAGE_CACHE_SHIFT);
388}
389EXPORT_SYMBOL(filemap_fdatawait);
390
391int filemap_write_and_wait(struct address_space *mapping)
392{
393 int err = 0;
394
395 if (mapping->nrpages) {
396 err = filemap_fdatawrite(mapping);
397 /*
398 * Even if the above returned error, the pages may be
399 * written partially (e.g. -ENOSPC), so we wait for it.
400 * But the -EIO is special case, it may indicate the worst
401 * thing (e.g. bug) happened, so we avoid waiting for it.
402 */
403 if (err != -EIO) {
404 int err2 = filemap_fdatawait(mapping);
405 if (!err)
406 err = err2;
407 }
408 }
409 return err;
410}
411EXPORT_SYMBOL(filemap_write_and_wait);
412
413/**
414 * filemap_write_and_wait_range - write out & wait on a file range
415 * @mapping: the address_space for the pages
416 * @lstart: offset in bytes where the range starts
417 * @lend: offset in bytes where the range ends (inclusive)
418 *
419 * Write out and wait upon file offsets lstart->lend, inclusive.
420 *
421 * Note that `lend' is inclusive (describes the last byte to be written) so
422 * that this function can be used to write to the very end-of-file (end = -1).
423 */
424int filemap_write_and_wait_range(struct address_space *mapping,
425 loff_t lstart, loff_t lend)
426{
427 int err = 0;
428
429 if (mapping->nrpages) {
430 err = __filemap_fdatawrite_range(mapping, lstart, lend,
431 WB_SYNC_ALL);
432 /* See comment of filemap_write_and_wait() */
433 if (err != -EIO) {
434 int err2 = wait_on_page_writeback_range(mapping,
435 lstart >> PAGE_CACHE_SHIFT,
436 lend >> PAGE_CACHE_SHIFT);
437 if (!err)
438 err = err2;
439 }
440 }
441 return err;
442}
443
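/*
 * Illustrative sketch (not part of the original file): an fsync-style path
 * in a hypothetical filesystem could combine the helpers above roughly as
 * follows, flushing dirty pagecache and then waiting on the whole file:
 *
 *	int example_sync(struct inode *inode)
 *	{
 *		return filemap_write_and_wait_range(inode->i_mapping, 0, -1);
 *	}
 */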
444/**
445 * add_to_page_cache - add newly allocated pagecache pages
446 * @page: page to add
447 * @mapping: the page's address_space
448 * @offset: page index
449 * @gfp_mask: page allocation mode
450 *
451 * This function is used to add newly allocated pagecache pages;
452 * the page is new, so we can just run SetPageLocked() against it.
453 * The other page state flags were set by rmqueue().
454 *
455 * This function does not add the page to the LRU. The caller must do that.
456 */
457int add_to_page_cache(struct page *page, struct address_space *mapping,
458 pgoff_t offset, gfp_t gfp_mask)
459{
460 int error = mem_cgroup_cache_charge(page, current->mm,
461 gfp_mask & ~__GFP_HIGHMEM);
462 if (error)
463 goto out;
464
465 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
466 if (error == 0) {
467 write_lock_irq(&mapping->tree_lock);
468 error = radix_tree_insert(&mapping->page_tree, offset, page);
469 if (!error) {
470 page_cache_get(page);
471 SetPageLocked(page);
472 page->mapping = mapping;
473 page->index = offset;
474 mapping->nrpages++;
475 __inc_zone_page_state(page, NR_FILE_PAGES);
476 } else
477 mem_cgroup_uncharge_page(page);
478
479 write_unlock_irq(&mapping->tree_lock);
480 radix_tree_preload_end();
481 } else
482 mem_cgroup_uncharge_page(page);
483out:
484 return error;
485}
486EXPORT_SYMBOL(add_to_page_cache);
487
488int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
489 pgoff_t offset, gfp_t gfp_mask)
490{
491 int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
492 if (ret == 0)
493 lru_cache_add(page);
494 return ret;
495}
496
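/*
 * Illustrative sketch (not part of the original file): a typical caller
 * allocates a fresh page, tries to insert it, and retries on -EEXIST
 * because another task may have instantiated the same index first:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		page_cache_release(page);
 *		if (err == -EEXIST)
 *			goto repeat;	(hypothetical retry label)
 *	}
 */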
497#ifdef CONFIG_NUMA
498struct page *__page_cache_alloc(gfp_t gfp)
499{
500 if (cpuset_do_page_mem_spread()) {
501 int n = cpuset_mem_spread_node();
502 return alloc_pages_node(n, gfp, 0);
503 }
504 return alloc_pages(gfp, 0);
505}
506EXPORT_SYMBOL(__page_cache_alloc);
507#endif
508
509static int __sleep_on_page_lock(void *word)
510{
511 io_schedule();
512 return 0;
513}
514
515/*
516 * In order to wait for pages to become available there must be
517 * waitqueues associated with pages. By using a hash table of
518 * waitqueues where the bucket discipline is to maintain all
519 * waiters on the same queue and wake all when any of the pages
520 * become available, and for the woken contexts to check to be
521 * sure the appropriate page became available, this saves space
522 * at a cost of "thundering herd" phenomena during rare hash
523 * collisions.
524 */
525static wait_queue_head_t *page_waitqueue(struct page *page)
526{
527 const struct zone *zone = page_zone(page);
528
529 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
530}
531
532static inline void wake_up_page(struct page *page, int bit)
533{
534 __wake_up_bit(page_waitqueue(page), &page->flags, bit);
535}
536
537void wait_on_page_bit(struct page *page, int bit_nr)
538{
539 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
540
541 if (test_bit(bit_nr, &page->flags))
542 __wait_on_bit(page_waitqueue(page), &wait, sync_page,
543 TASK_UNINTERRUPTIBLE);
544}
545EXPORT_SYMBOL(wait_on_page_bit);
546
547/**
548 * unlock_page - unlock a locked page
549 * @page: the page
550 *
551 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
552 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
553 * mechanism between PageLocked pages and PageWriteback pages is shared.
554 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
555 *
556 * The first mb is necessary to safely close the critical section opened by the
557 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
558 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
559 * parallel wait_on_page_locked()).
560 */
561void unlock_page(struct page *page)
562{
563 smp_mb__before_clear_bit();
564 if (!TestClearPageLocked(page))
565 BUG();
566 smp_mb__after_clear_bit();
567 wake_up_page(page, PG_locked);
568}
569EXPORT_SYMBOL(unlock_page);
570
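/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * around short page-state updates is
 *
 *	lock_page(page);
 *	... inspect or modify the page while PG_locked is held ...
 *	unlock_page(page);
 *
 * lock_page() (a static inline in pagemap.h) only falls back to
 * __lock_page() below when its TestSetPageLocked() fast path finds the
 * bit already set.
 */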
571/**
572 * end_page_writeback - end writeback against a page
573 * @page: the page
574 */
575void end_page_writeback(struct page *page)
576{
577 if (TestClearPageReclaim(page))
578 rotate_reclaimable_page(page);
579
580 if (!test_clear_page_writeback(page))
581 BUG();
582
583 smp_mb__after_clear_bit();
584 wake_up_page(page, PG_writeback);
585}
586EXPORT_SYMBOL(end_page_writeback);
587
588/**
589 * __lock_page - get a lock on the page, assuming we need to sleep to get it
590 * @page: the page to lock
591 *
592 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
593 * random driver's requestfn sets TASK_RUNNING, we could busywait. However
594 * chances are that on the second loop, the block layer's plug list is empty,
595 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
596 */
597void __lock_page(struct page *page)
598{
599 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
600
601 __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
602 TASK_UNINTERRUPTIBLE);
603}
604EXPORT_SYMBOL(__lock_page);
605
606int __lock_page_killable(struct page *page)
607{
608 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
609
610 return __wait_on_bit_lock(page_waitqueue(page), &wait,
611 sync_page_killable, TASK_KILLABLE);
612}
613
614/**
615 * __lock_page_nosync - get a lock on the page, without calling sync_page()
616 * @page: the page to lock
617 *
618 * Variant of lock_page that does not require the caller to hold a reference
619 * on the page's mapping.
620 */
621void __lock_page_nosync(struct page *page)
622{
623 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
624 __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
625 TASK_UNINTERRUPTIBLE);
626}
627
628/**
629 * find_get_page - find and get a page reference
630 * @mapping: the address_space to search
631 * @offset: the page index
632 *
633 * Is there a pagecache struct page at the given (mapping, offset) tuple?
634 * If yes, increment its refcount and return it; if no, return NULL.
635 */
636struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
637{
638 struct page *page;
639
640 read_lock_irq(&mapping->tree_lock);
641 page = radix_tree_lookup(&mapping->page_tree, offset);
642 if (page)
643 page_cache_get(page);
644 read_unlock_irq(&mapping->tree_lock);
645 return page;
646}
647EXPORT_SYMBOL(find_get_page);
648
649/**
650 * find_lock_page - locate, pin and lock a pagecache page
651 * @mapping: the address_space to search
652 * @offset: the page index
653 *
654 * Locates the desired pagecache page, locks it, increments its reference
655 * count and returns its address.
656 *
657 * Returns zero if the page was not present. find_lock_page() may sleep.
658 */
659struct page *find_lock_page(struct address_space *mapping,
660 pgoff_t offset)
661{
662 struct page *page;
663
664repeat:
665 read_lock_irq(&mapping->tree_lock);
666 page = radix_tree_lookup(&mapping->page_tree, offset);
667 if (page) {
668 page_cache_get(page);
669 if (TestSetPageLocked(page)) {
670 read_unlock_irq(&mapping->tree_lock);
671 __lock_page(page);
672
673 /* Has the page been truncated while we slept? */
674 if (unlikely(page->mapping != mapping)) {
675 unlock_page(page);
676 page_cache_release(page);
677 goto repeat;
678 }
679 VM_BUG_ON(page->index != offset);
680 goto out;
681 }
682 }
683 read_unlock_irq(&mapping->tree_lock);
684out:
685 return page;
686}
687EXPORT_SYMBOL(find_lock_page);
688
689/**
690 * find_or_create_page - locate or add a pagecache page
691 * @mapping: the page's address_space
692 * @index: the page's index into the mapping
693 * @gfp_mask: page allocation mode
694 *
695 * Locates a page in the pagecache. If the page is not present, a new page
696 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
697 * LRU list. The returned page is locked and has its reference count
698 * incremented.
699 *
700 * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
701 * allocation!
702 *
703 * find_or_create_page() returns the desired page's address, or zero on
704 * memory exhaustion.
705 */
706struct page *find_or_create_page(struct address_space *mapping,
707 pgoff_t index, gfp_t gfp_mask)
708{
709 struct page *page;
710 int err;
711repeat:
712 page = find_lock_page(mapping, index);
713 if (!page) {
714 page = __page_cache_alloc(gfp_mask);
715 if (!page)
716 return NULL;
717 err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
718 if (unlikely(err)) {
719 page_cache_release(page);
720 page = NULL;
721 if (err == -EEXIST)
722 goto repeat;
723 }
724 }
725 return page;
726}
727EXPORT_SYMBOL(find_or_create_page);
728
729/**
730 * find_get_pages - gang pagecache lookup
731 * @mapping: The address_space to search
732 * @start: The starting page index
733 * @nr_pages: The maximum number of pages
734 * @pages: Where the resulting pages are placed
735 *
736 * find_get_pages() will search for and return a group of up to
737 * @nr_pages pages in the mapping. The pages are placed at @pages.
738 * find_get_pages() takes a reference against the returned pages.
739 *
740 * The search returns a group of mapping-contiguous pages with ascending
741 * indexes. There may be holes in the indices due to not-present pages.
742 *
743 * find_get_pages() returns the number of pages which were found.
744 */
745unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
746 unsigned int nr_pages, struct page **pages)
747{
748 unsigned int i;
749 unsigned int ret;
750
751 read_lock_irq(&mapping->tree_lock);
752 ret = radix_tree_gang_lookup(&mapping->page_tree,
753 (void **)pages, start, nr_pages);
754 for (i = 0; i < ret; i++)
755 page_cache_get(pages[i]);
756 read_unlock_irq(&mapping->tree_lock);
757 return ret;
758}
759
760/**
761 * find_get_pages_contig - gang contiguous pagecache lookup
762 * @mapping: The address_space to search
763 * @index: The starting page index
764 * @nr_pages: The maximum number of pages
765 * @pages: Where the resulting pages are placed
766 *
767 * find_get_pages_contig() works exactly like find_get_pages(), except
768 * that the returned number of pages are guaranteed to be contiguous.
769 *
770 * find_get_pages_contig() returns the number of pages which were found.
771 */
772unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
773 unsigned int nr_pages, struct page **pages)
774{
775 unsigned int i;
776 unsigned int ret;
777
778 read_lock_irq(&mapping->tree_lock);
779 ret = radix_tree_gang_lookup(&mapping->page_tree,
780 (void **)pages, index, nr_pages);
781 for (i = 0; i < ret; i++) {
782 if (pages[i]->mapping == NULL || pages[i]->index != index)
783 break;
784
785 page_cache_get(pages[i]);
786 index++;
787 }
788 read_unlock_irq(&mapping->tree_lock);
789 return i;
790}
791EXPORT_SYMBOL(find_get_pages_contig);
792
793/**
794 * find_get_pages_tag - find and return pages that match @tag
795 * @mapping: the address_space to search
796 * @index: the starting page index
797 * @tag: the tag index
798 * @nr_pages: the maximum number of pages
799 * @pages: where the resulting pages are placed
800 *
801 * Like find_get_pages, except we only return pages which are tagged with
802 * @tag. We update @index to index the next page for the traversal.
803 */
804unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
805 int tag, unsigned int nr_pages, struct page **pages)
806{
807 unsigned int i;
808 unsigned int ret;
809
810 read_lock_irq(&mapping->tree_lock);
811 ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
812 (void **)pages, *index, nr_pages, tag);
813 for (i = 0; i < ret; i++)
814 page_cache_get(pages[i]);
815 if (ret)
816 *index = pages[ret - 1]->index + 1;
817 read_unlock_irq(&mapping->tree_lock);
818 return ret;
819}
820EXPORT_SYMBOL(find_get_pages_tag);
821
822/**
823 * grab_cache_page_nowait - returns locked page at given index in given cache
824 * @mapping: target address_space
825 * @index: the page index
826 *
827 * Same as grab_cache_page(), but do not wait if the page is unavailable.
828 * This is intended for speculative data generators, where the data can
829 * be regenerated if the page couldn't be grabbed. This routine should
830 * be safe to call while holding the lock for another page.
831 *
832 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
833 * and deadlock against the caller's locked page.
834 */
835struct page *
836grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
837{
838 struct page *page = find_get_page(mapping, index);
839
840 if (page) {
841 if (!TestSetPageLocked(page))
842 return page;
843 page_cache_release(page);
844 return NULL;
845 }
846 page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
847 if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
848 page_cache_release(page);
849 page = NULL;
850 }
851 return page;
852}
853EXPORT_SYMBOL(grab_cache_page_nowait);
854
855/*
856 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
857 * a _large_ part of the i/o request. Imagine the worst scenario:
858 *
859 * ---R__________________________________________B__________
860 * ^ reading here ^ bad block(assume 4k)
861 *
862 * read(R) => miss => readahead(R...B) => media error => frustrating retries
863 * => failing the whole request => read(R) => read(R+1) =>
864 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
865 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
866 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
867 *
868 * It is going insane. Fix it by quickly scaling down the readahead size.
869 */
870static void shrink_readahead_size_eio(struct file *filp,
871 struct file_ra_state *ra)
872{
873 if (!ra->ra_pages)
874 return;
875
876 ra->ra_pages /= 4;
877}
878
879/**
880 * do_generic_file_read - generic file read routine
881 * @filp: the file to read
882 * @ppos: current file position
883 * @desc: read_descriptor
884 * @actor: read method
885 *
886 * This is a generic file read routine, and uses the
887 * mapping->a_ops->readpage() function for the actual low-level stuff.
888 *
889 * This is really ugly. But the goto's actually try to clarify some
890 * of the logic when it comes to error handling etc.
891 */
892static void do_generic_file_read(struct file *filp, loff_t *ppos,
893 read_descriptor_t *desc, read_actor_t actor)
894{
895 struct address_space *mapping = filp->f_mapping;
896 struct inode *inode = mapping->host;
897 struct file_ra_state *ra = &filp->f_ra;
898 pgoff_t index;
899 pgoff_t last_index;
900 pgoff_t prev_index;
901 unsigned long offset; /* offset into pagecache page */
902 unsigned int prev_offset;
903 int error;
904
905 index = *ppos >> PAGE_CACHE_SHIFT;
906 prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
907 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
908 last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
909 offset = *ppos & ~PAGE_CACHE_MASK;
910
911 for (;;) {
912 struct page *page;
913 pgoff_t end_index;
914 loff_t isize;
915 unsigned long nr, ret;
916
917 cond_resched();
918find_page:
919 page = find_get_page(mapping, index);
920 if (!page) {
921 page_cache_sync_readahead(mapping,
922 ra, filp,
923 index, last_index - index);
924 page = find_get_page(mapping, index);
925 if (unlikely(page == NULL))
926 goto no_cached_page;
927 }
928 if (PageReadahead(page)) {
929 page_cache_async_readahead(mapping,
930 ra, filp, page,
931 index, last_index - index);
932 }
933 if (!PageUptodate(page))
934 goto page_not_up_to_date;
935page_ok:
936 /*
937 * i_size must be checked after we know the page is Uptodate.
938 *
939 * Checking i_size after the check allows us to calculate
940 * the correct value for "nr", which means the zero-filled
941 * part of the page is not copied back to userspace (unless
942 * another truncate extends the file - this is desired though).
943 */
944
945 isize = i_size_read(inode);
946 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
947 if (unlikely(!isize || index > end_index)) {
948 page_cache_release(page);
949 goto out;
950 }
951
952 /* nr is the maximum number of bytes to copy from this page */
953 nr = PAGE_CACHE_SIZE;
954 if (index == end_index) {
955 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
956 if (nr <= offset) {
957 page_cache_release(page);
958 goto out;
959 }
960 }
961 nr = nr - offset;
962
963 /* If users can be writing to this page using arbitrary
964 * virtual addresses, take care about potential aliasing
965 * before reading the page on the kernel side.
966 */
967 if (mapping_writably_mapped(mapping))
968 flush_dcache_page(page);
969
970 /*
971 * When a sequential read accesses a page several times,
972 * only mark it as accessed the first time.
973 */
974 if (prev_index != index || offset != prev_offset)
975 mark_page_accessed(page);
976 prev_index = index;
977
978 /*
979 * Ok, we have the page, and it's up-to-date, so
980 * now we can copy it to user space...
981 *
982 * The actor routine returns how many bytes were actually used..
983 * NOTE! This may not be the same as how much of a user buffer
984 * we filled up (we may be padding etc), so we can only update
985 * "pos" here (the actor routine has to update the user buffer
986 * pointers and the remaining count).
987 */
988 ret = actor(desc, page, offset, nr);
989 offset += ret;
990 index += offset >> PAGE_CACHE_SHIFT;
991 offset &= ~PAGE_CACHE_MASK;
992 prev_offset = offset;
993
994 page_cache_release(page);
995 if (ret == nr && desc->count)
996 continue;
997 goto out;
998
999page_not_up_to_date:
1000 /* Get exclusive access to the page ... */
1001 if (lock_page_killable(page))
1002 goto readpage_eio;
1003
1004 /* Did it get truncated before we got the lock? */
1005 if (!page->mapping) {
1006 unlock_page(page);
1007 page_cache_release(page);
1008 continue;
1009 }
1010
1011 /* Did somebody else fill it already? */
1012 if (PageUptodate(page)) {
1013 unlock_page(page);
1014 goto page_ok;
1015 }
1016
1017readpage:
1018 /* Start the actual read. The read will unlock the page. */
1019 error = mapping->a_ops->readpage(filp, page);
1020
1021 if (unlikely(error)) {
1022 if (error == AOP_TRUNCATED_PAGE) {
1023 page_cache_release(page);
1024 goto find_page;
1025 }
1026 goto readpage_error;
1027 }
1028
1029 if (!PageUptodate(page)) {
1030 if (lock_page_killable(page))
1031 goto readpage_eio;
1032 if (!PageUptodate(page)) {
1033 if (page->mapping == NULL) {
1034 /*
1035 * invalidate_inode_pages got it
1036 */
1037 unlock_page(page);
1038 page_cache_release(page);
1039 goto find_page;
1040 }
1041 unlock_page(page);
1042 shrink_readahead_size_eio(filp, ra);
1043 goto readpage_eio;
1044 }
1045 unlock_page(page);
1046 }
1047
1048 goto page_ok;
1049
1050readpage_eio:
1051 error = -EIO;
1052readpage_error:
1053 /* UHHUH! A synchronous read error occurred. Report it */
1054 desc->error = error;
1055 page_cache_release(page);
1056 goto out;
1057
1058no_cached_page:
1059 /*
1060 * Ok, it wasn't cached, so we need to create a new
1061 * page..
1062 */
1063 page = page_cache_alloc_cold(mapping);
1064 if (!page) {
1065 desc->error = -ENOMEM;
1066 goto out;
1067 }
1068 error = add_to_page_cache_lru(page, mapping,
1069 index, GFP_KERNEL);
1070 if (error) {
1071 page_cache_release(page);
1072 if (error == -EEXIST)
1073 goto find_page;
1074 desc->error = error;
1075 goto out;
1076 }
1077 goto readpage;
1078 }
1079
1080out:
1081 ra->prev_pos = prev_index;
1082 ra->prev_pos <<= PAGE_CACHE_SHIFT;
1083 ra->prev_pos |= prev_offset;
1084
1085 *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
1086 if (filp)
1087 file_accessed(filp);
1088}
1089
1090int file_read_actor(read_descriptor_t *desc, struct page *page,
1091 unsigned long offset, unsigned long size)
1092{
1093 char *kaddr;
1094 unsigned long left, count = desc->count;
1095
1096 if (size > count)
1097 size = count;
1098
1099 /*
1100 * Faults on the destination of a read are common, so do it before
1101 * taking the kmap.
1102 */
1103 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1104 kaddr = kmap_atomic(page, KM_USER0);
1105 left = __copy_to_user_inatomic(desc->arg.buf,
1106 kaddr + offset, size);
1107 kunmap_atomic(kaddr, KM_USER0);
1108 if (left == 0)
1109 goto success;
1110 }
1111
1112 /* Do it the slow way */
1113 kaddr = kmap(page);
1114 left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1115 kunmap(page);
1116
1117 if (left) {
1118 size -= left;
1119 desc->error = -EFAULT;
1120 }
1121success:
1122 desc->count = count - size;
1123 desc->written += size;
1124 desc->arg.buf += size;
1125 return size;
1126}
1127
1128/*
1129 * Performs necessary checks before doing a write
1130 * @iov: io vector request
1131 * @nr_segs: number of segments in the iovec
1132 * @count: number of bytes to write
1133 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
1134 *
1135 * Adjust number of segments and amount of bytes to write (nr_segs should be
1136 * properly initialized first). Returns appropriate error code that caller
1137 * should return or zero in case that write should be allowed.
1138 */
1139int generic_segment_checks(const struct iovec *iov,
1140 unsigned long *nr_segs, size_t *count, int access_flags)
1141{
1142 unsigned long seg;
1143 size_t cnt = 0;
1144 for (seg = 0; seg < *nr_segs; seg++) {
1145 const struct iovec *iv = &iov[seg];
1146
1147 /*
1148 * If any segment has a negative length, or the cumulative
1149 * length ever wraps negative then return -EINVAL.
1150 */
1151 cnt += iv->iov_len;
1152 if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
1153 return -EINVAL;
1154 if (access_ok(access_flags, iv->iov_base, iv->iov_len))
1155 continue;
1156 if (seg == 0)
1157 return -EFAULT;
1158 *nr_segs = seg;
1159 cnt -= iv->iov_len; /* This segment is no good */
1160 break;
1161 }
1162 *count = cnt;
1163 return 0;
1164}
1165EXPORT_SYMBOL(generic_segment_checks);
1166
1167/**
1168 * generic_file_aio_read - generic filesystem read routine
1169 * @iocb: kernel I/O control block
1170 * @iov: io vector request
1171 * @nr_segs: number of segments in the iovec
1172 * @pos: current file position
1173 *
1174 * This is the "read()" routine for all filesystems
1175 * that can use the page cache directly.
1176 */
1177ssize_t
1178generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1179 unsigned long nr_segs, loff_t pos)
1180{
1181 struct file *filp = iocb->ki_filp;
1182 ssize_t retval;
1183 unsigned long seg;
1184 size_t count;
1185 loff_t *ppos = &iocb->ki_pos;
1186
1187 count = 0;
1188 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1189 if (retval)
1190 return retval;
1191
1192 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
1193 if (filp->f_flags & O_DIRECT) {
1194 loff_t size;
1195 struct address_space *mapping;
1196 struct inode *inode;
1197
1198 mapping = filp->f_mapping;
1199 inode = mapping->host;
1200 if (!count)
1201 goto out; /* skip atime */
1202 size = i_size_read(inode);
1203 if (pos < size) {
1204 retval = filemap_write_and_wait(mapping);
1205 if (!retval) {
1206 retval = mapping->a_ops->direct_IO(READ, iocb,
1207 iov, pos, nr_segs);
1208 }
1209 if (retval > 0)
1210 *ppos = pos + retval;
1211 if (retval) {
1212 file_accessed(filp);
1213 goto out;
1214 }
1215 }
1216 }
1217
1218 for (seg = 0; seg < nr_segs; seg++) {
1219 read_descriptor_t desc;
1220
1221 desc.written = 0;
1222 desc.arg.buf = iov[seg].iov_base;
1223 desc.count = iov[seg].iov_len;
1224 if (desc.count == 0)
1225 continue;
1226 desc.error = 0;
1227 do_generic_file_read(filp, ppos, &desc, file_read_actor);
1228 retval += desc.written;
1229 if (desc.error) {
1230 retval = retval ?: desc.error;
1231 break;
1232 }
1233 if (desc.count > 0)
1234 break;
1235 }
1236out:
1237 return retval;
1238}
1239EXPORT_SYMBOL(generic_file_aio_read);
1240
1241static ssize_t
1242do_readahead(struct address_space *mapping, struct file *filp,
1243 pgoff_t index, unsigned long nr)
1244{
1245 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1246 return -EINVAL;
1247
1248 force_page_cache_readahead(mapping, filp, index,
1249 max_sane_readahead(nr));
1250 return 0;
1251}
1252
1253asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
1254{
1255 ssize_t ret;
1256 struct file *file;
1257
1258 ret = -EBADF;
1259 file = fget(fd);
1260 if (file) {
1261 if (file->f_mode & FMODE_READ) {
1262 struct address_space *mapping = file->f_mapping;
1263 pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1264 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1265 unsigned long len = end - start + 1;
1266 ret = do_readahead(mapping, file, start, len);
1267 }
1268 fput(file);
1269 }
1270 return ret;
1271}
1272
1273#ifdef CONFIG_MMU
1274/**
1275 * page_cache_read - adds requested page to the page cache if not already there
1276 * @file: file to read
1277 * @offset: page index
1278 *
1279 * This adds the requested page to the page cache if it isn't already there,
1280 * and schedules an I/O to read in its contents from disk.
1281 */
1282static int page_cache_read(struct file *file, pgoff_t offset)
1283{
1284 struct address_space *mapping = file->f_mapping;
1285 struct page *page;
1286 int ret;
1287
1288 do {
1289 page = page_cache_alloc_cold(mapping);
1290 if (!page)
1291 return -ENOMEM;
1292
1293 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1294 if (ret == 0)
1295 ret = mapping->a_ops->readpage(file, page);
1296 else if (ret == -EEXIST)
1297 ret = 0; /* losing race to add is OK */
1298
1299 page_cache_release(page);
1300
1301 } while (ret == AOP_TRUNCATED_PAGE);
1302
1303 return ret;
1304}
1305
1306#define MMAP_LOTSAMISS (100)
1307
1308/**
1309 * filemap_fault - read in file data for page fault handling
1310 * @vma: vma in which the fault was taken
1311 * @vmf: struct vm_fault containing details of the fault
1312 *
1313 * filemap_fault() is invoked via the vma operations vector for a
1314 * mapped memory region to read in file data during a page fault.
1315 *
1316 * The goto's are kind of ugly, but this streamlines the normal case of having
1317 * it in the page cache, and handles the special cases reasonably without
1318 * having a lot of duplicated code.
1319 */
1320int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1321{
1322 int error;
1323 struct file *file = vma->vm_file;
1324 struct address_space *mapping = file->f_mapping;
1325 struct file_ra_state *ra = &file->f_ra;
1326 struct inode *inode = mapping->host;
1327 struct page *page;
1328 pgoff_t size;
1329 int did_readaround = 0;
1330 int ret = 0;
1331
1332 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1333 if (vmf->pgoff >= size)
1334 return VM_FAULT_SIGBUS;
1335
1336 /* If we don't want any read-ahead, don't bother */
1337 if (VM_RandomReadHint(vma))
1338 goto no_cached_page;
1339
1340 /*
1341 * Do we have something in the page cache already?
1342 */
1343retry_find:
1344 page = find_lock_page(mapping, vmf->pgoff);
1345 /*
1346 * For sequential accesses, we use the generic readahead logic.
1347 */
1348 if (VM_SequentialReadHint(vma)) {
1349 if (!page) {
1350 page_cache_sync_readahead(mapping, ra, file,
1351 vmf->pgoff, 1);
1352 page = find_lock_page(mapping, vmf->pgoff);
1353 if (!page)
1354 goto no_cached_page;
1355 }
1356 if (PageReadahead(page)) {
1357 page_cache_async_readahead(mapping, ra, file, page,
1358 vmf->pgoff, 1);
1359 }
1360 }
1361
1362 if (!page) {
1363 unsigned long ra_pages;
1364
1365 ra->mmap_miss++;
1366
1367 /*
1368 * Do we miss much more than hit in this file? If so,
1369 * stop bothering with read-ahead. It will only hurt.
1370 */
1371 if (ra->mmap_miss > MMAP_LOTSAMISS)
1372 goto no_cached_page;
1373
1374 /*
1375 * To keep the pgmajfault counter straight, we need to
1376 * check did_readaround, as this is an inner loop.
1377 */
1378 if (!did_readaround) {
1379 ret = VM_FAULT_MAJOR;
1380 count_vm_event(PGMAJFAULT);
1381 }
1382 did_readaround = 1;
1383 ra_pages = max_sane_readahead(file->f_ra.ra_pages);
1384 if (ra_pages) {
1385 pgoff_t start = 0;
1386
1387 if (vmf->pgoff > ra_pages / 2)
1388 start = vmf->pgoff - ra_pages / 2;
1389 do_page_cache_readahead(mapping, file, start, ra_pages);
1390 }
1391 page = find_lock_page(mapping, vmf->pgoff);
1392 if (!page)
1393 goto no_cached_page;
1394 }
1395
1396 if (!did_readaround)
1397 ra->mmap_miss--;
1398
1399 /*
1400 * We have a locked page in the page cache, now we need to check
1401 * that it's up-to-date. If not, it is going to be due to an error.
1402 */
1403 if (unlikely(!PageUptodate(page)))
1404 goto page_not_uptodate;
1405
1406 /* Must recheck i_size under page lock */
1407 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1408 if (unlikely(vmf->pgoff >= size)) {
1409 unlock_page(page);
1410 page_cache_release(page);
1411 return VM_FAULT_SIGBUS;
1412 }
1413
1414 /*
1415 * Found the page and have a reference on it.
1416 */
1417 mark_page_accessed(page);
1418 ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
1419 vmf->page = page;
1420 return ret | VM_FAULT_LOCKED;
1421
1422no_cached_page:
1423 /*
1424 * We're only likely to ever get here if MADV_RANDOM is in
1425 * effect.
1426 */
1427 error = page_cache_read(file, vmf->pgoff);
1428
1429 /*
1430 * The page we want has now been added to the page cache.
1431 * In the unlikely event that someone removed it in the
1432 * meantime, we'll just come back here and read it again.
1433 */
1434 if (error >= 0)
1435 goto retry_find;
1436
1437 /*
1438 * An error return from page_cache_read can result if the
1439 * system is low on memory, or a problem occurs while trying
1440 * to schedule I/O.
1441 */
1442 if (error == -ENOMEM)
1443 return VM_FAULT_OOM;
1444 return VM_FAULT_SIGBUS;
1445
1446page_not_uptodate:
1447 /* IO error path */
1448 if (!did_readaround) {
1449 ret = VM_FAULT_MAJOR;
1450 count_vm_event(PGMAJFAULT);
1451 }
1452
1453 /*
1454 * Umm, take care of errors if the page isn't up-to-date.
1455 * Try to re-read it _once_. We do this synchronously,
1456 * because there really aren't any performance issues here
1457 * and we need to check for errors.
1458 */
1459 ClearPageError(page);
1460 error = mapping->a_ops->readpage(file, page);
1461 if (!error) {
1462 wait_on_page_locked(page);
1463 if (!PageUptodate(page))
1464 error = -EIO;
1465 }
1466 page_cache_release(page);
1467
1468 if (!error || error == AOP_TRUNCATED_PAGE)
1469 goto retry_find;
1470
1471 /* Things didn't work out. Return zero to tell the mm layer so. */
1472 shrink_readahead_size_eio(file, ra);
1473 return VM_FAULT_SIGBUS;
1474}
1475EXPORT_SYMBOL(filemap_fault);
1476
1477struct vm_operations_struct generic_file_vm_ops = {
1478 .fault = filemap_fault,
1479};
1480
1481/* This is used for a general mmap of a disk file */
1482
1483int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1484{
1485 struct address_space *mapping = file->f_mapping;
1486
1487 if (!mapping->a_ops->readpage)
1488 return -ENOEXEC;
1489 file_accessed(file);
1490 vma->vm_ops = &generic_file_vm_ops;
1491 vma->vm_flags |= VM_CAN_NONLINEAR;
1492 return 0;
1493}
1494
1495/*
1496 * This is for filesystems which do not implement ->writepage.
1497 */
1498int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1499{
1500 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1501 return -EINVAL;
1502 return generic_file_mmap(file, vma);
1503}
1504#else
1505int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1506{
1507 return -ENOSYS;
1508}
1509int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1510{
1511 return -ENOSYS;
1512}
1513#endif /* CONFIG_MMU */
1514
1515EXPORT_SYMBOL(generic_file_mmap);
1516EXPORT_SYMBOL(generic_file_readonly_mmap);
1517
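/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * wants filemap_fault() to service its page faults typically just points
 * its file_operations at the helper above, e.g. (hypothetical):
 *
 *	const struct file_operations example_fops = {
 *		.mmap	= generic_file_mmap,
 *		...
 *	};
 *
 * generic_file_mmap() then installs generic_file_vm_ops, whose .fault
 * method is filemap_fault().
 */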
1518static struct page *__read_cache_page(struct address_space *mapping,
1519 pgoff_t index,
1520 int (*filler)(void *,struct page*),
1521 void *data)
1522{
1523 struct page *page;
1524 int err;
1525repeat:
1526 page = find_get_page(mapping, index);
1527 if (!page) {
1528 page = page_cache_alloc_cold(mapping);
1529 if (!page)
1530 return ERR_PTR(-ENOMEM);
1531 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
1532 if (unlikely(err)) {
1533 page_cache_release(page);
1534 if (err == -EEXIST)
1535 goto repeat;
1536 /* Presumably ENOMEM for radix tree node */
1537 return ERR_PTR(err);
1538 }
1539 err = filler(data, page);
1540 if (err < 0) {
1541 page_cache_release(page);
1542 page = ERR_PTR(err);
1543 }
1544 }
1545 return page;
1546}
1547
1548/**
1549 * read_cache_page_async - read into page cache, fill it if needed
1550 * @mapping: the page's address_space
1551 * @index: the page index
1552 * @filler: function to perform the read
1553 * @data: destination for read data
1554 *
1555 * Same as read_cache_page, but don't wait for page to become unlocked
1556 * after submitting it to the filler.
1557 *
1558 * Read into the page cache. If a page already exists, and PageUptodate() is
1559 * not set, try to fill the page but don't wait for it to become unlocked.
1560 *
1561 * If the page does not get brought uptodate, return -EIO.
1562 */
1563struct page *read_cache_page_async(struct address_space *mapping,
1564 pgoff_t index,
1565 int (*filler)(void *,struct page*),
1566 void *data)
1567{
1568 struct page *page;
1569 int err;
1570
1571retry:
1572 page = __read_cache_page(mapping, index, filler, data);
1573 if (IS_ERR(page))
1574 return page;
1575 if (PageUptodate(page))
1576 goto out;
1577
1578 lock_page(page);
1579 if (!page->mapping) {
1580 unlock_page(page);
1581 page_cache_release(page);
1582 goto retry;
1583 }
1584 if (PageUptodate(page)) {
1585 unlock_page(page);
1586 goto out;
1587 }
1588 err = filler(data, page);
1589 if (err < 0) {
1590 page_cache_release(page);
c855ff37 1591 return ERR_PTR(err);
1592 }
1593out:
1594 mark_page_accessed(page);
1595 return page;
1596}
1597EXPORT_SYMBOL(read_cache_page_async);
1598
1599/**
1600 * read_cache_page - read into page cache, fill it if needed
1601 * @mapping: the page's address_space
1602 * @index: the page index
1603 * @filler: function to perform the read
1604 * @data: destination for read data
1605 *
1606 * Read into the page cache. If a page already exists, and PageUptodate() is
1607 * not set, try to fill the page then wait for it to become unlocked.
1608 *
1609 * If the page does not get brought uptodate, return -EIO.
1610 */
1611struct page *read_cache_page(struct address_space *mapping,
1612 pgoff_t index,
1613 int (*filler)(void *,struct page*),
1614 void *data)
1615{
1616 struct page *page;
1617
1618 page = read_cache_page_async(mapping, index, filler, data);
1619 if (IS_ERR(page))
1620 goto out;
1621 wait_on_page_locked(page);
1622 if (!PageUptodate(page)) {
1623 page_cache_release(page);
1624 page = ERR_PTR(-EIO);
1625 }
1626 out:
1627 return page;
1628}
1629EXPORT_SYMBOL(read_cache_page);
1630
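/*
 * Illustrative sketch (not part of the original file): a common way to
 * pull a single page in through the mapping's own readpage method (this
 * is essentially what read_mapping_page() in pagemap.h does):
 *
 *	page = read_cache_page(mapping, index,
 *			(filler_t *)mapping->a_ops->readpage, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	page_cache_release(page);
 */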
1631/*
1632 * The logic we want is
1633 *
1634 * if suid or (sgid and xgrp)
1635 * remove privs
1636 */
1637int should_remove_suid(struct dentry *dentry)
1638{
1639 mode_t mode = dentry->d_inode->i_mode;
1640 int kill = 0;
1641
1642 /* suid always must be killed */
1643 if (unlikely(mode & S_ISUID))
1644 kill = ATTR_KILL_SUID;
1645
1646 /*
1647 * sgid without any exec bits is just a mandatory locking mark; leave
1648 * it alone. If some exec bits are set, it's a real sgid; kill it.
1649 */
1650 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1651 kill |= ATTR_KILL_SGID;
1652
1653 if (unlikely(kill && !capable(CAP_FSETID)))
1654 return kill;
1655
1656 return 0;
1657}
1658EXPORT_SYMBOL(should_remove_suid);
1659
1660static int __remove_suid(struct dentry *dentry, int kill)
1661{
1662 struct iattr newattrs;
1663
1664 newattrs.ia_valid = ATTR_FORCE | kill;
1665 return notify_change(dentry, &newattrs);
1666}
1667
1668int remove_suid(struct dentry *dentry)
1669{
b5376771
SH
1670 int killsuid = should_remove_suid(dentry);
1671 int killpriv = security_inode_need_killpriv(dentry);
1672 int error = 0;
01de85e0 1673
b5376771
SH
1674 if (killpriv < 0)
1675 return killpriv;
1676 if (killpriv)
1677 error = security_inode_killpriv(dentry);
1678 if (!error && killsuid)
1679 error = __remove_suid(dentry, killsuid);
1680
1681 return error;
1682}
1683EXPORT_SYMBOL(remove_suid);
1684
2f718ffc 1685static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1686 const struct iovec *iov, size_t base, size_t bytes)
1687{
1688 size_t copied = 0, left = 0;
1689
1690 while (bytes) {
1691 char __user *buf = iov->iov_base + base;
1692 int copy = min(bytes, iov->iov_len - base);
1693
1694 base = 0;
1695 left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
1696 copied += copy;
1697 bytes -= copy;
1698 vaddr += copy;
1699 iov++;
1700
1701 if (unlikely(left))
1702 break;
1703 }
1704 return copied - left;
1705}
1706
1707/*
1708 * Copy as much as we can into the page and return the number of bytes which
1709 * were successfully copied. If a fault is encountered then return the number of
1710 * bytes which were copied.
1711 */
1712size_t iov_iter_copy_from_user_atomic(struct page *page,
1713 struct iov_iter *i, unsigned long offset, size_t bytes)
1714{
1715 char *kaddr;
1716 size_t copied;
1717
1718 BUG_ON(!in_atomic());
1719 kaddr = kmap_atomic(page, KM_USER0);
1720 if (likely(i->nr_segs == 1)) {
1721 int left;
1722 char __user *buf = i->iov->iov_base + i->iov_offset;
1723 left = __copy_from_user_inatomic_nocache(kaddr + offset,
1724 buf, bytes);
1725 copied = bytes - left;
1726 } else {
1727 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1728 i->iov, i->iov_offset, bytes);
1729 }
1730 kunmap_atomic(kaddr, KM_USER0);
1731
1732 return copied;
1733}
1734EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1735
1736/*
1737 * This has the same sideeffects and return value as
1738 * iov_iter_copy_from_user_atomic().
1739 * The difference is that it attempts to resolve faults.
1740 * Page must not be locked.
1741 */
1742size_t iov_iter_copy_from_user(struct page *page,
1743 struct iov_iter *i, unsigned long offset, size_t bytes)
1744{
1745 char *kaddr;
1746 size_t copied;
1747
1748 kaddr = kmap(page);
1749 if (likely(i->nr_segs == 1)) {
1750 int left;
1751 char __user *buf = i->iov->iov_base + i->iov_offset;
1752 left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
1753 copied = bytes - left;
1754 } else {
1755 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
1756 i->iov, i->iov_offset, bytes);
1757 }
1758 kunmap(page);
1759 return copied;
1760}
89e10787 1761EXPORT_SYMBOL(iov_iter_copy_from_user);
2f718ffc 1762
f7009264 1763void iov_iter_advance(struct iov_iter *i, size_t bytes)
2f718ffc 1764{
f7009264
NP
1765 BUG_ON(i->count < bytes);
1766
2f718ffc
NP
1767 if (likely(i->nr_segs == 1)) {
1768 i->iov_offset += bytes;
f7009264 1769 i->count -= bytes;
2f718ffc
NP
1770 } else {
1771 const struct iovec *iov = i->iov;
1772 size_t base = i->iov_offset;
1773
124d3b70
NP
1774 /*
1775 * The !iov->iov_len check ensures we skip over unlikely
f7009264 1776 * zero-length segments (without overrunning the iovec).
124d3b70 1777 */
f7009264
NP
1778 while (bytes || unlikely(!iov->iov_len && i->count)) {
1779 int copy;
2f718ffc 1780
f7009264
NP
1781 copy = min(bytes, iov->iov_len - base);
1782 BUG_ON(!i->count || i->count < copy);
1783 i->count -= copy;
2f718ffc
NP
1784 bytes -= copy;
1785 base += copy;
1786 if (iov->iov_len == base) {
1787 iov++;
1788 base = 0;
1789 }
1790 }
1791 i->iov = iov;
1792 i->iov_offset = base;
1793 }
1794}
89e10787 1795EXPORT_SYMBOL(iov_iter_advance);
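
/*
 * Illustrative sketch: setting up an iov_iter over an iovec array and
 * consuming it chunk by chunk with iov_iter_advance().  The helper and the
 * elided per-chunk copy are hypothetical; generic_perform_write() below is
 * the real in-tree user of this pattern.
 */
static size_t my_walk_iov(const struct iovec *iov, unsigned long nr_segs,
			  size_t count)
{
	struct iov_iter i;
	size_t done = 0;

	iov_iter_init(&i, iov, nr_segs, count, 0);
	while (iov_iter_count(&i)) {
		size_t chunk = min_t(size_t, iov_iter_count(&i), PAGE_SIZE);

		/* ... copy 'chunk' bytes from the iterator here ... */

		iov_iter_advance(&i, chunk);
		done += chunk;
	}
	return done;
}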
2f718ffc 1796
afddba49
NP
1797/*
1798 * Fault in the first iovec of the given iov_iter, to a maximum length
1799 * of bytes. Returns 0 on success, or non-zero if the memory could not be
1800 * accessed (i.e. because it is an invalid address).
1801 *
1802 * writev-intensive code may want this to prefault several iovecs -- that
1803 * would be possible (callers must not rely on the fact that _only_ the
1804 * first iovec will be faulted with the current implementation).
1805 */
1806int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
2f718ffc 1807{
2f718ffc 1808 char __user *buf = i->iov->iov_base + i->iov_offset;
afddba49
NP
1809 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
1810 return fault_in_pages_readable(buf, bytes);
2f718ffc 1811}
89e10787 1812EXPORT_SYMBOL(iov_iter_fault_in_readable);
2f718ffc
NP
1813
1814/*
1815 * Return the count of just the current iov_iter segment.
1816 */
1817size_t iov_iter_single_seg_count(struct iov_iter *i)
1818{
1819 const struct iovec *iov = i->iov;
1820 if (i->nr_segs == 1)
1821 return i->count;
1822 else
1823 return min(i->count, iov->iov_len - i->iov_offset);
1824}
89e10787 1825EXPORT_SYMBOL(iov_iter_single_seg_count);
2f718ffc 1826
1da177e4
LT
1827/*
1828 * Performs necessary checks before doing a write
1829 *
485bb99b 1830 * Can adjust the writing position or the number of bytes to write.
1da177e4
LT
1831 * Returns an appropriate error code that the caller should return, or
1832 * zero if the write should be allowed.
1833 */
1834inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
1835{
1836 struct inode *inode = file->f_mapping->host;
1837 unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1838
1839 if (unlikely(*pos < 0))
1840 return -EINVAL;
1841
1da177e4
LT
1842 if (!isblk) {
1843 /* FIXME: this is for backwards compatibility with 2.4 */
1844 if (file->f_flags & O_APPEND)
1845 *pos = i_size_read(inode);
1846
1847 if (limit != RLIM_INFINITY) {
1848 if (*pos >= limit) {
1849 send_sig(SIGXFSZ, current, 0);
1850 return -EFBIG;
1851 }
1852 if (*count > limit - (typeof(limit))*pos) {
1853 *count = limit - (typeof(limit))*pos;
1854 }
1855 }
1856 }
1857
1858 /*
1859 * LFS rule
1860 */
1861 if (unlikely(*pos + *count > MAX_NON_LFS &&
1862 !(file->f_flags & O_LARGEFILE))) {
1863 if (*pos >= MAX_NON_LFS) {
1da177e4
LT
1864 return -EFBIG;
1865 }
1866 if (*count > MAX_NON_LFS - (unsigned long)*pos) {
1867 *count = MAX_NON_LFS - (unsigned long)*pos;
1868 }
1869 }
1870
1871 /*
1873 * Are we about to exceed the fs block limit?
1873 *
1874 * If we have written data it becomes a short write. If we have
1875 * exceeded without writing data we send a signal and return EFBIG.
1877 * Linus' frestrict idea will clean these up nicely.
1877 */
1878 if (likely(!isblk)) {
1879 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
1880 if (*count || *pos > inode->i_sb->s_maxbytes) {
1da177e4
LT
1881 return -EFBIG;
1882 }
1883 /* zero-length writes at ->s_maxbytes are OK */
1884 }
1885
1886 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
1887 *count = inode->i_sb->s_maxbytes - *pos;
1888 } else {
9361401e 1889#ifdef CONFIG_BLOCK
1da177e4
LT
1890 loff_t isize;
1891 if (bdev_read_only(I_BDEV(inode)))
1892 return -EPERM;
1893 isize = i_size_read(inode);
1894 if (*pos >= isize) {
1895 if (*count || *pos > isize)
1896 return -ENOSPC;
1897 }
1898
1899 if (*pos + *count > isize)
1900 *count = isize - *pos;
9361401e
DH
1901#else
1902 return -EPERM;
1903#endif
1da177e4
LT
1904 }
1905 return 0;
1906}
1907EXPORT_SYMBOL(generic_write_checks);
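
/*
 * Illustrative sketch (hypothetical helper name): a ->aio_write style path
 * normally validates and clamps the request with generic_write_checks()
 * before doing any work, as __generic_file_aio_write_nolock() does below.
 * On return, *ppos and *count may have been adjusted (O_APPEND, rlimit,
 * LFS and s_maxbytes limits).
 */
static ssize_t my_check_write(struct file *file, loff_t *ppos, size_t *count)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = generic_write_checks(file, ppos, count, S_ISBLK(inode->i_mode));
	if (err)
		return err;
	return *count;		/* possibly smaller than what the caller asked for */
}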
1908
afddba49
NP
1909int pagecache_write_begin(struct file *file, struct address_space *mapping,
1910 loff_t pos, unsigned len, unsigned flags,
1911 struct page **pagep, void **fsdata)
1912{
1913 const struct address_space_operations *aops = mapping->a_ops;
1914
1915 if (aops->write_begin) {
1916 return aops->write_begin(file, mapping, pos, len, flags,
1917 pagep, fsdata);
1918 } else {
1919 int ret;
1920 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1921 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1922 struct inode *inode = mapping->host;
1923 struct page *page;
1924again:
1925 page = __grab_cache_page(mapping, index);
1926 *pagep = page;
1927 if (!page)
1928 return -ENOMEM;
1929
1930 if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
1931 /*
1932 * There is no way to resolve a short write situation
1933 * for a !Uptodate page (except by double copying in
1934 * the caller done by generic_perform_write_2copy).
1935 *
1936 * Instead, we have to bring it uptodate here.
1937 */
1938 ret = aops->readpage(file, page);
1939 page_cache_release(page);
1940 if (ret) {
1941 if (ret == AOP_TRUNCATED_PAGE)
1942 goto again;
1943 return ret;
1944 }
1945 goto again;
1946 }
1947
1948 ret = aops->prepare_write(file, page, offset, offset+len);
1949 if (ret) {
55144768 1950 unlock_page(page);
afddba49
NP
1951 page_cache_release(page);
1952 if (pos + len > inode->i_size)
1953 vmtruncate(inode, inode->i_size);
afddba49
NP
1954 }
1955 return ret;
1956 }
1957}
1958EXPORT_SYMBOL(pagecache_write_begin);
1959
1960int pagecache_write_end(struct file *file, struct address_space *mapping,
1961 loff_t pos, unsigned len, unsigned copied,
1962 struct page *page, void *fsdata)
1963{
1964 const struct address_space_operations *aops = mapping->a_ops;
1965 int ret;
1966
1967 if (aops->write_end) {
1968 mark_page_accessed(page);
1969 ret = aops->write_end(file, mapping, pos, len, copied,
1970 page, fsdata);
1971 } else {
1972 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1973 struct inode *inode = mapping->host;
1974
1975 flush_dcache_page(page);
1976 ret = aops->commit_write(file, page, offset, offset+len);
1977 unlock_page(page);
1978 mark_page_accessed(page);
1979 page_cache_release(page);
afddba49
NP
1980
1981 if (ret < 0) {
1982 if (pos + len > inode->i_size)
1983 vmtruncate(inode, inode->i_size);
1984 } else if (ret > 0)
1985 ret = min_t(size_t, copied, ret);
1986 else
1987 ret = copied;
1988 }
1989
1990 return ret;
1991}
1992EXPORT_SYMBOL(pagecache_write_end);
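
/*
 * Illustrative sketch: writing a small kernel buffer into the pagecache via
 * the write_begin/write_end pair.  The helper name is hypothetical, the
 * caller is assumed to keep the range within a single page, and partial
 * writes/error unwinding are reduced to the minimum.
 */
static int my_write_kernel_buf(struct file *file, struct address_space *mapping,
			       loff_t pos, const char *buf, unsigned len)
{
	struct page *page;
	void *fsdata;
	char *kaddr;
	int err;

	err = pagecache_write_begin(file, mapping, pos, len,
				    AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (err)
		return err;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + (pos & (PAGE_CACHE_SIZE - 1)), buf, len);
	kunmap_atomic(kaddr, KM_USER0);

	/* write_end unlocks and releases the page on our behalf */
	err = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
	return err < 0 ? err : 0;
}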
1993
1da177e4
LT
1994ssize_t
1995generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
1996 unsigned long *nr_segs, loff_t pos, loff_t *ppos,
1997 size_t count, size_t ocount)
1998{
1999 struct file *file = iocb->ki_filp;
2000 struct address_space *mapping = file->f_mapping;
2001 struct inode *inode = mapping->host;
2002 ssize_t written;
a969e903
CH
2003 size_t write_len;
2004 pgoff_t end;
1da177e4
LT
2005
2006 if (count != ocount)
2007 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
2008
a969e903
CH
2009 /*
2010 * Unmap all mmaps of the file up-front.
2011 *
2012 * This will cause any pte dirty bits to be propagated into the
2013 * pageframes for the subsequent filemap_write_and_wait().
2014 */
2015 write_len = iov_length(iov, *nr_segs);
2016 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
2017 if (mapping_mapped(mapping))
2018 unmap_mapping_range(mapping, pos, write_len, 0);
2019
2020 written = filemap_write_and_wait(mapping);
2021 if (written)
2022 goto out;
2023
2024 /*
2025 * After a write we want buffered reads to be sure to go to disk to get
2026 * the new data. We invalidate clean cached pages from the region we're
2027 * about to write. We do this *before* the write so that we can return
2028 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
2029 */
2030 if (mapping->nrpages) {
2031 written = invalidate_inode_pages2_range(mapping,
2032 pos >> PAGE_CACHE_SHIFT, end);
2033 if (written)
2034 goto out;
2035 }
2036
2037 written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
2038
2039 /*
2040 * Finally, try again to invalidate clean pages which might have been
2041 * cached by non-direct readahead, or faulted in by get_user_pages()
2042 * if the source of the write was an mmap'ed region of the file
2043 * we're writing. Either one is a pretty crazy thing to do,
2044 * so we don't support it 100%. If this invalidation
2045 * fails, tough, the write still worked...
2046 */
2047 if (mapping->nrpages) {
2048 invalidate_inode_pages2_range(mapping,
2049 pos >> PAGE_CACHE_SHIFT, end);
2050 }
2051
1da177e4
LT
2052 if (written > 0) {
2053 loff_t end = pos + written;
2054 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
2055 i_size_write(inode, end);
2056 mark_inode_dirty(inode);
2057 }
2058 *ppos = end;
2059 }
2060
2061 /*
2062 * Sync the fs metadata but not the minor inode changes and
2063 * of course not the data as we did direct DMA for the IO.
1b1dcc1b 2064 * i_mutex is held, which protects generic_osync_inode() from
8459d86a 2065 * livelocking. AIO O_DIRECT ops attempt to sync metadata here.
1da177e4 2066 */
a969e903 2067out:
8459d86a
ZB
2068 if ((written >= 0 || written == -EIOCBQUEUED) &&
2069 ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
1e8a81c5
HH
2070 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
2071 if (err < 0)
2072 written = err;
2073 }
1da177e4
LT
2074 return written;
2075}
2076EXPORT_SYMBOL(generic_file_direct_write);
2077
eb2be189
NP
2078/*
2079 * Find or create a page at the given pagecache position. Return the locked
2080 * page. This function is specifically for buffered writes.
2081 */
afddba49 2082struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
eb2be189
NP
2083{
2084 int status;
2085 struct page *page;
2086repeat:
2087 page = find_lock_page(mapping, index);
2088 if (likely(page))
2089 return page;
2090
2091 page = page_cache_alloc(mapping);
2092 if (!page)
2093 return NULL;
2094 status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
2095 if (unlikely(status)) {
2096 page_cache_release(page);
2097 if (status == -EEXIST)
2098 goto repeat;
2099 return NULL;
2100 }
2101 return page;
2102}
afddba49 2103EXPORT_SYMBOL(__grab_cache_page);
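
/*
 * Illustrative sketch (hypothetical helper name): the returned page is
 * locked and already in the pagecache/LRU; the caller unlocks and drops its
 * reference when done.
 */
static int my_with_locked_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = __grab_cache_page(mapping, index);

	if (!page)
		return -ENOMEM;

	/* ... operate on the locked page here ... */

	unlock_page(page);
	page_cache_release(page);
	return 0;
}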
eb2be189 2104
afddba49
NP
2105static ssize_t generic_perform_write_2copy(struct file *file,
2106 struct iov_iter *i, loff_t pos)
1da177e4 2107{
ae37461c 2108 struct address_space *mapping = file->f_mapping;
f5e54d6e 2109 const struct address_space_operations *a_ops = mapping->a_ops;
afddba49
NP
2110 struct inode *inode = mapping->host;
2111 long status = 0;
2112 ssize_t written = 0;
1da177e4
LT
2113
2114 do {
08291429 2115 struct page *src_page;
eb2be189 2116 struct page *page;
ae37461c
AM
2117 pgoff_t index; /* Pagecache index for current page */
2118 unsigned long offset; /* Offset into pagecache page */
08291429 2119 unsigned long bytes; /* Bytes to write to page */
ae37461c 2120 size_t copied; /* Bytes copied from user */
1da177e4 2121
ae37461c 2122 offset = (pos & (PAGE_CACHE_SIZE - 1));
1da177e4 2123 index = pos >> PAGE_CACHE_SHIFT;
2f718ffc 2124 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
afddba49 2125 iov_iter_count(i));
41cb8ac0 2126
08291429
NP
2127 /*
2128 * a non-NULL src_page indicates that we're doing the
2129 * copy via get_user_pages and kmap.
2130 */
2131 src_page = NULL;
2132
41cb8ac0
NP
2133 /*
2134 * Bring in the user page that we will copy from _first_.
2135 * Otherwise there's a nasty deadlock on copying from the
2136 * same page as we're writing to, without it being marked
2137 * up-to-date.
08291429
NP
2138 *
2139 * Not only is this an optimisation, but it is also required
2140 * to check that the address is actually valid, when atomic
2141 * usercopies are used, below.
41cb8ac0 2142 */
afddba49 2143 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
08291429
NP
2144 status = -EFAULT;
2145 break;
2146 }
eb2be189
NP
2147
2148 page = __grab_cache_page(mapping, index);
1da177e4
LT
2149 if (!page) {
2150 status = -ENOMEM;
2151 break;
2152 }
2153
08291429
NP
2154 /*
2155 * non-uptodate pages cannot cope with short copies, and we
2156 * cannot take a pagefault with the destination page locked.
2157 * So pin the source page to copy it.
2158 */
674b892e 2159 if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
08291429
NP
2160 unlock_page(page);
2161
2162 src_page = alloc_page(GFP_KERNEL);
2163 if (!src_page) {
2164 page_cache_release(page);
2165 status = -ENOMEM;
2166 break;
2167 }
2168
2169 /*
2170 * Cannot get_user_pages with a page locked for the
2171 * same reason as we can't take a page fault with a
2172 * page locked (as explained below).
2173 */
afddba49 2174 copied = iov_iter_copy_from_user(src_page, i,
2f718ffc 2175 offset, bytes);
08291429
NP
2176 if (unlikely(copied == 0)) {
2177 status = -EFAULT;
2178 page_cache_release(page);
2179 page_cache_release(src_page);
2180 break;
2181 }
2182 bytes = copied;
2183
2184 lock_page(page);
2185 /*
2186 * Can't handle the page going uptodate here, because
2187 * that means we would use non-atomic usercopies, which
2188 * zero out the tail of the page, which can cause
2189 * zeroes to become transiently visible. We could just
2190 * use a non-zeroing copy, but the APIs aren't too
2191 * consistent.
2192 */
2193 if (unlikely(!page->mapping || PageUptodate(page))) {
2194 unlock_page(page);
2195 page_cache_release(page);
2196 page_cache_release(src_page);
2197 continue;
2198 }
08291429
NP
2199 }
2200
1da177e4 2201 status = a_ops->prepare_write(file, page, offset, offset+bytes);
64649a58
NP
2202 if (unlikely(status))
2203 goto fs_write_aop_error;
994fc28c 2204
08291429
NP
2205 if (!src_page) {
2206 /*
2207 * Must not enter the pagefault handler here, because
2208 * we hold the page lock, so we might recursively
2209 * deadlock on the same lock, or get an ABBA deadlock
2210 * against a different lock, or against the mmap_sem
2211 * (which nests outside the page lock). So increment
2212 * preempt count, and use _atomic usercopies.
2213 *
2214 * The page is uptodate so we are OK to encounter a
2215 * short copy: if unmodified parts of the page are
2216 * marked dirty and written out to disk, it doesn't
2217 * really matter.
2218 */
2219 pagefault_disable();
afddba49 2220 copied = iov_iter_copy_from_user_atomic(page, i,
2f718ffc 2221 offset, bytes);
08291429
NP
2222 pagefault_enable();
2223 } else {
2224 void *src, *dst;
2225 src = kmap_atomic(src_page, KM_USER0);
2226 dst = kmap_atomic(page, KM_USER1);
2227 memcpy(dst + offset, src + offset, bytes);
2228 kunmap_atomic(dst, KM_USER1);
2229 kunmap_atomic(src, KM_USER0);
2230 copied = bytes;
2231 }
1da177e4 2232 flush_dcache_page(page);
4a9e5ef1 2233
1da177e4 2234 status = a_ops->commit_write(file, page, offset, offset+bytes);
55144768 2235 if (unlikely(status < 0))
64649a58 2236 goto fs_write_aop_error;
64649a58 2237 if (unlikely(status > 0)) /* filesystem did partial write */
08291429
NP
2238 copied = min_t(size_t, copied, status);
2239
2240 unlock_page(page);
2241 mark_page_accessed(page);
2242 page_cache_release(page);
2243 if (src_page)
2244 page_cache_release(src_page);
64649a58 2245
afddba49 2246 iov_iter_advance(i, copied);
4a9e5ef1 2247 pos += copied;
afddba49 2248 written += copied;
4a9e5ef1 2249
1da177e4
LT
2250 balance_dirty_pages_ratelimited(mapping);
2251 cond_resched();
64649a58
NP
2252 continue;
2253
2254fs_write_aop_error:
55144768 2255 unlock_page(page);
64649a58 2256 page_cache_release(page);
08291429
NP
2257 if (src_page)
2258 page_cache_release(src_page);
64649a58
NP
2259
2260 /*
2261 * prepare_write() may have instantiated a few blocks
2262 * outside i_size. Trim these off again. Don't need
2263 * i_size_read because we hold i_mutex.
2264 */
2265 if (pos + bytes > inode->i_size)
2266 vmtruncate(inode, inode->i_size);
55144768 2267 break;
afddba49
NP
2268 } while (iov_iter_count(i));
2269
2270 return written ? written : status;
2271}
2272
2273static ssize_t generic_perform_write(struct file *file,
2274 struct iov_iter *i, loff_t pos)
2275{
2276 struct address_space *mapping = file->f_mapping;
2277 const struct address_space_operations *a_ops = mapping->a_ops;
2278 long status = 0;
2279 ssize_t written = 0;
674b892e
NP
2280 unsigned int flags = 0;
2281
2282 /*
2283 * Copies from kernel address space cannot fail (NFSD is a big user).
2284 */
2285 if (segment_eq(get_fs(), KERNEL_DS))
2286 flags |= AOP_FLAG_UNINTERRUPTIBLE;
afddba49
NP
2287
2288 do {
2289 struct page *page;
2290 pgoff_t index; /* Pagecache index for current page */
2291 unsigned long offset; /* Offset into pagecache page */
2292 unsigned long bytes; /* Bytes to write to page */
2293 size_t copied; /* Bytes copied from user */
2294 void *fsdata;
2295
2296 offset = (pos & (PAGE_CACHE_SIZE - 1));
2297 index = pos >> PAGE_CACHE_SHIFT;
2298 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2299 iov_iter_count(i));
2300
2301again:
2302
2303 /*
2304 * Bring in the user page that we will copy from _first_.
2305 * Otherwise there's a nasty deadlock on copying from the
2306 * same page as we're writing to, without it being marked
2307 * up-to-date.
2308 *
2309 * Not only is this an optimisation, but it is also required
2310 * to check that the address is actually valid, when atomic
2311 * usercopies are used, below.
2312 */
2313 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
2314 status = -EFAULT;
2315 break;
2316 }
2317
674b892e 2318 status = a_ops->write_begin(file, mapping, pos, bytes, flags,
afddba49
NP
2319 &page, &fsdata);
2320 if (unlikely(status))
2321 break;
2322
2323 pagefault_disable();
2324 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
2325 pagefault_enable();
2326 flush_dcache_page(page);
2327
2328 status = a_ops->write_end(file, mapping, pos, bytes, copied,
2329 page, fsdata);
2330 if (unlikely(status < 0))
2331 break;
2332 copied = status;
2333
2334 cond_resched();
2335
124d3b70 2336 iov_iter_advance(i, copied);
afddba49
NP
2337 if (unlikely(copied == 0)) {
2338 /*
2339 * If we were unable to copy any data at all, we must
2340 * fall back to a single segment length write.
2341 *
2342 * If we didn't fall back here, we could livelock
2343 * because not all segments in the iov can be copied at
2344 * once without a pagefault.
2345 */
2346 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
2347 iov_iter_single_seg_count(i));
2348 goto again;
2349 }
afddba49
NP
2350 pos += copied;
2351 written += copied;
2352
2353 balance_dirty_pages_ratelimited(mapping);
2354
2355 } while (iov_iter_count(i));
2356
2357 return written ? written : status;
2358}
2359
2360ssize_t
2361generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
2362 unsigned long nr_segs, loff_t pos, loff_t *ppos,
2363 size_t count, ssize_t written)
2364{
2365 struct file *file = iocb->ki_filp;
2366 struct address_space *mapping = file->f_mapping;
2367 const struct address_space_operations *a_ops = mapping->a_ops;
2368 struct inode *inode = mapping->host;
2369 ssize_t status;
2370 struct iov_iter i;
2371
2372 iov_iter_init(&i, iov, nr_segs, count, written);
2373 if (a_ops->write_begin)
2374 status = generic_perform_write(file, &i, pos);
2375 else
2376 status = generic_perform_write_2copy(file, &i, pos);
1da177e4 2377
1da177e4 2378 if (likely(status >= 0)) {
afddba49
NP
2379 written += status;
2380 *ppos = pos + status;
2381
2382 /*
2383 * For now, when the user asks for O_SYNC, we'll actually give
2384 * O_DSYNC
2385 */
1da177e4
LT
2386 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2387 if (!a_ops->writepage || !is_sync_kiocb(iocb))
2388 status = generic_osync_inode(inode, mapping,
2389 OSYNC_METADATA|OSYNC_DATA);
2390 }
2391 }
2392
2393 /*
2394 * If we get here for O_DIRECT writes then we must have fallen through
2395 * to buffered writes (block instantiation inside i_size). So we sync
2396 * the file data here, to try to honour O_DIRECT expectations.
2397 */
2398 if (unlikely(file->f_flags & O_DIRECT) && written)
2399 status = filemap_write_and_wait(mapping);
2400
1da177e4
LT
2401 return written ? written : status;
2402}
2403EXPORT_SYMBOL(generic_file_buffered_write);
2404
5ce7852c 2405static ssize_t
1da177e4
LT
2406__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2407 unsigned long nr_segs, loff_t *ppos)
2408{
2409 struct file *file = iocb->ki_filp;
fb5527e6 2410 struct address_space * mapping = file->f_mapping;
1da177e4
LT
2411 size_t ocount; /* original count */
2412 size_t count; /* after file limit checks */
2413 struct inode *inode = mapping->host;
1da177e4
LT
2414 loff_t pos;
2415 ssize_t written;
2416 ssize_t err;
2417
2418 ocount = 0;
0ceb3314
DM
2419 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
2420 if (err)
2421 return err;
1da177e4
LT
2422
2423 count = ocount;
2424 pos = *ppos;
2425
2426 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2427
2428 /* We can write back this queue in page reclaim */
2429 current->backing_dev_info = mapping->backing_dev_info;
2430 written = 0;
2431
2432 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2433 if (err)
2434 goto out;
2435
2436 if (count == 0)
2437 goto out;
2438
d3ac7f89 2439 err = remove_suid(file->f_path.dentry);
1da177e4
LT
2440 if (err)
2441 goto out;
2442
870f4817 2443 file_update_time(file);
1da177e4
LT
2444
2445 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2446 if (unlikely(file->f_flags & O_DIRECT)) {
fb5527e6
JM
2447 loff_t endbyte;
2448 ssize_t written_buffered;
2449
2450 written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
2451 ppos, count, ocount);
1da177e4
LT
2452 if (written < 0 || written == count)
2453 goto out;
2454 /*
2455 * direct-io write to a hole: fall through to buffered I/O
2456 * for completing the rest of the request.
2457 */
2458 pos += written;
2459 count -= written;
fb5527e6
JM
2460 written_buffered = generic_file_buffered_write(iocb, iov,
2461 nr_segs, pos, ppos, count,
2462 written);
2463 /*
2464 * If generic_file_buffered_write() returned a synchronous error
2465 * then we want to return the number of bytes which were
2466 * direct-written, or the error code if that was zero. Note
2467 * that this differs from normal direct-io semantics, which
2468 * will return -EFOO even if some bytes were written.
2469 */
2470 if (written_buffered < 0) {
2471 err = written_buffered;
2472 goto out;
2473 }
1da177e4 2474
fb5527e6
JM
2475 /*
2476 * We need to ensure that the page cache pages are written to
2477 * disk and invalidated to preserve the expected O_DIRECT
2478 * semantics.
2479 */
2480 endbyte = pos + written_buffered - written - 1;
ef51c976
MF
2481 err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
2482 SYNC_FILE_RANGE_WAIT_BEFORE|
2483 SYNC_FILE_RANGE_WRITE|
2484 SYNC_FILE_RANGE_WAIT_AFTER);
fb5527e6
JM
2485 if (err == 0) {
2486 written = written_buffered;
2487 invalidate_mapping_pages(mapping,
2488 pos >> PAGE_CACHE_SHIFT,
2489 endbyte >> PAGE_CACHE_SHIFT);
2490 } else {
2491 /*
2492 * We don't know how much we wrote, so just return
2493 * the number of bytes which were direct-written
2494 */
2495 }
2496 } else {
2497 written = generic_file_buffered_write(iocb, iov, nr_segs,
2498 pos, ppos, count, written);
2499 }
1da177e4
LT
2500out:
2501 current->backing_dev_info = NULL;
2502 return written ? written : err;
2503}
1da177e4 2504
027445c3
BP
2505ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
2506 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1da177e4
LT
2507{
2508 struct file *file = iocb->ki_filp;
2509 struct address_space *mapping = file->f_mapping;
2510 struct inode *inode = mapping->host;
2511 ssize_t ret;
1da177e4 2512
027445c3
BP
2513 BUG_ON(iocb->ki_pos != pos);
2514
2515 ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2516 &iocb->ki_pos);
1da177e4
LT
2517
2518 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
027445c3 2519 ssize_t err;
1da177e4
LT
2520
2521 err = sync_page_range_nolock(inode, mapping, pos, ret);
2522 if (err < 0)
2523 ret = err;
2524 }
2525 return ret;
2526}
027445c3 2527EXPORT_SYMBOL(generic_file_aio_write_nolock);
1da177e4 2528
027445c3
BP
2529ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2530 unsigned long nr_segs, loff_t pos)
1da177e4
LT
2531{
2532 struct file *file = iocb->ki_filp;
2533 struct address_space *mapping = file->f_mapping;
2534 struct inode *inode = mapping->host;
2535 ssize_t ret;
1da177e4
LT
2536
2537 BUG_ON(iocb->ki_pos != pos);
2538
1b1dcc1b 2539 mutex_lock(&inode->i_mutex);
027445c3
BP
2540 ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
2541 &iocb->ki_pos);
1b1dcc1b 2542 mutex_unlock(&inode->i_mutex);
1da177e4
LT
2543
2544 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2545 ssize_t err;
2546
2547 err = sync_page_range(inode, mapping, pos, ret);
2548 if (err < 0)
2549 ret = err;
2550 }
2551 return ret;
2552}
2553EXPORT_SYMBOL(generic_file_aio_write);
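
/*
 * Illustrative sketch: a simple filesystem typically plugs these generic
 * helpers straight into its file_operations.  The structure name is
 * hypothetical; all the referenced helpers are real exported symbols.
 */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
};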
2554
cf9a2ae8
DH
2555/**
2556 * try_to_release_page() - release old fs-specific metadata on a page
2557 *
2558 * @page: the page which the kernel is trying to free
2559 * @gfp_mask: memory allocation flags (and I/O mode)
2560 *
2561 * The address_space is asked to try to release any data held against the page
2562 * (presumably at page->private). If the release was successful, return `1'.
2563 * Otherwise return zero.
2564 *
2565 * The @gfp_mask argument specifies whether I/O may be performed to release
3f31fddf 2566 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
cf9a2ae8 2567 *
cf9a2ae8
DH
2568 */
2569int try_to_release_page(struct page *page, gfp_t gfp_mask)
2570{
2571 struct address_space * const mapping = page->mapping;
2572
2573 BUG_ON(!PageLocked(page));
2574 if (PageWriteback(page))
2575 return 0;
2576
2577 if (mapping && mapping->a_ops->releasepage)
2578 return mapping->a_ops->releasepage(page, gfp_mask);
2579 return try_to_free_buffers(page);
2580}
2581
2582EXPORT_SYMBOL(try_to_release_page);
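
/*
 * Illustrative sketch (hypothetical helper name): try_to_release_page()
 * requires the page lock to be held, so a reclaim-style caller checks for
 * fs-private data and calls it under the lock it already holds.
 */
static int my_release_fs_private(struct page *page, gfp_t gfp)
{
	/* caller holds the page lock, as required by try_to_release_page() */
	if (!PagePrivate(page))
		return 1;	/* nothing attached, trivially "released" */
	return try_to_release_page(page, gfp);
}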