/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
        void (*invalidatepage)(struct page *, unsigned long);
        invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
#endif
        if (invalidatepage)
                (*invalidatepage)(page, offset);
}

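/*
 * Illustrative sketch, not part of this file: a block-backed filesystem
 * usually either leaves ->invalidatepage NULL (and gets the
 * block_invalidatepage() fallback above when CONFIG_BLOCK is set) or
 * wires it up explicitly.  The a_ops and helpers below are hypothetical.
 */
#if 0   /* example only */
static const struct address_space_operations example_aops = {
        .readpage       = example_readpage,     /* hypothetical */
        .writepage      = example_writepage,    /* hypothetical */
        .invalidatepage = block_invalidatepage, /* from buffer_head.h */
};
#endif
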
/*
 * Zero out the tail of a partially-truncated page and let the
 * filesystem invalidate any private state beyond the truncation point.
 */
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
}

void cancel_dirty_page(struct page *page, unsigned int account_size)
{
        /* If we're cancelling the page, it had better not be mapped any more */
        if (page_mapped(page)) {
                static unsigned int warncount;

                WARN_ON(++warncount < 5);
        }

        if (TestClearPageDirty(page)) {
                struct address_space *mapping = page->mapping;
                if (mapping && mapping_cap_account_dirty(mapping)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
                        if (account_size)
                                task_io_account_cancelled_write(account_size);
                }
        }
}
EXPORT_SYMBOL(cancel_dirty_page);

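/*
 * Illustrative sketch, not part of this file: a filesystem that must
 * throw away a dirty page it can no longer write back would cancel the
 * dirty accounting before dropping the page, roughly as
 * truncate_complete_page() below does:
 */
#if 0   /* example only */
static void example_toss_dirty_page(struct page *page)
{
        cancel_dirty_page(page, PAGE_CACHE_SIZE);       /* undo accounting */
        ClearPageUptodate(page);
        remove_from_page_cache(page);
        page_cache_release(page);                       /* pagecache ref */
}
#endif
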
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first, and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return;

        cancel_dirty_page(page, PAGE_CACHE_SIZE);

        if (PagePrivate(page))
                do_invalidatepage(page, 0);

        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        remove_from_page_cache(page);
        page_cache_release(page);       /* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);

        return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        pgoff_t end;
        const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t next;
        int i;

        if (mapping->nrpages == 0)
                return;

        /* lend is inclusive and must point at the last byte of a page */
        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
        end = (lend >> PAGE_CACHE_SHIFT);

        /* Pass 1: nonblocking - skip over locked and writeback pages */
        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;

                        if (page_index > end) {
                                next = page_index;
                                break;
                        }

                        if (page_index > next)
                                next = page_index;
                        next++;
                        if (TestSetPageLocked(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (partial) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        wait_on_page_writeback(page);
                        truncate_partial_page(page, partial);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        /* Pass 2: wait on locks and writeback, truncate whatever is left */
        next = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }
                if (pvec.pages[0]->index > end) {
                        pagevec_release(&pvec);
                        break;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end)
                                break;
                        lock_page(page);
                        wait_on_page_writeback(page);
                        if (page->index > next)
                                next = page->index;
                        next++;
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);
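
/*
 * Illustrative sketch, not part of this file: a truncate-to-new_size
 * path (compare vmtruncate() in mm/memory.c) first shoots down the
 * pagetable mappings beyond the new EOF and then removes the pagecache.
 * Passing (loff_t)-1 as lend satisfies the BUG_ON() above, since all of
 * its low bits are set.
 */
#if 0   /* example only */
static void example_truncate_pagecache(struct inode *inode, loff_t new_size)
{
        struct address_space *mapping = inode->i_mapping;

        unmap_mapping_range(mapping, new_size + PAGE_CACHE_SIZE - 1, 0, 1);
        truncate_inode_pages_range(mapping, new_size, (loff_t)-1);
}
#endif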

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next = start;
        unsigned long ret = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (next <= end &&
                        pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int lock_failed;

                        lock_failed = TestSetPageLocked(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;
                        if (lock_failed)
                                continue;

                        if (PageDirty(page) || PageWriteback(page))
                                goto unlock;
                        if (page_mapped(page))
                                goto unlock;
                        ret += invalidate_complete_page(mapping, page);
unlock:
                        unlock_page(page);
                        if (next > end)
                                break;
                }
                pagevec_release(&pvec);
        }
        return ret;
}

/*
 * Invalidate every clean, unmapped, unlocked page of @mapping without
 * blocking on I/O.
 */
unsigned long invalidate_inode_pages(struct address_space *mapping)
{
        return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
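
/*
 * Illustrative sketch, not part of this file: a block driver that
 * detects a media change can toss every clean, unmapped, unlocked page
 * cached for the device inode without blocking on I/O:
 */
#if 0   /* example only */
static void example_media_changed(struct block_device *bdev)
{
        invalidate_inode_pages(bdev->bd_inode->i_mapping);
}
#endif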

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;

        write_lock_irq(&mapping->tree_lock);
        if (PageDirty(page))
                goto failed;

        BUG_ON(PagePrivate(page));
        __remove_from_page_cache(page);
        write_unlock_irq(&mapping->tree_lock);
        ClearPageUptodate(page);
        page_cache_release(page);       /* pagecache ref */
        return 1;
failed:
        write_unlock_irq(&mapping->tree_lock);
        return 0;
}

/*
 * Ask the filesystem to write back a dirty page before it is invalidated.
 * Returns 0 if the page is already clean or the filesystem has no
 * ->launder_page operation; otherwise returns the filesystem's result.
 */
static int do_launder_page(struct address_space *mapping, struct page *page)
{
        if (!PageDirty(page))
                return 0;
        if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
                return 0;
        return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next;
        int i;
        int ret = 0;
        int did_range_unmap = 0;
        int wrapped = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end && !ret && !wrapped &&
                pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;

                        lock_page(page);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        page_index = page->index;
                        next = page_index + 1;
                        if (next == 0)
                                wrapped = 1;
                        if (page_index > end) {
                                unlock_page(page);
                                break;
                        }
                        wait_on_page_writeback(page);
                        while (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
                                            (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                            (loff_t)(end - page_index + 1)
                                                        << PAGE_CACHE_SHIFT,
                                            0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
                                            (loff_t)page_index<<PAGE_CACHE_SHIFT,
                                            PAGE_CACHE_SIZE, 0);
                                }
                        }
                        ret = do_launder_page(mapping, page);
                        if (ret == 0 && !invalidate_complete_page2(mapping, page))
                                ret = -EIO;
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        WARN_ON_ONCE(ret);
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
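
/*
 * Illustrative sketch, not part of this file: the direct-I/O write path
 * (compare generic_file_direct_IO() in mm/filemap.c) invalidates the
 * cached pages covering the written range so that later buffered reads
 * do not see stale data:
 */
#if 0   /* example only */
static int example_dio_invalidate(struct address_space *mapping,
                                  loff_t offset, size_t count)
{
        pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;

        if (mapping->nrpages == 0)
                return 0;
        return invalidate_inode_pages2_range(mapping,
                                        offset >> PAGE_CACHE_SHIFT, end);
}
#endif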

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
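
/*
 * Illustrative sketch, not part of this file: a network filesystem that
 * notices the file changed on the server (compare nfs_revalidate_mapping()
 * in fs/nfs/inode.c) drops its entire pagecache, unmapping pages as
 * needed, and propagates -EIO if anything could not be invalidated:
 */
#if 0   /* example only */
static int example_revalidate_mapping(struct inode *inode)
{
        return invalidate_inode_pages2(inode->i_mapping);
}
#endif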