/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */
9
10#include <linux/kernel.h>
4af3c9cc 11#include <linux/backing-dev.h>
1da177e4 12#include <linux/mm.h>
0fd0e6b0 13#include <linux/swap.h>
1da177e4
LT
14#include <linux/module.h>
15#include <linux/pagemap.h>
01f2705d 16#include <linux/highmem.h>
1da177e4 17#include <linux/pagevec.h>
e08748ce 18#include <linux/task_io_accounting_ops.h>
1da177e4 19#include <linux/buffer_head.h> /* grr. try_to_release_page,
aaa4059b 20 do_invalidatepage */
ba470de4 21#include "internal.h"
1da177e4
LT
22
23

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
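
#ifdef CONFIG_BLOCK
/*
 * Illustrative sketch (not part of the original file): a buffer_head
 * based filesystem usually supplies ->invalidatepage itself, most often
 * plain block_invalidatepage(), which is also what the NULL fallback
 * above resolves to.  example_bh_aops is a hypothetical name used only
 * for illustration.
 */
static const struct address_space_operations example_bh_aops = {
	.invalidatepage	= block_invalidatepage,
};
#endif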

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
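
/*
 * Illustrative sketch (modelled on fs/buffer.c, not part of the original
 * file): once every buffer on a page has been dropped as clean, the
 * page-level dirty bit is stale and can be cancelled without issuing IO.
 * Both example_* names are hypothetical; example_drop_buffers() stands
 * in for fs/buffer.c's drop_buffers().
 */
static int example_drop_buffers(struct page *page)
{
	return 0;	/* hypothetical stub */
}

static int example_try_to_free_buffers(struct page *page)
{
	int ret = example_drop_buffers(page);

	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	return ret;
}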

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
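
/*
 * Illustrative sketch (not part of the original file): a filesystem opts
 * in to hwpoison recovery by pointing ->error_remove_page at the helper
 * above; the memory-failure code then calls it through the mapping.
 * example_file_aops is a hypothetical name.
 */
static const struct address_space_operations example_file_aops = {
	.error_remove_page	= generic_error_remove_page,
};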

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
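
/*
 * Illustrative sketch (not part of the original file): the classic
 * caller is a filesystem's inode-teardown path, which must empty the
 * pagecache before the inode can be freed.  example_delete_inode is a
 * hypothetical method.
 */
static void example_delete_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);	/* drop *all* pages */
	clear_inode(inode);
}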

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
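
/*
 * Illustrative sketch (not part of the original file): this is the kind
 * of call the POSIX_FADV_DONTNEED path makes to drop clean cached pages
 * over a byte range while leaving dirty, locked or mapped pages alone.
 * example_drop_clean_range is a hypothetical helper.
 */
static void example_drop_clean_range(struct address_space *mapping,
				     loff_t offset, loff_t len)
{
	pgoff_t first = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t last = (offset + len - 1) >> PAGE_CACHE_SHIFT;

	if (mapping->nrpages && last >= first)
		invalidate_mapping_pages(mapping, first, last);
}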

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
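
/*
 * Illustrative sketch (not part of the original file): callers that
 * bypass the pagecache - direct IO writers, or NFS when the server copy
 * changed - use the pages2 variants to force every cached page out,
 * getting -EBUSY back rather than silently keeping stale data.
 * example_sync_and_drop is a hypothetical helper.
 */
static int example_sync_and_drop(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	int err = filemap_write_and_wait(mapping);

	if (!err)
		err = invalidate_inode_pages2(mapping);
	return err;
}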

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
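
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, a filesystem shrinking a file publishes the new i_size first,
 * then trims the pagecache, and only then releases the on-disk blocks.
 * Both example_* names are hypothetical.
 */
static void example_free_blocks(struct inode *inode, loff_t newsize)
{
	/* hypothetical: release on-disk blocks beyond newsize */
}

static void example_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);			/* new size first */
	truncate_pagecache(inode, oldsize, newsize);	/* then pagecache */
	example_free_blocks(inode, newsize);		/* then disk blocks */
}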

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	loff_t oldsize;
	int error;

	error = inode_newsize_ok(inode, offset);
	if (error)
		return error;

	oldsize = inode->i_size;
	i_size_write(inode, offset);
	truncate_pagecache(inode, oldsize, offset);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	return error;
}
EXPORT_SYMBOL(vmtruncate);
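
/*
 * Illustrative sketch (not part of the original file): vmtruncate() is
 * typically reached from a filesystem's ->setattr method when ATTR_SIZE
 * is set, after the usual permission checks.  example_setattr is a
 * hypothetical method.
 */
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error = inode_change_ok(inode, attr);

	if (error)
		return error;

	if (attr->ia_valid & ATTR_SIZE) {
		/* updates i_size, unmaps and truncates the pagecache */
		error = vmtruncate(inode, attr->ia_size);
	}
	return error;
}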