/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
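
/*
 * Illustrative sketch (not part of the original file): a block-based
 * filesystem typically wires its own hook, or block_invalidatepage
 * directly, into its address_space_operations, e.g.
 *
 *	static struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * The example_* names are placeholders.  When ->invalidatepage is NULL,
 * do_invalidatepage() above falls back to block_invalidatepage() itself.
 */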

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);
	ClearPageUptodate(page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	/* First pass: non-blocking.  Skip locked or under-writeback pages. */
	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Zero out the part of the boundary page beyond the truncation point. */
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	/* Second pass: blocking.  Wait on page locks and on writeback. */
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
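
/*
 * Illustrative sketch (not part of the original file): lend must point at
 * the last byte of the last page to be removed, so a caller punching out a
 * page-aligned byte range might do something like
 *
 *	truncate_inode_pages_range(mapping, holebegin,
 *				   holebegin + holelen - 1);
 *
 * where holebegin and holelen are page aligned (otherwise the BUG_ON above
 * fires).  The holebegin/holelen names are hypothetical.
 */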

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
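
/*
 * Illustrative sketch (not part of the original file): the classic caller
 * is the truncate path, which shrinks i_size under inode->i_mutex and then
 * drops the pagecache beyond the new end of file, roughly
 *
 *	mutex_lock(&inode->i_mutex);
 *	i_size_write(inode, newsize);
 *	truncate_inode_pages(inode->i_mapping, newsize);
 *	mutex_unlock(&inode->i_mutex);
 *
 * The newsize variable is a placeholder; a real caller such as vmtruncate()
 * also unmaps the affected range from user pagetables first.
 */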

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}

EXPORT_SYMBOL(invalidate_inode_pages);
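
/*
 * Illustrative sketch (not part of the original file): a typical user is
 * the POSIX_FADV_DONTNEED path, which converts byte offsets into page
 * indices and then asks for a best-effort invalidation, e.g.
 *
 *	invalidate_mapping_pages(mapping, offset >> PAGE_CACHE_SHIFT,
 *				 (offset + len - 1) >> PAGE_CACHE_SHIFT);
 *
 * Dirty, locked, mapped or under-writeback pages are simply skipped, so
 * the return value counts only the pages that were actually dropped.
 */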

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;
			int was_dirty;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			was_dirty = test_clear_page_dirty(page);
			if (!invalidate_complete_page(mapping, page)) {
				if (was_dirty)
					set_page_dirty(page);
				ret = -EIO;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
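
/*
 * Illustrative sketch (not part of the original file): direct-IO writes
 * need the cached pages covering the written range dropped so that later
 * buffered reads do not see stale data, along the lines of
 *
 *	invalidate_inode_pages2_range(mapping,
 *			offset >> PAGE_CACHE_SHIFT,
 *			(offset + count - 1) >> PAGE_CACHE_SHIFT);
 *
 * A -EIO return means some page could not be invalidated, for instance
 * because its private (buffer) data could not be released.
 */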

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
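
/*
 * Illustrative sketch (not part of the original file): network filesystems
 * call this whole-mapping variant when the server indicates that locally
 * cached data has gone stale, e.g.
 *
 *	err = invalidate_inode_pages2(inode->i_mapping);
 *	if (err)
 *		... handle or report the failure ...
 *
 * Unlike invalidate_inode_pages(), this variant unmaps pages from user
 * pagetables and returns -EIO rather than silently skipping pages it could
 * not invalidate.
 */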