/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
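
/*
 * Illustrative note, not part of the original file: the swap cache is just
 * swapper_space's radix tree keyed by the raw swp_entry_t value, so probing
 * it is an ordinary page-cache lookup on swapper_space, roughly:
 *
 *	swp_entry_t entry = { .val = page_private(page) };
 *	struct page *p = find_get_page(&swapper_space, entry.val);
 *	if (p)
 *		page_cache_release(p);	(drop the reference find_get_page took)
 */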

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}
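
/*
 * Illustrative sketch, not from the original file: add_to_swap_cache()
 * expects a locked, PageSwapBacked page that is not yet PageSwapCache,
 * plus a swap entry the caller already owns; gfp_mask only governs the
 * radix-tree node preload.  A caller might use it roughly like this
 * (error handling elided):
 *
 *	swp_entry_t entry = get_swap_page();
 *	if (entry.val &&
 *	    add_to_swap_cache(page, entry,
 *			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN) == 0)
 *		SetPageDirty(page);
 */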

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = add_to_swap_cache(page, entry,
				__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swapcache_free(entry, NULL);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swapcache_free(entry, NULL);
			return 0;
		}
	}
}
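
/*
 * Illustrative sketch, an assumption rather than part of this file: the
 * main user of add_to_swap() is page reclaim, which calls it on a locked
 * anonymous page and gives up on that page if no swap entry could be
 * allocated, along the lines of vmscan's shrink_page_list:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;
 *	}
 */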

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * Its ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
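
/*
 * Illustrative sketch (assumed caller, not part of this file): unmap paths
 * that tear down many mappings at once batch the pages into an array and
 * hand the whole batch over, so swap cache cleanup and the final put are
 * done PAGEVEC_SIZE pages at a time:
 *
 *	struct page *batch[64];		(array size chosen for illustration)
 *	int n = 0;
 *	... collect pages to free into batch[n++] ...
 *	free_pages_and_swap_cache(batch, n);
 */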

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
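
/*
 * Illustrative note, not part of the original file: a page returned by
 * lookup_swap_cache() carries the reference taken by find_get_page(), so
 * the caller is responsible for dropping it:
 *
 *	struct page *page = lookup_swap_cache(entry);
 *	if (page) {
 *		... use the (unlocked) page ...
 *		page_cache_release(page);
 *	}
 */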

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
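
/*
 * Illustrative sketch, an assumption about a typical caller rather than
 * part of this file: a swap fault handler such as do_swap_page() first
 * probes the swap cache and only falls back to readahead on a miss,
 * roughly:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry,
 *				GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		... treat as OOM, or as an entry freed by a racing task ...
 */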