/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

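/*
 * page_tree is set up with GFP_ATOMIC|__GFP_NOWARN because insertions
 * run under tree_lock with interrupts disabled; add_to_swap_cache()
 * calls radix_tree_preload() first so that node allocation does not
 * have to dip into atomic reserves on that path.
 */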
struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;
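
/*
 * These counters are bumped with plain increments: add_total and del_total
 * are only updated under swapper_space.tree_lock, while the find_* counters
 * may race.  That is acceptable for purely informational statistics.
 */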

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	/* "<< (PAGE_SHIFT - 10)" converts a page count to kilobytes */
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.  The caller must already
 * hold swapper_space.tree_lock.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = add_to_swap_cache(page, entry,
				__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
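
/*
 * Note that add_to_swap() only marks the page dirty in the swap cache;
 * the actual write to the swap area happens later, when reclaim passes
 * the page to pageout() and swap_writepage() is reached via swap_aops.
 */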

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	mem_cgroup_uncharge_swapcache(page, entry);
	swap_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}
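
/*
 * The trylock_page() above makes free_swap_cache() purely opportunistic:
 * if the page is locked by somebody else it is simply skipped, and its
 * swap cache entry is freed later by reclaim or a subsequent caller.
 */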

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
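
/*
 * The PAGEVEC_SIZE batching above presumably bounds the amount of work
 * done per release_pages() call, and hence the time spent inside it
 * holding the zone LRU lock.
 */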

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
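
/*
 * Typical caller pattern, sketched from do_swap_page() in mm/memory.c:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 */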

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		if (!swap_duplicate(entry))
			break;

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (likely(!err)) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		swap_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
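
/*
 * Note that the loop above only gives up on -ENOMEM.  An -EEXIST from
 * add_to_swap_cache() means we raced with another user of the same
 * entry; on the retry the page is normally found in the swap cache by
 * find_get_page().
 */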

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
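
/*
 * The readahead pages are released as soon as they are queued: the swap
 * cache keeps them alive while the I/O completes, and only the page for
 * the original @entry is returned with a reference.  The window size
 * (1 << page_cluster) is tunable via /proc/sys/vm/page-cluster.
 */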