/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= RW_LOCK_UNLOCKED,
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
	unsigned long noent_race;
	unsigned long exist_race;
} swap_cache_info;
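
/*
 * For example, INC_CACHE_INFO(add_total) expands to
 *
 *	do { swap_cache_info.add_total++; } while (0)
 *
 * The do { } while (0) wrapper makes the expansion behave as a single
 * statement, so the macro composes safely with unbraced if/else.
 */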

void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total,
		swap_cache_info.noent_race, swap_cache_info.exist_race);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
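
/*
 * Note the preload pattern used above: radix_tree_preload() fills a
 * per-cpu pool of tree nodes (and leaves preemption disabled on
 * success), so the radix_tree_insert() done under tree_lock never has
 * to allocate memory with the lock held.  The general idiom looks
 * roughly like this (lock, tree, index and item are illustrative
 * names):
 *
 *	error = radix_tree_preload(GFP_KERNEL);
 *	if (!error) {
 *		spin_lock_irq(&lock);
 *		error = radix_tree_insert(&tree, index, item);
 *		spin_unlock_irq(&lock);
 *		radix_tree_preload_end();
 *	}
 */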

static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
	 */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	pagecache_acct(-1);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
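
/*
 * Illustrative caller: the swap-out step of shrink_list() in
 * mm/vmscan.c runs with the page locked and uses GFP_ATOMIC so that
 * reclaim does not recurse into itself.  A sketch from memory, not the
 * verbatim vmscan code:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page, GFP_ATOMIC))
 *			goto activate_locked;
 *	}
 *
 * A return of 0 (no swap slot, or cache insertion failed) makes the
 * caller keep the page in memory.
 */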

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}
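
/*
 * The swizzle moves a tmpfs page from the file's page cache into the
 * swap cache under memory pressure.  A hedged sketch of the
 * shmem_writepage() caller (simplified from memory, not verbatim; the
 * bookkeeping step is paraphrased):
 *
 *	swap = get_swap_page();
 *	if (swap.val && move_to_swap_cache(page, swap) == 0) {
 *		...record swap in the shmem inode's swap vector...
 *		unlock_page(page);
 *		return 0;
 *	}
 *
 * On success the page is marked dirty again, so the swap writepage
 * path will write it out.
 */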

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}
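
/*
 * And this is the inverse swizzle: when shmem_getpage() (or
 * shmem_unuse()) finds the data in the swap cache, it moves the page
 * back into the file's page cache.  A hedged sketch of the caller side
 * (illustrative, not verbatim):
 *
 *	swappage = lookup_swap_cache(entry);
 *	...lock the page and wait for writeback to finish...
 *	error = move_from_swap_cache(swappage, idx, mapping);
 *	...on error, back out and retry...
 *
 * The ClearPageDirty/set_page_dirty pair above re-dirties the page
 * against its new mapping, so it sits on the mapping's dirty list and
 * cannot be reclaimed as if it were clean.
 */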

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
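
/*
 * Batching in PAGEVEC_SIZE chunks keeps the LRU lock hold times inside
 * release_pages() bounded.  The main user is the mmu_gather TLB code,
 * which frees all the pages it just unmapped in one call; roughly (a
 * sketch of tlb_flush_mmu() in asm-generic/tlb.h, from memory):
 *
 *	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 *	tlb->nr = 0;
 */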

/*
 * Lookup a swap entry in the swap cache.  A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
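
/*
 * Typical use is the page fault path: do_swap_page() first tries
 * lookup_swap_cache(), and only on a miss starts real I/O.  A hedged
 * sketch (simplified from the mm/memory.c of this era, not verbatim):
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page) {
 *		swapin_readahead(entry, address, vma);
 *		page = read_swap_cache_async(entry, vma, address);
 *		if (!page)
 *			...entry was freed or we are out of memory...
 *	}
 *
 * Note how the -EEXIST case loops above: if a racing task inserted a
 * page for this entry first, find_get_page() picks up the winner's
 * page and we return that instead.
 */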