/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has already set the SWAP_HAS_CACHE
		 * flag calls add_to_swap_cache(), so add_to_swap_cache()
		 * never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}
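
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the calling protocol add_to_swap_cache() expects.  The caller must hold
 * the page lock, mark the page PageSwapBacked, and own the entry's
 * SWAP_HAS_CACHE reference, either because get_swap_page() just allocated
 * the entry or because swapcache_prepare() claimed an existing one (this is
 * the pattern read_swap_cache_async() below follows).  The helper name
 * cache_existing_entry() is hypothetical; the other identifiers are the
 * ones used in this file.
 *
 *	static int cache_existing_entry(struct page *page, swp_entry_t entry)
 *	{
 *		int err;
 *
 *		VM_BUG_ON(!PageLocked(page));
 *		SetPageSwapBacked(page);
 *
 *		err = swapcache_prepare(entry);	// claim SWAP_HAS_CACHE
 *		if (err)
 *			return err;		// -EEXIST if racing, -ENOENT if freed
 *
 *		err = add_to_swap_cache(page, entry, GFP_KERNEL);
 *		if (err) {
 *			ClearPageSwapBacked(page);
 *			swapcache_free(entry, NULL);	// drop SWAP_HAS_CACHE again
 *		}
 *		return err;
 *	}
 */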

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag here.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
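
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a reclaim-style caller uses add_to_swap().  This mirrors what
 * shrink_page_list() does for dirty anonymous pages; the helper name
 * start_swap_out() is hypothetical.
 *
 *	static int start_swap_out(struct page *page)
 *	{
 *		VM_BUG_ON(!PageLocked(page));
 *
 *		if (!add_to_swap(page))
 *			return -ENOMEM;	// no swap slot, or radix-tree -ENOMEM
 *
 *		// The page is now PageSwapCache and PageDirty; writing it out
 *		// goes through swap_aops->writepage, i.e. swap_writepage().
 *		return 0;
 *	}
 */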

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page on the free list, because
 * the caller still holds a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}
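
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * taking a page out of the swap cache.  The page must be locked and known
 * to be in the swap cache; delete_from_swap_cache() then drops both the
 * entry's SWAP_HAS_CACHE reference and the cache's page reference, while
 * the caller keeps its own.  The helper name drop_swapcache_page() is
 * hypothetical.
 *
 *	static void drop_swapcache_page(struct page *page)
 *	{
 *		VM_BUG_ON(!PageLocked(page));
 *		VM_BUG_ON(!PageSwapCache(page));
 *
 *		delete_from_swap_cache(page);
 *		// The caller's own reference still pins the page here.
 *	}
 */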

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
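
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * free_pages_and_swap_cache() is meant for batched teardown, e.g. the
 * mmu_gather path that collects pages while unmapping and releases them in
 * one go.  The batch size and helper below are hypothetical; only the
 * free_pages_and_swap_cache() call itself comes from this file.
 *
 *	#define EXAMPLE_BATCH	64
 *
 *	static void example_flush_batch(struct page **batch, int *nr)
 *	{
 *		if (*nr) {
 *			// Drops the swap cache where we were the last user,
 *			// then releases the references, PAGEVEC_SIZE at a time.
 *			free_pages_and_swap_cache(batch, *nr);
 *			*nr = 0;
 *		}
 *	}
 */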

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
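
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the usual fault-path pattern, as in do_swap_page(): try the swap cache
 * first with lookup_swap_cache(), and only fall back to I/O (with
 * readahead) on a miss.  The helper name swapin_page() is hypothetical.
 *
 *	static struct page *swapin_page(swp_entry_t entry, gfp_t gfp_mask,
 *					struct vm_area_struct *vma,
 *					unsigned long addr)
 *	{
 *		struct page *page;
 *
 *		page = lookup_swap_cache(entry);	// hit: counted as find_success
 *		if (!page)
 *			page = swapin_readahead(entry, gfp_mask, vma, addr);
 *		return page;	// NULL means allocation failure or stale entry
 *	}
 */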

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_preload() while we can still wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
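
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * read_swap_cache_async() only *starts* the read; the page it returns may
 * still be locked and not yet PageUptodate.  A caller that needs the data
 * synchronously typically locks the page and checks the uptodate flag, as
 * do_swap_page() does.  The helper name read_swap_page_sync() is
 * hypothetical.
 *
 *	static struct page *read_swap_page_sync(swp_entry_t entry, gfp_t gfp,
 *						struct vm_area_struct *vma,
 *						unsigned long addr)
 *	{
 *		struct page *page;
 *
 *		page = read_swap_cache_async(entry, gfp, vma, addr);
 *		if (!page)
 *			return NULL;
 *
 *		lock_page(page);		// waits for swap_readpage() I/O
 *		if (!PageUptodate(page)) {	// the read failed
 *			unlock_page(page);
 *			page_cache_release(page);
 *			return NULL;
 *		}
 *		unlock_page(page);
 *		return page;
 *	}
 */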

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
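
/*
 * Worked example (editorial addition, not part of the original file):
 * with the default page_cluster of 3 (tunable via /proc/sys/vm/page-cluster),
 * the aligned readahead window is 1 << 3 = 8 slots.  A fault on swap slot 21
 * of a fully populated swap area therefore has valid_swaphandles() describe
 * slots 16..23; the loop above queues asynchronous reads for them, and the
 * final read_swap_cache_async() call returns the page for slot 21 itself.
 * (valid_swaphandles() may shrink the window where neighbouring slots are
 * free or bad.)
 */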