/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
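
/*
 * A worked example of the accounting above (illustrative, not extra
 * code): INC_CACHE_INFO(add_total) expands to
 *
 *	do { swap_cache_info.add_total++; } while (0)
 *
 * where the do/while (0) wrapper makes the macro behave as a single
 * statement, e.g. under an unbraced "if".  The printks convert page
 * counts to kilobytes by shifting: with 4K pages PAGE_SHIFT is 12, so
 * (nr_swap_pages << (12 - 10)) multiplies the page count by 4, and
 * 1000 free swap pages are reported as 4000kB.
 */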

/*
 * add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
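
/*
 * Illustrative comparison (a sketch, not code from this file): for a
 * file page, add_to_page_cache(page, mapping, index, gfp) leaves
 *
 *	page->mapping == mapping;	page->index == index;
 *
 * whereas a page added above ends up with PageSwapCache(page) set and
 * page_private(page) == entry.val.  The radix tree is keyed by the raw
 * swap entry value instead of the file offset, and page_mapping()
 * reports &swapper_space for such pages via the PageSwapCache flag.
 */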

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));
	BUG_ON(!PageUptodate(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
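
/*
 * Typical caller (a paraphrased sketch of shrink_page_list() in
 * mm/vmscan.c, quoted from memory rather than verbatim):
 *
 *	if (PageAnon(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page, GFP_ATOMIC))
 *			goto activate_locked;
 *
 * i.e. reclaim allocates swap space for an anonymous page on demand,
 * and treats failure (swap full, or radix-tree -ENOMEM) by keeping
 * the page in memory.
 */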

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck inside
 * remove_exclusive_swap_page() _with_ the lock.
 * 				- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
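
/*
 * Worked example of the batching above (a sketch; PAGEVEC_SIZE is 14
 * on kernels of this vintage, but always use the constant, not the
 * number): with nr == 30 the loop makes three passes of todo = 14, 14
 * and 2, so release_pages() sees at most PAGEVEC_SIZE pages at a time,
 * bounding how long any one call holds the LRU lock.
 */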

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
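
/*
 * Typical caller (a paraphrased sketch of do_swap_page() in
 * mm/memory.c, quoted from memory rather than verbatim):
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *
 * i.e. the fault path first probes the cache cheaply, and only falls
 * back to allocating pages and reading from swap on a miss.
 */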

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		if (!swap_duplicate(entry))
			break;

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		SetPageLocked(new_page);
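		/*
		 * Masking gfp_mask with GFP_KERNEL below keeps only the
		 * reclaim bits (__GFP_WAIT|__GFP_IO|__GFP_FS): placement
		 * modifiers such as __GFP_HIGHMEM make no sense for the
		 * radix-tree nodes this may allocate.
		 */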
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
		ClearPageLocked(new_page);
		swap_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm->mmap_sem if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
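
/*
 * Worked example of the clustering (a sketch assuming page_cluster is
 * 3, its common default): valid_swaphandles() rounds the target down
 * to a (1 << 3) == 8-slot boundary, so a fault on swap offset 21 reads
 * offsets 16..23 (fewer if some slots in the block are unused).  The
 * loop above queues all of them asynchronously, and the final
 * read_swap_cache_async() returns the page for the original entry,
 * which by then is usually already in the swap cache.
 */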