/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

struct pbe *pagedir_nosave;
unsigned int nr_copy_pages;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%1000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page)) {
			printk("highmem reserved page?!\n");
			continue;
		}
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem\n");
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#endif
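
/*
 * Added note (not in the original file): highmem_copy is used as a
 * LIFO list. save_highmem_zone() pushes each copy onto the head and
 * restore_highmem() pops from the head, so pages come back in reverse
 * save order; ordering is irrelevant because every node carries its
 * own destination in save->page. A hedged sketch of the intended
 * suspend/resume pairing (the calling context is illustrative only):
 */
#if 0	/* illustrative sketch, not compiled */
	/* suspend side: snapshot all saveable highmem pages */
	if (save_highmem())
		return -ENOMEM;	/* kmalloc()/get_zeroed_page() failed */
	/* resume side: write the copies back and free the list */
	restore_highmem();
#endif
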
static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

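/*
 * Added worked example (not in the original file), assuming 4 KiB
 * pages (PAGE_SHIFT == 12): if __nosave_begin sits at physical
 * 0x100400 and __nosave_end at 0x102200, then nosave_begin_pfn is
 * 0x100400 >> 12 == 0x100 and nosave_end_pfn is
 * PAGE_ALIGN(0x102200) >> 12 == 0x103000 >> 12 == 0x103, so pfns
 * 0x100..0x102 are nosave. Rounding the start down and the end up
 * keeps the partially covered first and last pages out of the image.
 */
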
/**
 * saveable - Determine whether a page should be cloned or not.
 * @zone: The zone the page belongs to.
 * @zone_pfn: The page's offset within the zone.
 *
 * We save a page if it isn't Nosave, is not in the range of pages
 * statically defined as 'unsaveable', and isn't part of a free chunk
 * of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}

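/*
 * Added summary (not in the original file) of the checks in
 * saveable() above:
 *
 *	invalid pfn				-> not saveable
 *	PG_nosave set				-> not saveable (snapshot's own page)
 *	PG_reserved and in the nosave section	-> not saveable
 *	PG_nosave_free set			-> not saveable (free page)
 *	anything else				-> saveable
 */
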
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}

static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}

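/*
 * Added note (not in the original file): copy_data_pages() walks two
 * sequences in lock step -- the saveable pages of each lowmem zone and
 * the preallocated PBE list. Marking the pagedir pages and the copy
 * pages PG_nosave_free up front makes saveable() skip them, so the
 * snapshot never includes its own buffers; the final BUG_ON(pbe)
 * asserts that exactly count_data_pages() PBEs were consumed.
 */
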
/**
 * free_pagedir - free pages allocated with alloc_pagedir()
 */

void free_pagedir(struct pbe *pblist)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}

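/*
 * Added note (not in the original file): the link to the next pagedir
 * page lives in the current page's last PBE slot, so free_pagedir()
 * must read (pblist + PB_PAGE_SKIP)->next before free_page() releases
 * the current page.
 */
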
/**
 * fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

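/*
 * Added illustration (not in the original file), assuming 4 KiB pages
 * and a 32-bit build where struct pbe is three words (12 bytes): then
 * PBES_PER_PAGE == 341 and PB_PAGE_SKIP == 340. fill_pb_page() links
 * slots 0..339 to their in-page successor and leaves slot 340 alone;
 * alloc_pagedir() stores the pointer to the next pagedir page there:
 *
 *	page A: pbe[0] -> pbe[1] -> ... -> pbe[340] -> page B: pbe[0] -> ...
 */
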
/**
 * create_pbe_list - Create a list of PBEs on top of a given chain
 * of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}

/**
 * On resume it is necessary to track and eventually free the unsafe
 * pages that have been allocated, because they are needed for I/O
 * (on x86-64 we will likely "eat" these pages once again while
 * creating the temporary page translation tables)
 */

struct eaten_page {
	struct eaten_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct eaten_page *eaten_pages = NULL;

void release_eaten_pages(void)
{
	struct eaten_page *p, *q;

	p = eaten_pages;
	while (p) {
		q = p->next;
		/* We don't want swsusp_free() to free this page again */
		ClearPageNosave(virt_to_page(p));
		free_page((unsigned long)p);
		p = q;
	}
	eaten_pages = NULL;
}

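/*
 * Added note (not in the original file): struct eaten_page is sized
 * to exactly PAGE_SIZE, so an eaten page doubles as its own list
 * node -- the first sizeof(void *) bytes hold the next pointer and
 * the padding fills the rest of the page.
 */
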
/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend.
 *
 * The unsafe pages are marked with the PG_nosave_free flag
 *
 * Allocated but unusable (i.e. eaten) memory pages should be marked
 * so that swsusp_free() can release them
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	if (safe_needed)
		do {
			res = (void *)get_zeroed_page(gfp_mask);
			if (res && PageNosaveFree(virt_to_page(res))) {
				/* This is for swsusp_free() */
				SetPageNosave(virt_to_page(res));
				((struct eaten_page *)res)->next = eaten_pages;
				eaten_pages = res;
			}
		} while (res && PageNosaveFree(virt_to_page(res)));
	else
		res = (void *)get_zeroed_page(gfp_mask);
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

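/*
 * Added note (not in the original file): with safe_needed set, the
 * loop above may "eat" pages. Whenever get_zeroed_page() returns a
 * page already marked PG_nosave_free (one the image data must be
 * restored into), the page is chained onto eaten_pages rather than
 * freed -- freeing it would just hand back the same conflicting page
 * on the next attempt. release_eaten_pages() returns them to the
 * allocator once the conflict is over.
 */
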
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}

/**
 * alloc_pagedir - Allocate the page directory.
 *
 * First, determine exactly how many pages we need and
 * allocate them.
 *
 * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 * struct pbe elements (pbes) and the last element in the page points
 * to the next page.
 *
 * On each page we set up a list of struct pbe elements.
 */

struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}

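/*
 * Added usage sketch (not in the original file): a pagedir for
 * nr_pages PBEs occupies (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE
 * pages (round-up division), the same quantity enough_free_mem() and
 * swsusp_save() account for.
 */
#if 0	/* illustrative sketch, not compiled */
	struct pbe *pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 0);

	if (pblist && alloc_data_pages(pblist, GFP_ATOMIC, 0)) {
		swsusp_free();	/* releases the pagedir and any data pages */
		pblist = NULL;
	}
#endif
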
/**
 * Free pages we allocated for suspend. Suspend pages are allocated
 * before the atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
}

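/*
 * Added note (not in the original file): only pages carrying both
 * PG_nosave and PG_nosave_free are freed here. That flag pair is set
 * by alloc_image_page(), so the sweep releases the pagedir pages and
 * the image copies while leaving every other page alone.
 */
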

/**
 * enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 * Returns TRUE or FALSE after checking the number of available
 * free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
	return nr_free_pages() > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}

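/*
 * Added worked example (not in the original file), assuming
 * PBES_PER_PAGE == 341 (4 KiB pages, 12-byte struct pbe): to snapshot
 * nr_pages == 10000 we need 10000 copy pages plus
 * (10000 + 340) / 341 == 30 pagedir pages plus PAGES_FOR_IO, so
 * nr_free_pages() must exceed 10030 + PAGES_FOR_IO.
 */
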
int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}

static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This especially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}