]>
Commit | Line | Data |
---|---|---|
f8af4da3 HD |
1 | #ifndef __LINUX_KSM_H |
2 | #define __LINUX_KSM_H | |
3 | /* | |
4 | * Memory merging support. | |
5 | * | |
6 | * This code enables dynamic sharing of identical pages found in different | |
7 | * memory areas, even if they are not shared by fork(). | |
8 | */ | |
9 | ||
10 | #include <linux/bitops.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/sched.h> | |
9a840895 | 13 | #include <linux/vmstat.h> |
f8af4da3 | 14 | |
9ba69294 HD |
15 | struct mmu_gather; |
16 | ||
f8af4da3 HD |
17 | #ifdef CONFIG_KSM |
18 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | |
19 | unsigned long end, int advice, unsigned long *vm_flags); | |
20 | int __ksm_enter(struct mm_struct *mm); | |
1c2fb7a4 | 21 | void __ksm_exit(struct mm_struct *mm); |
f8af4da3 HD |
22 | |
23 | static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) | |
24 | { | |
25 | if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) | |
26 | return __ksm_enter(mm); | |
27 | return 0; | |
28 | } | |
29 | ||
9ba69294 HD |
30 | /* |
31 | * For KSM to handle OOM without deadlock when it's breaking COW in a | |
32 | * likely victim of the OOM killer, exit_mmap() has to serialize with | |
33 | * ksm_exit() after freeing mm's pages but before freeing its page tables. | |
34 | * That leaves a window in which KSM might refault pages which have just | |
35 | * been finally unmapped: guard against that with ksm_test_exit(), and | |
36 | * use it after getting mmap_sem in ksm.c, to check if mm is exiting. | |
37 | */ | |
38 | static inline bool ksm_test_exit(struct mm_struct *mm) | |
39 | { | |
40 | return atomic_read(&mm->mm_users) == 0; | |
41 | } | |
42 | ||
1c2fb7a4 | 43 | static inline void ksm_exit(struct mm_struct *mm) |
f8af4da3 HD |
44 | { |
45 | if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) | |
1c2fb7a4 | 46 | __ksm_exit(mm); |
f8af4da3 | 47 | } |
9a840895 HD |
48 | |
49 | /* | |
50 | * A KSM page is one of those write-protected "shared pages" or "merged pages" | |
51 | * which KSM maps into multiple mms, wherever identical anonymous page content | |
52 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. | |
53 | */ | |
54 | static inline int PageKsm(struct page *page) | |
55 | { | |
56 | return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); | |
57 | } | |
58 | ||
59 | /* | |
60 | * But we have to avoid the checking which page_add_anon_rmap() performs. | |
61 | */ | |
62 | static inline void page_add_ksm_rmap(struct page *page) | |
63 | { | |
64 | if (atomic_inc_and_test(&page->_mapcount)) { | |
65 | page->mapping = (void *) PAGE_MAPPING_ANON; | |
66 | __inc_zone_page_state(page, NR_ANON_PAGES); | |
67 | } | |
68 | } | |
f8af4da3 HD |
69 | #else /* !CONFIG_KSM */ |
70 | ||
/* !CONFIG_KSM stub: report success without touching *vm_flags. */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
76 | ||
/* !CONFIG_KSM stub: nothing to inherit on fork(), always succeeds. */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
81 | ||
9ba69294 HD |
/* !CONFIG_KSM stub: no mm is ever exiting as far as KSM is concerned. */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	/* Return the bool constant rather than raw 0 from a bool function. */
	return false;
}
86 | ||
/* !CONFIG_KSM stub: no KSM state to tear down at exit. */
static inline void ksm_exit(struct mm_struct *mm)
{
}
9a840895 HD |
90 | |
/* !CONFIG_KSM stub: without KSM no page can be a KSM page. */
static inline int PageKsm(struct page *page)
{
	return 0;
}
95 | ||
96 | /* No stub required for page_add_ksm_rmap(page) */ | |
f8af4da3 HD |
97 | #endif /* !CONFIG_KSM */ |
98 | ||
99 | #endif |