]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>

/*
 * Page-table allocation/population helpers for x86-64.
 *
 * Each *_populate macro installs a lower-level table into an entry of
 * the next-higher-level table.  The table argument is a kernel-virtual
 * pointer; __pa() yields the physical address that is stored in the
 * entry, and _PAGE_TABLE supplies the present/writable permission bits.
 */
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))

/* The pte page referenced by a pmd entry, as a struct page (pgtable_t). */
#define pmd_pgtable(pmd) pmd_page(pmd)
1da177e4 LT |
17 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) |
18 | { | |
19 | set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); | |
20 | } | |
21 | ||
5e541973 | 22 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
1da177e4 LT |
23 | { |
24 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | |
da8f153e | 25 | free_page((unsigned long)pmd); |
1da177e4 LT |
26 | } |
27 | ||
28 | static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr) | |
29 | { | |
da8f153e | 30 | return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
1da177e4 LT |
31 | } |
32 | ||
33 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | |
34 | { | |
da8f153e | 35 | return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
1da177e4 LT |
36 | } |
37 | ||
5e541973 | 38 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) |
1da177e4 LT |
39 | { |
40 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); | |
da8f153e | 41 | free_page((unsigned long)pud); |
1da177e4 LT |
42 | } |
43 | ||
/*
 * Link a pgd page onto the global pgd_list.
 *
 * The list is protected by pgd_lock taken irq-safe; the struct page's
 * otherwise-unused lru field serves as the list linkage.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_add(&page->lru, &pgd_list);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * Unlink a pgd page from the global pgd_list; inverse of pgd_list_add().
 * Same locking discipline: pgd_lock, irq-safe.
 */
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * Allocate and initialize a pgd (top-level page table) for a new mm.
 *
 * The user-space portion (entries below pgd_index(__PAGE_OFFSET)) is
 * zeroed; the kernel portion is copied from init_level4_pgt so every
 * address space shares the kernel mappings.
 *
 * NOTE(review): the pgd is added to pgd_list *before* the memset/memcpy
 * initialize it — presumably list walkers tolerate an in-flight pgd, but
 * confirm against the pgd_list consumers before relying on this ordering.
 *
 * Returns the new pgd, or NULL on allocation failure.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
5e541973 | 84 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
1da177e4 LT |
85 | { |
86 | BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); | |
da8f153e LT |
87 | pgd_list_del(pgd); |
88 | free_page((unsigned long)pgd); | |
1da177e4 LT |
89 | } |
90 | ||
91 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | |
92 | { | |
da8f153e | 93 | return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
1da177e4 LT |
94 | } |
95 | ||
2f569afd | 96 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) |
1da177e4 | 97 | { |
2f569afd MS |
98 | struct page *page; |
99 | void *p; | |
100 | ||
101 | p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | |
1da177e4 LT |
102 | if (!p) |
103 | return NULL; | |
2f569afd MS |
104 | page = virt_to_page(p); |
105 | pgtable_page_ctor(page); | |
106 | return page; | |
1da177e4 LT |
107 | } |
108 | ||
109 | /* Should really implement gc for free page table pages. This could be | |
110 | done with a reference count in struct page. */ | |
111 | ||
5e541973 | 112 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
1da177e4 LT |
113 | { |
114 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | |
da8f153e | 115 | free_page((unsigned long)pte); |
1da177e4 LT |
116 | } |
117 | ||
/*
 * Release a user pte page from pte_alloc_one(): tear down the
 * struct-page constructor state first, then free the page itself.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
/*
 * TLB-batched freeing of page-table pages: the page is handed to the
 * mmu_gather (tlb) and released after the TLB flush.  pte pages need
 * pgtable_page_dtor() run before they go back to the allocator.
 */
#define __pte_free_tlb(tlb,pte)				\
do {							\
	pgtable_page_dtor((pte));			\
	tlb_remove_page((tlb), (pte));			\
} while (0)

#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))

#endif /* _X86_64_PGALLOC_H */