/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
					global_page_state(NR_PAGETABLE));
}
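
/*
 * Note: in the loop above a page with page_count() == N contributes
 * N - 1 to "shared", so pages with a single user add nothing.
 */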

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		/* <pfn,flags> stored as-is, to permit clearing entries */
		set_pte(pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
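
/*
 * Illustrative use of set_pte_pfn() (a sketch, not taken from this
 * file): it only works where the kernel page tables are already
 * instantiated in swapper_pg_dir, e.g. for the fixmap range.
 *
 *	set_pte_pfn(vaddr, phys >> PAGE_SHIFT, PAGE_KERNEL);
 *	set_pte_pfn(vaddr, 0, __pgprot(0));	(clears the entry)
 *
 * Any hole in the pgd/pud/pmd walk is a BUG().
 */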

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
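
/*
 * Illustrative call (a sketch, assuming PAE, where PMD_SIZE is 2MB):
 *
 *	set_pmd_pfn(vaddr, phys >> PAGE_SHIFT,
 *		    __pgprot(_KERNPG_TABLE | _PAGE_PSE));
 *
 * Both vaddr and pfn must be PMD_SIZE/PTRS_PER_PTE aligned, or the
 * function only warns and returns without mapping anything.
 */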

static int fixmaps;
#ifndef CONFIG_COMPAT_VDSO
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
#endif

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}
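
/*
 * Callers usually go through the wrappers in <asm/fixmap.h> rather
 * than calling __set_fixmap() directly; a minimal sketch:
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
 *	va = fix_to_virt(FIX_APIC_BASE);
 *
 * FIX_APIC_BASE is only an example index (it depends on config
 * options); fix_to_virt() maps the index back to the address that
 * __set_fixmap() populated.
 */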

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
#ifdef CONFIG_COMPAT_VDSO
	BUG_ON(reserve != 0);
#else
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
#endif
}
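
/*
 * A hypervisor backend calls this very early, before any fixmap
 * entry is set up; hence the BUG_ON(fixmaps > 0) above. An
 * illustrative (hypothetical) call reserving 64MB at the top of the
 * address space:
 *
 *	reserve_top_address(64 * 1024 * 1024);
 */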

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}
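
/*
 * With CONFIG_HIGHPTE the pte page may come from highmem, so callers
 * cannot touch it through a direct pointer: it has to be mapped
 * first, which is what pte_offset_map()/kmap_atomic(..., KM_PTE0)
 * do. Without HIGHPTE the page is always directly addressable.
 */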

void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}
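
/*
 * The list above is intrusive and threaded through struct page:
 * page->index holds the next page, and page_private() holds the
 * address of the previous node's "next" slot (&page->index, or
 * &pgd_list for the head), which is what makes O(1) unlink work.
 * A sketch of a walk under pgd_lock, as pageattr.c performs it:
 *
 *	for (page = pgd_list; page; page = (struct page *)page->index)
 *		...
 */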

void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1) {
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
	}

	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);

	if (PTRS_PER_PMD > 1)
		return;

	/* must happen under lock */
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
			__pa(swapper_pg_dir) >> PAGE_SHIFT,
			USER_PTRS_PER_PGD, PTRS_PER_PGD - USER_PTRS_PER_PGD);

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--) {
		pgd_t pgdent = pgd[i];
		void* pmd = (void *)__va(pgd_val(pgdent)-1);
		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
		kmem_cache_free(pmd_cache, pmd);
	}
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
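
/*
 * Note: __pgd(1 + __pa(pmd)) tags the pmd's physical address with
 * bit 0, i.e. _PAGE_PRESENT; the out_oom path undoes the tag with
 * __va(pgd_val(pgdent) - 1) to recover the virtual address to free.
 */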

void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
			pgd_t pgdent = pgd[i];
			void* pmd = (void *)__va(pgd_val(pgdent)-1);
			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
			kmem_cache_free(pmd_cache, pmd);
		}
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}