/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
        printk(KERN_INFO "%lu pages writeback\n",
               global_page_state(NR_WRITEBACK));
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n",
               global_page_state(NR_SLAB_RECLAIMABLE) +
               global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk(KERN_INFO "%lu pages pagetables\n",
               global_page_state(NR_PAGETABLE));
}
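
/*
 * Illustrative sketch, not part of the original file: the "shared"
 * count above treats every reference beyond the first as a share (a
 * pagecache page mapped by two tasks typically has a page_count() of
 * 3 and so contributes 2).  show_mem() is the kind of dump hooked up
 * to the SysRq 'm' key; a debugging call site (hypothetical helper
 * name) would simply do:
 */
static void example_debug_dump(void)
{
        printk(KERN_DEBUG "dumping per-node memory state:\n");
        show_mem();     /* walks each online node under its resize lock */
}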

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
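
/*
 * Illustrative sketch, not part of the original file: because
 * <pfn,flags> are stored as-is, a zero pfn with empty flags tears a
 * kernel mapping back down again (this is what clear_fixmap() boils
 * down to via __set_fixmap() in this era's fixmap.h).
 */
static inline void example_clear_kernel_pte(unsigned long vaddr)
{
        set_pte_pfn(vaddr, 0, __pgprot(0));     /* entry becomes pte_none() */
}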

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame.  pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated.  Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
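
/*
 * Illustrative sketch, not part of the original file: under PAE a pmd
 * entry covers PTRS_PER_PTE (512) pages, i.e. 2MB, so both vaddr and
 * the physical base must be 2MB-aligned.  The pgprot name below is an
 * assumption; the point is that a large-page mapping needs _PAGE_PSE
 * set, which PAGE_KERNEL_LARGE provides.
 */
static void example_map_2mb(unsigned long vaddr, unsigned long phys)
{
        BUG_ON(vaddr & (PMD_SIZE - 1));
        BUG_ON(phys & (PMD_SIZE - 1));
        set_pmd_pfn(vaddr, phys >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
}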

static int fixmaps;
#ifndef CONFIG_COMPAT_VDSO
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
#endif

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
        fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
        BUG_ON(fixmaps > 0);
#ifdef CONFIG_COMPAT_VDSO
        BUG_ON(reserve != 0);
#else
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
        __VMALLOC_RESERVE += reserve;
#endif
}
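
/*
 * Illustrative sketch, not part of the original file: a hypervisor
 * port would call reserve_top_address() very early, before the first
 * __set_fixmap() bumps the fixmaps count (hence the BUG_ON above),
 * e.g. to keep the top 64MB of the address space to itself:
 */
static void __init example_hypervisor_init(void)
{
        reserve_top_address(64 << 20);  /* fixmap and vmalloc move down 64MB */
}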

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        return pte;
}
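
/*
 * Illustrative sketch, not part of the original file: with
 * CONFIG_HIGHPTE the pte page handed back above may live in highmem
 * and so has no permanent kernel mapping.  Users reach it through
 * pte_offset_map(), which kmap_atomic()s the page, roughly:
 */
static pte_t example_read_pte(pmd_t *pmd, unsigned long address)
{
        pte_t *ptep = pte_offset_map(pmd, address);     /* temporary mapping */
        pte_t entry = *ptep;
        pte_unmap(ptep);                                /* drop the kmap again */
        return entry;
}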

void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * A list of all pgd's is needed for non-PAE so that pageattr.c can
 * invalidate entries in both cached and uncached pgd's; it is not
 * needed for PAE since the kernel pmd is shared.  If PAE were not to
 * share the pmd a similar tactic would be needed.  This is essentially
 * codepath-based locking against pageattr.c; it is the unique case in
 * which a valid change of kernel pagetables can't be lazily
 * synchronized by vmalloc faults.  vmalloc faults work because attached
 * pagetables are never freed.  The locking scheme was chosen on the
 * basis of manfred's recommendations and having no core impact
 * whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
        pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long)pprev);
}
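
/*
 * Illustrative sketch, not part of the original file: the list links
 * live in page->index (next pointer) and page_private() (address of
 * the previous element's next link), so pageattr.c can walk every pgd
 * in the system under pgd_lock roughly like this:
 */
static void example_for_each_pgd(void (*fn)(pgd_t *))
{
        unsigned long flags;
        struct page *page;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index)
                fn((pgd_t *)page_address(page));
        spin_unlock_irqrestore(&pgd_lock, flags);
}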

void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1) {
                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                spin_lock_irqsave(&pgd_lock, flags);
        }

        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
        if (PTRS_PER_PMD > 1)
                return;

        /* non-PAE only: list insertion must happen under pgd_lock */
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
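
/*
 * Illustrative sketch, not part of the original file: these
 * constructors/destructors are wired up by pgtable_cache_init()
 * (arch/i386/mm/init.c in this era), roughly as below; the exact
 * alignment and flags arguments are elided here.
 */
static void __init example_pgtable_cache_init(void)
{
        if (PTRS_PER_PMD > 1)   /* PAE: pmds come from their own cache */
                pmd_cache = kmem_cache_create("pmd", PTRS_PER_PMD*sizeof(pmd_t),
                                              0, 0, pmd_ctor, NULL);
        pgd_cache = kmem_cache_create("pgd", PTRS_PER_PGD*sizeof(pgd_t),
                                      0, 0, pgd_ctor,
                                      PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
}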

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                if (!pmd)
                        goto out_oom;
                /* the "+ 1" sets the _PAGE_PRESENT bit in the pgd entry */
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;

out_oom:
        /* strip the present bit again to recover each pmd's address */
        for (i--; i >= 0; i--)
                kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        kmem_cache_free(pgd_cache, pgd);
        return NULL;
}

void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < USER_PTRS_PER_PGD; ++i)
                        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
}