Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Initialize MMU support. | |
3 | * | |
4 | * Copyright (C) 1998-2003 Hewlett-Packard Co | |
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | |
6 | */ | |
1da177e4 LT |
7 | #include <linux/kernel.h> |
8 | #include <linux/init.h> | |
9 | ||
10 | #include <linux/bootmem.h> | |
11 | #include <linux/efi.h> | |
12 | #include <linux/elf.h> | |
13 | #include <linux/mm.h> | |
14 | #include <linux/mmzone.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/personality.h> | |
17 | #include <linux/reboot.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/swap.h> | |
20 | #include <linux/proc_fs.h> | |
21 | #include <linux/bitops.h> | |
139b8304 | 22 | #include <linux/kexec.h> |
1da177e4 LT |
23 | |
24 | #include <asm/a.out.h> | |
25 | #include <asm/dma.h> | |
26 | #include <asm/ia32.h> | |
27 | #include <asm/io.h> | |
28 | #include <asm/machvec.h> | |
29 | #include <asm/numa.h> | |
30 | #include <asm/patch.h> | |
31 | #include <asm/pgalloc.h> | |
32 | #include <asm/sal.h> | |
33 | #include <asm/sections.h> | |
34 | #include <asm/system.h> | |
35 | #include <asm/tlb.h> | |
36 | #include <asm/uaccess.h> | |
37 | #include <asm/unistd.h> | |
38 | #include <asm/mca.h> | |
39 | ||
40 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | |
41 | ||
fde740e4 RH |
42 | DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist); |
43 | DEFINE_PER_CPU(long, __pgtable_quicklist_size); | |
44 | ||
1da177e4 LT |
45 | extern void ia64_tlb_init (void); |
46 | ||
47 | unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; | |
48 | ||
49 | #ifdef CONFIG_VIRTUAL_MEM_MAP | |
50 | unsigned long vmalloc_end = VMALLOC_END_INIT; | |
51 | EXPORT_SYMBOL(vmalloc_end); | |
52 | struct page *vmem_map; | |
53 | EXPORT_SYMBOL(vmem_map); | |
54 | #endif | |
55 | ||
fde740e4 | 56 | struct page *zero_page_memmap_ptr; /* map entry for zero page */ |
1da177e4 LT |
57 | EXPORT_SYMBOL(zero_page_memmap_ptr); |
58 | ||
fde740e4 | 59 | #define MIN_PGT_PAGES 25UL |
e96c9b47 | 60 | #define MAX_PGT_FREES_PER_PASS 16L |
fde740e4 RH |
61 | #define PGT_FRACTION_OF_NODE_MEM 16 |
62 | ||
63 | static inline long | |
64 | max_pgt_pages(void) | |
65 | { | |
66 | u64 node_free_pages, max_pgt_pages; | |
67 | ||
68 | #ifndef CONFIG_NUMA | |
69 | node_free_pages = nr_free_pages(); | |
70 | #else | |
9195481d | 71 | node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES); |
fde740e4 RH |
72 | #endif |
73 | max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM; | |
74 | max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES); | |
75 | return max_pgt_pages; | |
76 | } | |
77 | ||
78 | static inline long | |
79 | min_pages_to_free(void) | |
80 | { | |
81 | long pages_to_free; | |
82 | ||
83 | pages_to_free = pgtable_quicklist_size - max_pgt_pages(); | |
84 | pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS); | |
85 | return pages_to_free; | |
86 | } | |
87 | ||
1da177e4 | 88 | void |
fde740e4 | 89 | check_pgt_cache(void) |
1da177e4 | 90 | { |
fde740e4 | 91 | long pages_to_free; |
1da177e4 | 92 | |
fde740e4 RH |
93 | if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES)) |
94 | return; | |
1da177e4 LT |
95 | |
96 | preempt_disable(); | |
fde740e4 RH |
97 | while (unlikely((pages_to_free = min_pages_to_free()) > 0)) { |
98 | while (pages_to_free--) { | |
99 | free_page((unsigned long)pgtable_quicklist_alloc()); | |
100 | } | |
101 | preempt_enable(); | |
102 | preempt_disable(); | |
1da177e4 LT |
103 | } |
104 | preempt_enable(); | |
105 | } | |
106 | ||
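The trimming policy above keeps the per-CPU quicklist between a floor of MIN_PGT_PAGES and a ceiling derived from free node memory, releasing at most MAX_PGT_FREES_PER_PASS pages between preemption points. A minimal userspace sketch of that arithmetic (the three constants are the kernel's; the quicklist and free-page counts are hypothetical):

```c
#include <stdio.h>

#define MIN_PGT_PAGES            25L
#define MAX_PGT_FREES_PER_PASS   16L
#define PGT_FRACTION_OF_NODE_MEM 16

/* ceiling for cached page-table pages, never below the floor */
static long max_pgt_pages(long node_free_pages)
{
	long max = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
	return max > MIN_PGT_PAGES ? max : MIN_PGT_PAGES;
}

int main(void)
{
	long quicklist_size = 2000;	/* hypothetical: cached pgt pages  */
	long node_free_pages = 8000;	/* hypothetical: free on this node */
	long passes = 0;

	while (quicklist_size > max_pgt_pages(node_free_pages)) {
		long n = quicklist_size - max_pgt_pages(node_free_pages);

		if (n > MAX_PGT_FREES_PER_PASS)
			n = MAX_PGT_FREES_PER_PASS;	/* bound each pass */
		quicklist_size -= n;	/* kernel would free_page() each one */
		passes++;
	}
	printf("trimmed to %ld pages in %ld passes\n", quicklist_size, passes);
	return 0;
}
```

Bounding each pass is what lets check_pgt_cache() re-enable preemption between passes instead of holding the CPU while it drains a large list.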
107 | void | |
108 | lazy_mmu_prot_update (pte_t pte) | |
109 | { | |
110 | unsigned long addr; | |
111 | struct page *page; | |
5e48521e | 112 | unsigned long order; |
1da177e4 LT |
113 | |
114 | if (!pte_exec(pte)) | |
115 | return; /* not an executable page... */ | |
116 | ||
117 | page = pte_page(pte); | |
118 | addr = (unsigned long) page_address(page); | |
119 | ||
120 | if (test_bit(PG_arch_1, &page->flags)) | |
121 | return; /* i-cache is already coherent with d-cache */ | |
122 | ||
5e48521e ZY |
123 | if (PageCompound(page)) { |
124 | order = (unsigned long) (page[1].lru.prev); | |
125 | flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT)); | |
126 | } | |
127 | else | |
128 | flush_icache_range(addr, addr + PAGE_SIZE); | |
1da177e4 LT |
129 | set_bit(PG_arch_1, &page->flags); /* mark page as clean */ |
130 | } | |
131 | ||
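lazy_mmu_prot_update() is a classic flush-once scheme: PG_arch_1 records that the i-cache is already coherent with the d-cache for this page, so the expensive flush_icache_range() runs at most once until the flag is cleared again. A toy model of the pattern (struct fake_page, PG_ARCH_1, and the stub flush are all hypothetical stand-ins):

```c
#include <stdio.h>

#define PG_ARCH_1 (1UL << 0)	/* hypothetical "i-cache clean" flag */

struct fake_page { unsigned long flags; };

static int flushes;
static void flush_icache_stub(void) { flushes++; }	/* the costly part */

static void lazy_prot_update(struct fake_page *pg)
{
	if (pg->flags & PG_ARCH_1)
		return;			/* already coherent: cheap exit  */
	flush_icache_stub();		/* expensive flush, at most once */
	pg->flags |= PG_ARCH_1;		/* remember the page is clean    */
}

int main(void)
{
	struct fake_page pg = { 0 };

	lazy_prot_update(&pg);		/* flushes                        */
	lazy_prot_update(&pg);		/* no-op, flag is set             */
	pg.flags &= ~PG_ARCH_1;		/* page dirtied again (simulated) */
	lazy_prot_update(&pg);		/* flushes once more              */
	printf("flushes: %d\n", flushes);	/* prints 2 */
	return 0;
}
```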
cde14bbf JB |
132 | /* |
133 | * Since DMA is i-cache coherent, any (complete) pages that were written via | |
134 | * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to | |
135 | * flush them when they get mapped into an executable vm-area. | |
136 | */ | |
137 | void | |
138 | dma_mark_clean(void *addr, size_t size) | |
139 | { | |
140 | unsigned long pg_addr, end; | |
141 | ||
142 | pg_addr = PAGE_ALIGN((unsigned long) addr); | |
143 | end = (unsigned long) addr + size; | |
144 | while (pg_addr + PAGE_SIZE <= end) { | |
145 | struct page *page = virt_to_page(pg_addr); | |
146 | set_bit(PG_arch_1, &page->flags); | |
147 | pg_addr += PAGE_SIZE; | |
148 | } | |
149 | } | |
150 | ||
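Note the asymmetric bounds in dma_mark_clean(): the start is rounded up and the loop condition requires the whole page to fit, so a page is only flagged when DMA wrote every byte of it. A sketch of that arithmetic with hypothetical addresses, assuming 16KB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 14			/* assume 16KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x10001000;	/* DMA buffer, unaligned start */
	unsigned long size = 0x9000;		/* 36KB                        */
	unsigned long end  = addr + size;
	unsigned long pg   = PAGE_ALIGN(addr);	/* first fully covered page    */

	while (pg + PAGE_SIZE <= end) {		/* page must fit completely    */
		printf("mark 0x%lx clean\n", pg);	/* only 0x10004000 here */
		pg += PAGE_SIZE;
	}
	return 0;
}
```

The partially written pages at either end keep their flag clear, so they still get flushed if they are ever mapped executable.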
1da177e4 LT |
151 | inline void |
152 | ia64_set_rbs_bot (void) | |
153 | { | |
154 | unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16; | |
155 | ||
156 | if (stack_size > MAX_USER_STACK_SIZE) | |
157 | stack_size = MAX_USER_STACK_SIZE; | |
83d2cd3d | 158 | current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size); |
1da177e4 LT |
159 | } |
160 | ||
161 | /* | |
162 | * This performs some platform-dependent address space initialization. | |
163 | * On IA-64, we want to set up the VM area for the register backing | |
164 | * store (which grows upwards) and install the gateway page which is | |
165 | * used for signal trampolines, etc. | |
166 | */ | |
167 | void | |
168 | ia64_init_addr_space (void) | |
169 | { | |
170 | struct vm_area_struct *vma; | |
171 | ||
172 | ia64_set_rbs_bot(); | |
173 | ||
174 | /* | |
175 | * If we're out of memory and kmem_cache_zalloc() returns NULL, we simply | |
176 | * ignore the problem. In that case, the process will get a SEGFAULT when | |
177 | * it attempts to write to the register backing store for the first time. | |
178 | */ | |
c3762229 | 179 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); |
1da177e4 | 180 | if (vma) { |
1da177e4 LT |
181 | vma->vm_mm = current->mm; |
182 | vma->vm_start = current->thread.rbs_bot & PAGE_MASK; | |
183 | vma->vm_end = vma->vm_start + PAGE_SIZE; | |
184 | vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7]; | |
46dea3d0 | 185 | vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; |
1da177e4 LT |
186 | down_write(¤t->mm->mmap_sem); |
187 | if (insert_vm_struct(current->mm, vma)) { | |
188 | up_write(¤t->mm->mmap_sem); | |
189 | kmem_cache_free(vm_area_cachep, vma); | |
190 | return; | |
191 | } | |
192 | up_write(¤t->mm->mmap_sem); | |
193 | } | |
194 | ||
195 | /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ | |
196 | if (!(current->personality & MMAP_PAGE_ZERO)) { | |
c3762229 | 197 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); |
1da177e4 | 198 | if (vma) { |
1da177e4 LT |
199 | vma->vm_mm = current->mm; |
200 | vma->vm_end = PAGE_SIZE; | |
201 | vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); | |
202 | vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED; | |
203 | down_write(¤t->mm->mmap_sem); | |
204 | if (insert_vm_struct(current->mm, vma)) { | |
205 | up_write(¤t->mm->mmap_sem); | |
206 | kmem_cache_free(vm_area_cachep, vma); | |
207 | return; | |
208 | } | |
209 | up_write(¤t->mm->mmap_sem); | |
210 | } | |
211 | } | |
212 | } | |
213 | ||
214 | void | |
215 | free_initmem (void) | |
216 | { | |
217 | unsigned long addr, eaddr; | |
218 | ||
219 | addr = (unsigned long) ia64_imva(__init_begin); | |
220 | eaddr = (unsigned long) ia64_imva(__init_end); | |
221 | while (addr < eaddr) { | |
222 | ClearPageReserved(virt_to_page(addr)); | |
7835e98b | 223 | init_page_count(virt_to_page(addr)); |
1da177e4 LT |
224 | free_page(addr); |
225 | ++totalram_pages; | |
226 | addr += PAGE_SIZE; | |
227 | } | |
228 | printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n", | |
229 | (__init_end - __init_begin) >> 10); | |
230 | } | |
231 | ||
dae28066 | 232 | void __init |
1da177e4 LT |
233 | free_initrd_mem (unsigned long start, unsigned long end) |
234 | { | |
235 | struct page *page; | |
236 | /* | |
237 | * EFI uses 4KB pages while the kernel can use 4KB or bigger. | |
238 | * Thus EFI and the kernel may have different page sizes. It is | |
239 | * therefore possible to have the initrd share the same page as | |
240 | * the end of the kernel (given current setup). | |
241 | * | |
242 | * To avoid freeing/using the wrong page (kernel sized) we: | |
243 | * - align up the beginning of initrd | |
244 | * - align down the end of initrd | |
245 | * | |
246 | * | | | |
247 | * |=============| a000 | |
248 | * | | | |
249 | * | | | |
250 | * | | 9000 | |
251 | * |/////////////| | |
252 | * |/////////////| | |
253 | * |=============| 8000 | |
254 | * |///INITRD////| | |
255 | * |/////////////| | |
256 | * |/////////////| 7000 | |
257 | * | | | |
258 | * |KKKKKKKKKKKKK| | |
259 | * |=============| 6000 | |
260 | * |KKKKKKKKKKKKK| | |
261 | * |KKKKKKKKKKKKK| | |
262 | * K=kernel using 8KB pages | |
263 | * | |
264 | * In this example, we must free page 8000 ONLY. So we must align up | |
265 | * initrd_start and keep initrd_end as is. | |
266 | */ | |
267 | start = PAGE_ALIGN(start); | |
268 | end = end & PAGE_MASK; | |
269 | ||
270 | if (start < end) | |
271 | printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10); | |
272 | ||
273 | for (; start < end; start += PAGE_SIZE) { | |
274 | if (!virt_addr_valid(start)) | |
275 | continue; | |
276 | page = virt_to_page(start); | |
277 | ClearPageReserved(page); | |
7835e98b | 278 | init_page_count(page); |
1da177e4 LT |
279 | free_page(start); |
280 | ++totalram_pages; | |
281 | } | |
282 | } | |
283 | ||
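A worked example of the rounding described in the comment above, with hypothetical addresses: EFI placed the initrd on 4KB boundaries while the kernel in the diagram uses 8KB pages, so only kernel pages lying entirely inside the initrd may be freed.

```c
#include <stdio.h>

#define PAGE_SHIFT 13			/* 8KB kernel pages, as in the diagram */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long start = 0x7000;	/* initrd begins mid kernel-page */
	unsigned long end   = 0xb000;	/* and ends mid kernel-page      */

	start = PAGE_ALIGN(start);	/* 0x8000: round up into the initrd */
	end &= PAGE_MASK;		/* 0xa000: round down               */

	for (; start < end; start += PAGE_SIZE)
		printf("free kernel page 0x%lx\n", start);	/* 0x8000 only */
	return 0;
}
```

The 4KB tails at 0x7000 and 0xa000 share kernel pages with data that is still live, so they are deliberately left alone rather than freed.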
284 | /* | |
285 | * This installs a clean page in the kernel's page table. | |
286 | */ | |
dae28066 | 287 | static struct page * __init |
1da177e4 LT |
288 | put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) |
289 | { | |
290 | pgd_t *pgd; | |
291 | pud_t *pud; | |
292 | pmd_t *pmd; | |
293 | pte_t *pte; | |
294 | ||
295 | if (!PageReserved(page)) | |
296 | printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n", | |
297 | page_address(page)); | |
298 | ||
299 | pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */ | |
300 | ||
1da177e4 LT |
301 | { |
302 | pud = pud_alloc(&init_mm, pgd, address); | |
303 | if (!pud) | |
304 | goto out; | |
1da177e4 LT |
305 | pmd = pmd_alloc(&init_mm, pud, address); |
306 | if (!pmd) | |
307 | goto out; | |
872fec16 | 308 | pte = pte_alloc_kernel(pmd, address); |
1da177e4 LT |
309 | if (!pte) |
310 | goto out; | |
872fec16 | 311 | if (!pte_none(*pte)) |
1da177e4 | 312 | goto out; |
1da177e4 | 313 | set_pte(pte, mk_pte(page, pgprot)); |
1da177e4 | 314 | } |
872fec16 | 315 | out: |
1da177e4 LT |
316 | /* no need for flush_tlb */ |
317 | return page; | |
318 | } | |
319 | ||
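put_kernel_page() walks, and on demand allocates, all four table levels. As a rough illustration of how one address selects an entry at each level, here is the index arithmetic for a generic 4-level layout, assuming 16KB pages and 8-byte PTEs (11 index bits per level); the real ia64 pgd_offset_k() additionally folds in the region bits.

```c
#include <stdio.h>

#define PAGE_SHIFT 14			/* assume 16KB pages          */
#define IDX_BITS   (PAGE_SHIFT - 3)	/* 11 bits: 2048 8-byte slots */
#define IDX_MASK   ((1UL << IDX_BITS) - 1)

int main(void)
{
	unsigned long addr = 0x6000000012345678UL;	/* hypothetical */
	unsigned long pte_i = (addr >> PAGE_SHIFT) & IDX_MASK;
	unsigned long pmd_i = (addr >> (PAGE_SHIFT + IDX_BITS)) & IDX_MASK;
	unsigned long pud_i = (addr >> (PAGE_SHIFT + 2 * IDX_BITS)) & IDX_MASK;
	unsigned long pgd_i = (addr >> (PAGE_SHIFT + 3 * IDX_BITS)) & IDX_MASK;

	printf("pgd %lu -> pud %lu -> pmd %lu -> pte %lu\n",
	       pgd_i, pud_i, pmd_i, pte_i);
	return 0;
}
```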
914a4ea4 | 320 | static void __init |
1da177e4 LT |
321 | setup_gate (void) |
322 | { | |
323 | struct page *page; | |
324 | ||
325 | /* | |
ad597bd5 DMT |
326 | * Map the gate page twice: once read-only to export the ELF |
327 | * headers etc. and once as an execute-only page to enable | |
328 | * privilege-promotion via "epc": | |
1da177e4 LT |
329 | */ |
330 | page = virt_to_page(ia64_imva(__start_gate_section)); | |
331 | put_kernel_page(page, GATE_ADDR, PAGE_READONLY); | |
332 | #ifdef HAVE_BUGGY_SEGREL | |
333 | page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); | |
334 | put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); | |
335 | #else | |
336 | put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); | |
ad597bd5 DMT |
337 | /* Fill in the holes (if any) with read-only zero pages: */ |
338 | { | |
339 | unsigned long addr; | |
340 | ||
341 | for (addr = GATE_ADDR + PAGE_SIZE; | |
342 | addr < GATE_ADDR + PERCPU_PAGE_SIZE; | |
343 | addr += PAGE_SIZE) | |
344 | { | |
345 | put_kernel_page(ZERO_PAGE(0), addr, | |
346 | PAGE_READONLY); | |
347 | put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE, | |
348 | PAGE_READONLY); | |
349 | } | |
350 | } | |
1da177e4 LT |
351 | #endif |
352 | ia64_patch_gate(); | |
353 | } | |
354 | ||
355 | void __devinit | |
356 | ia64_mmu_init (void *my_cpu_data) | |
357 | { | |
00b65985 | 358 | unsigned long pta, impl_va_bits; |
1da177e4 LT |
359 | extern void __devinit tlb_init (void); |
360 | ||
361 | #ifdef CONFIG_DISABLE_VHPT | |
362 | # define VHPT_ENABLE_BIT 0 | |
363 | #else | |
364 | # define VHPT_ENABLE_BIT 1 | |
365 | #endif | |
366 | ||
1da177e4 LT |
367 | /* |
368 | * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped | |
369 | * address space. The IA-64 architecture guarantees that at least 50 bits of | |
370 | * virtual address space are implemented but if we pick a large enough page size | |
371 | * (e.g., 64KB), the mapped address space is big enough that it will overlap with | |
372 | * VMLPT. I assume that once we run on machines big enough to warrant 64KB pages, | |
373 | * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a | |
374 | * problem in practice. Alternatively, we could truncate the top of the mapped | |
375 | * address space to not permit mappings that would overlap with the VMLPT. | |
376 | * --davidm 00/12/06 | |
377 | */ | |
378 | # define pte_bits 3 | |
379 | # define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT) | |
380 | /* | |
381 | * The virtual page table has to cover the entire implemented address space within | |
382 | * a region even though not all of this space may be mappable. The reason for | |
383 | * this is that the Access bit and Dirty bit fault handlers perform | |
384 | * non-speculative accesses to the virtual page table, so the address range of the | |
385 | * virtual page table itself needs to be covered by the virtual page table. | |
386 | */ | |
387 | # define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits) | |
388 | # define POW2(n) (1ULL << (n)) | |
389 | ||
390 | impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61))); | |
391 | ||
392 | if (impl_va_bits < 51 || impl_va_bits > 61) | |
393 | panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1); | |
6cf07a8c PC |
394 | /* |
395 | * mapped_space_bits - PAGE_SHIFT is the log2 of the number of ptes we | |
396 | * need, which must fit into "vmlpt_bits - pte_bits" index bits. The | |
397 | * second half of the test makes sure that our mapped space doesn't | |
398 | * overlap the unimplemented hole in the middle of the region. | |
399 | */ | |
400 | if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || | |
401 | (mapped_space_bits > impl_va_bits - 1)) | |
402 | panic("Cannot build a big enough virtual-linear page table" | |
403 | " to cover mapped address space.\n" | |
404 | " Try using a smaller page size.\n"); | |
405 | ||
1da177e4 LT |
406 | |
407 | /* place the VMLPT at the end of each page-table mapped region: */ | |
408 | pta = POW2(61) - POW2(vmlpt_bits); | |
409 | ||
1da177e4 LT |
410 | /* |
411 | * Set the (virtually mapped linear) page table address. Bit | |
412 | * 8 selects between the short and long format, bits 2-7 the | |
413 | * size of the table, and bit 0 whether the VHPT walker is | |
414 | * enabled. | |
415 | */ | |
416 | ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT); | |
417 | ||
418 | ia64_tlb_init(); | |
419 | ||
420 | #ifdef CONFIG_HUGETLB_PAGE | |
421 | ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2); | |
422 | ia64_srlz_d(); | |
423 | #endif | |
424 | } | |
425 | ||
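To make the cr.pta encoding above concrete, here is the arithmetic with assumed values (16KB pages, impl_va_bits = 51, the architectural minimum): the VMLPT base occupies the high bits, the table-size field sits in bits 2-7, bit 8 selects the short format, and bit 0 enables the walker.

```c
#include <stdio.h>

#define PAGE_SHIFT 14			/* assume 16KB pages */
#define pte_bits   3			/* 8-byte PTEs       */
#define POW2(n)    (1UL << (n))

int main(void)
{
	unsigned long impl_va_bits = 51;	/* assumed for this example */
	unsigned long vmlpt_bits = impl_va_bits - PAGE_SHIFT + pte_bits;
	unsigned long pta = POW2(61) - POW2(vmlpt_bits);	/* top of region */
	unsigned long val = pta | (0 << 8) | (vmlpt_bits << 2) | 1;

	printf("vmlpt_bits = %lu, cr.pta = 0x%016lx\n", vmlpt_bits, val);
	return 0;
}
```

With these numbers the table spans 2^40 bytes, and since the base is aligned to that size, OR-ing in the low-order control bits cannot disturb the base address.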
426 | #ifdef CONFIG_VIRTUAL_MEM_MAP | |
e44e41d0 BP |
427 | int vmemmap_find_next_valid_pfn(int node, int i) |
428 | { | |
429 | unsigned long end_address, hole_next_pfn; | |
430 | unsigned long stop_address; | |
431 | pg_data_t *pgdat = NODE_DATA(node); | |
432 | ||
433 | end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; | |
434 | end_address = PAGE_ALIGN(end_address); | |
435 | ||
436 | stop_address = (unsigned long) &vmem_map[ | |
437 | pgdat->node_start_pfn + pgdat->node_spanned_pages]; | |
438 | ||
439 | do { | |
440 | pgd_t *pgd; | |
441 | pud_t *pud; | |
442 | pmd_t *pmd; | |
443 | pte_t *pte; | |
444 | ||
445 | pgd = pgd_offset_k(end_address); | |
446 | if (pgd_none(*pgd)) { | |
447 | end_address += PGDIR_SIZE; | |
448 | continue; | |
449 | } | |
450 | ||
451 | pud = pud_offset(pgd, end_address); | |
452 | if (pud_none(*pud)) { | |
453 | end_address += PUD_SIZE; | |
454 | continue; | |
455 | } | |
456 | ||
457 | pmd = pmd_offset(pud, end_address); | |
458 | if (pmd_none(*pmd)) { | |
459 | end_address += PMD_SIZE; | |
460 | continue; | |
461 | } | |
462 | ||
463 | pte = pte_offset_kernel(pmd, end_address); | |
464 | retry_pte: | |
465 | if (pte_none(*pte)) { | |
466 | end_address += PAGE_SIZE; | |
467 | pte++; | |
468 | if ((end_address < stop_address) && | |
469 | (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) | |
470 | goto retry_pte; | |
471 | continue; | |
472 | } | |
473 | /* Found next valid vmem_map page */ | |
474 | break; | |
475 | } while (end_address < stop_address); | |
476 | ||
477 | end_address = min(end_address, stop_address); | |
478 | end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; | |
479 | hole_next_pfn = end_address / sizeof(struct page); | |
480 | return hole_next_pfn - pgdat->node_start_pfn; | |
481 | } | |
1da177e4 | 482 | |
dae28066 | 483 | int __init |
1da177e4 LT |
484 | create_mem_map_page_table (u64 start, u64 end, void *arg) |
485 | { | |
486 | unsigned long address, start_page, end_page; | |
487 | struct page *map_start, *map_end; | |
488 | int node; | |
489 | pgd_t *pgd; | |
490 | pud_t *pud; | |
491 | pmd_t *pmd; | |
492 | pte_t *pte; | |
493 | ||
494 | map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); | |
495 | map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); | |
496 | ||
497 | start_page = (unsigned long) map_start & PAGE_MASK; | |
498 | end_page = PAGE_ALIGN((unsigned long) map_end); | |
499 | node = paddr_to_nid(__pa(start)); | |
500 | ||
501 | for (address = start_page; address < end_page; address += PAGE_SIZE) { | |
502 | pgd = pgd_offset_k(address); | |
503 | if (pgd_none(*pgd)) | |
504 | pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)); | |
505 | pud = pud_offset(pgd, address); | |
506 | ||
507 | if (pud_none(*pud)) | |
508 | pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)); | |
509 | pmd = pmd_offset(pud, address); | |
510 | ||
511 | if (pmd_none(*pmd)) | |
512 | pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)); | |
513 | pte = pte_offset_kernel(pmd, address); | |
514 | ||
515 | if (pte_none(*pte)) | |
516 | set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT, | |
517 | PAGE_KERNEL)); | |
518 | } | |
519 | return 0; | |
520 | } | |
521 | ||
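For a sense of scale, the back-of-envelope below shows how much virtual memmap the function must populate, assuming 16KB pages and a 56-byte struct page (both sizes are assumptions for the example):

```c
#include <stdio.h>

#define PAGE_SHIFT 14
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define ENTRY      56UL		/* assumed sizeof(struct page) */

int main(void)
{
	unsigned long mem = 1UL << 30;			/* 1GB of RAM  */
	unsigned long entries = mem >> PAGE_SHIFT;	/* 65536 pages */
	unsigned long bytes = entries * ENTRY;		/* ~3.5MB      */

	printf("%lu entries, %lu memmap pages to populate\n",
	       entries, (bytes + PAGE_SIZE - 1) / PAGE_SIZE);	/* 224 */
	return 0;
}
```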
522 | struct memmap_init_callback_data { | |
523 | struct page *start; | |
524 | struct page *end; | |
525 | int nid; | |
526 | unsigned long zone; | |
527 | }; | |
528 | ||
529 | static int | |
530 | virtual_memmap_init (u64 start, u64 end, void *arg) | |
531 | { | |
532 | struct memmap_init_callback_data *args; | |
533 | struct page *map_start, *map_end; | |
534 | ||
535 | args = (struct memmap_init_callback_data *) arg; | |
536 | map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); | |
537 | map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); | |
538 | ||
539 | if (map_start < args->start) | |
540 | map_start = args->start; | |
541 | if (map_end > args->end) | |
542 | map_end = args->end; | |
543 | ||
544 | /* | |
545 | * We have to initialize "out of bounds" struct page elements that fit completely | |
546 | * on the same pages that were allocated for the "in bounds" elements because they | |
547 | * may be referenced later (and found to be "reserved"). | |
548 | */ | |
549 | map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page); | |
550 | map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) | |
551 | / sizeof(struct page)); | |
552 | ||
553 | if (map_start < map_end) | |
554 | memmap_init_zone((unsigned long)(map_end - map_start), | |
a2f3aa02 DH |
555 | args->nid, args->zone, page_to_pfn(map_start), |
556 | MEMMAP_EARLY); | |
1da177e4 LT |
557 | return 0; |
558 | } | |
559 | ||
560 | void | |
561 | memmap_init (unsigned long size, int nid, unsigned long zone, | |
562 | unsigned long start_pfn) | |
563 | { | |
564 | if (!vmem_map) | |
a2f3aa02 | 565 | memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY); |
1da177e4 LT |
566 | else { |
567 | struct page *start; | |
568 | struct memmap_init_callback_data args; | |
569 | ||
570 | start = pfn_to_page(start_pfn); | |
571 | args.start = start; | |
572 | args.end = start + size; | |
573 | args.nid = nid; | |
574 | args.zone = zone; | |
575 | ||
576 | efi_memmap_walk(virtual_memmap_init, &args); | |
577 | } | |
578 | } | |
579 | ||
580 | int | |
581 | ia64_pfn_valid (unsigned long pfn) | |
582 | { | |
583 | char byte; | |
584 | struct page *pg = pfn_to_page(pfn); | |
585 | ||
586 | return (__get_user(byte, (char __user *) pg) == 0) | |
587 | && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK)) | |
588 | || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0)); | |
589 | } | |
590 | EXPORT_SYMBOL(ia64_pfn_valid); | |
591 | ||
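ia64_pfn_valid() probes the first byte of the struct page entry and, only if the entry straddles a page boundary, the last byte too. The same logic in a self-contained sketch, with probe_byte() standing in for __get_user() and a made-up validity limit:

```c
#include <stdio.h>

#define PAGE_SHIFT 14
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
#define ENTRY_SIZE 56UL			/* assumed sizeof(struct page) */
#define LIMIT      0x100000UL		/* pretend-valid mapping limit */

/* stand-in for __get_user(): 0 means the byte was readable */
static int probe_byte(unsigned long addr)
{
	return addr < LIMIT ? 0 : -1;
}

static int entry_valid(unsigned long pg)
{
	if (probe_byte(pg) != 0)
		return 0;
	if ((pg & PAGE_MASK) == ((pg + ENTRY_SIZE - 1) & PAGE_MASK))
		return 1;		/* entry fits in one page */
	return probe_byte(pg + ENTRY_SIZE - 1) == 0;	/* probe the tail */
}

int main(void)
{
	/* prints "1 0": the second entry crosses past LIMIT */
	printf("%d %d\n", entry_valid(0x4000), entry_valid(LIMIT - 8));
	return 0;
}
```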
dae28066 | 592 | int __init |
1da177e4 LT |
593 | find_largest_hole (u64 start, u64 end, void *arg) |
594 | { | |
595 | u64 *max_gap = arg; | |
596 | ||
597 | static u64 last_end = PAGE_OFFSET; | |
598 | ||
599 | /* NOTE: this algorithm assumes the EFI memmap table is ordered */ | |
600 | ||
601 | if (*max_gap < (start - last_end)) | |
602 | *max_gap = start - last_end; | |
603 | last_end = end; | |
604 | return 0; | |
605 | } | |
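Because the walk relies on that ordering, one running last_end is enough to find the widest gap. Fed two hypothetical ordered descriptors, the sketch below reports the 64MB hole between them:

```c
#include <stdio.h>

static unsigned long last_end;		/* end of the previous range */
static unsigned long max_gap;

static int visit(unsigned long start, unsigned long end)
{
	if (max_gap < start - last_end)
		max_gap = start - last_end;
	last_end = end;
	return 0;
}

int main(void)
{
	visit(0x0000000UL, 0x4000000UL);	/* first 64MB of RAM      */
	visit(0x8000000UL, 0xc000000UL);	/* next range after a gap */
	printf("largest hole: 0x%lx bytes\n", max_gap);	/* 0x4000000 */
	return 0;
}
```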
05e0caad | 606 | |
139b8304 BP |
607 | #endif /* CONFIG_VIRTUAL_MEM_MAP */ |
608 | ||
05e0caad | 609 | int __init |
8b9c1068 | 610 | register_active_ranges(u64 start, u64 end, void *arg) |
05e0caad | 611 | { |
139b8304 BP |
612 | int nid = paddr_to_nid(__pa(start)); |
613 | ||
614 | if (nid < 0) | |
615 | nid = 0; | |
616 | #ifdef CONFIG_KEXEC | |
617 | if (start > crashk_res.start && start < crashk_res.end) | |
618 | start = crashk_res.end; | |
619 | if (end > crashk_res.start && end < crashk_res.end) | |
620 | end = crashk_res.start; | |
621 | #endif | |
622 | ||
623 | if (start < end) | |
624 | add_active_range(nid, __pa(start) >> PAGE_SHIFT, | |
625 | __pa(end) >> PAGE_SHIFT); | |
05e0caad MG |
626 | return 0; |
627 | } | |
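The CONFIG_KEXEC branch above clips each range against the crash-kernel reservation so its pages are never handed to the page allocator. A sketch of the clipping with hypothetical addresses:

```c
#include <stdio.h>

int main(void)
{
	unsigned long crash_start = 0x4000000, crash_end = 0x8000000;
	unsigned long start = 0x3000000, end = 0x6000000;	/* overlaps */

	if (start > crash_start && start < crash_end)
		start = crash_end;	/* begin past the reservation */
	if (end > crash_start && end < crash_end)
		end = crash_start;	/* stop at the reservation    */

	if (start < end)		/* registers 0x3000000-0x4000000 */
		printf("register 0x%lx-0x%lx\n", start, end);
	return 0;
}
```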
1da177e4 | 628 | |
dae28066 | 629 | static int __init |
1da177e4 LT |
630 | count_reserved_pages (u64 start, u64 end, void *arg) |
631 | { | |
632 | unsigned long num_reserved = 0; | |
633 | unsigned long *count = arg; | |
634 | ||
635 | for (; start < end; start += PAGE_SIZE) | |
636 | if (PageReserved(virt_to_page(start))) | |
637 | ++num_reserved; | |
638 | *count += num_reserved; | |
639 | return 0; | |
640 | } | |
641 | ||
a3f5c338 ZN |
642 | int |
643 | find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg) | |
644 | { | |
645 | unsigned long pfn_start, pfn_end; | |
646 | #ifdef CONFIG_FLATMEM | |
647 | pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT; | |
648 | pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT; | |
649 | #else | |
650 | pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT; | |
651 | pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT; | |
652 | #endif | |
653 | min_low_pfn = min(min_low_pfn, pfn_start); | |
654 | max_low_pfn = max(max_low_pfn, pfn_end); | |
655 | return 0; | |
656 | } | |
657 | ||
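The two #ifdef arms round differently: FLATMEM tracks page-granular pfn bounds, while the other path widens to whole granules. A sketch of the granule arm, assuming a 16MB ia64 granule (the granule size and addresses are assumptions):

```c
#include <stdio.h>

#define PAGE_SHIFT    14
#define GRANULE_SHIFT 24		/* assume 16MB ia64 granules */
#define GRANULE_SIZE  (1UL << GRANULE_SHIFT)

int main(void)
{
	unsigned long pa_start = 0x4123000UL, pa_end = 0x5fff000UL;
	unsigned long lo = (pa_start & ~(GRANULE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long hi = ((pa_end + GRANULE_SIZE - 1) & ~(GRANULE_SIZE - 1))
			   >> PAGE_SHIFT;

	/* 0x4000000..0x6000000 rounded out: pfns 0x1000..0x1800 */
	printf("pfn bounds: 0x%lx..0x%lx\n", lo, hi);
	return 0;
}
```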
1da177e4 LT |
658 | /* |
659 | * Boot command-line option "nolwsys" can be used to disable the use of any light-weight | |
660 | * system call handler. When this option is in effect, all fsyscalls will end up bubbling | |
661 | * down into the kernel and calling the normal (heavy-weight) syscall handler. This is | |
662 | * useful for performance testing, but conceivably could also come in handy for debugging | |
663 | * purposes. | |
664 | */ | |
665 | ||
03906ea0 | 666 | static int nolwsys __initdata; |
1da177e4 LT |
667 | |
668 | static int __init | |
669 | nolwsys_setup (char *s) | |
670 | { | |
671 | nolwsys = 1; | |
672 | return 1; | |
673 | } | |
674 | ||
675 | __setup("nolwsys", nolwsys_setup); | |
676 | ||
dae28066 | 677 | void __init |
1da177e4 LT |
678 | mem_init (void) |
679 | { | |
680 | long reserved_pages, codesize, datasize, initsize; | |
1da177e4 LT |
681 | pg_data_t *pgdat; |
682 | int i; | |
683 | static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel; | |
684 | ||
fde740e4 RH |
685 | BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); |
686 | BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE); | |
687 | BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE); | |
688 | ||
1da177e4 LT |
689 | #ifdef CONFIG_PCI |
690 | /* | |
691 | * This needs to be called _after_ the command line has been parsed but _before_ | |
692 | * any drivers that may need the PCI DMA interface are initialized or bootmem has | |
693 | * been freed. | |
694 | */ | |
695 | platform_dma_init(); | |
696 | #endif | |
697 | ||
2d4b1fa2 | 698 | #ifdef CONFIG_FLATMEM |
1da177e4 LT |
699 | if (!mem_map) |
700 | BUG(); | |
701 | max_mapnr = max_low_pfn; | |
702 | #endif | |
703 | ||
704 | high_memory = __va(max_low_pfn * PAGE_SIZE); | |
705 | ||
706 | kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE); | |
707 | kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); | |
708 | kclist_add(&kcore_kernel, _stext, _end - _stext); | |
709 | ||
ec936fc5 | 710 | for_each_online_pgdat(pgdat) |
564601a5 | 711 | if (pgdat->bdata->node_bootmem_map) |
712 | totalram_pages += free_all_bootmem_node(pgdat); | |
1da177e4 LT |
713 | |
714 | reserved_pages = 0; | |
715 | efi_memmap_walk(count_reserved_pages, &reserved_pages); | |
716 | ||
717 | codesize = (unsigned long) _etext - (unsigned long) _stext; | |
718 | datasize = (unsigned long) _edata - (unsigned long) _etext; | |
719 | initsize = (unsigned long) __init_end - (unsigned long) __init_begin; | |
720 | ||
721 | printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, " | |
722 | "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10), | |
723 | num_physpages << (PAGE_SHIFT - 10), codesize >> 10, | |
724 | reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); | |
725 | ||
1da177e4 LT |
726 | |
727 | /* | |
728 | * For fsyscall entrypoints with no light-weight handler, use the ordinary | |
729 | * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry | |
730 | * code can tell them apart. | |
731 | */ | |
732 | for (i = 0; i < NR_syscalls; ++i) { | |
733 | extern unsigned long fsyscall_table[NR_syscalls]; | |
734 | extern unsigned long sys_call_table[NR_syscalls]; | |
735 | ||
736 | if (!fsyscall_table[i] || nolwsys) | |
737 | fsyscall_table[i] = sys_call_table[i] | 1; | |
738 | } | |
739 | setup_gate(); | |
740 | ||
741 | #ifdef CONFIG_IA32_SUPPORT | |
742 | ia32_mem_init(); | |
743 | #endif | |
744 | } | |
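The fsyscall loop above relies on syscall handler addresses being at least 2-byte aligned, which leaves bit 0 free as a tag. A minimal model of the tag-and-test (the address is made up):

```c
#include <stdio.h>

int main(void)
{
	unsigned long sys_entry = 0xa000000100040e80UL;	/* hypothetical    */
	unsigned long entry = sys_entry | 1;	/* no light-weight handler */

	if (entry & 1)	/* fsyscall stub: fall back to the heavy path */
		printf("heavy-weight handler at 0x%lx\n", entry & ~1UL);
	return 0;
}
```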
1681b8e1 YG |
745 | |
746 | #ifdef CONFIG_MEMORY_HOTPLUG | |
747 | void online_page(struct page *page) | |
748 | { | |
749 | ClearPageReserved(page); | |
7835e98b | 750 | init_page_count(page); |
1681b8e1 YG |
751 | __free_page(page); |
752 | totalram_pages++; | |
753 | num_physpages++; | |
754 | } | |
755 | ||
bc02af93 | 756 | int arch_add_memory(int nid, u64 start, u64 size) |
1681b8e1 YG |
757 | { |
758 | pg_data_t *pgdat; | |
759 | struct zone *zone; | |
760 | unsigned long start_pfn = start >> PAGE_SHIFT; | |
761 | unsigned long nr_pages = size >> PAGE_SHIFT; | |
762 | int ret; | |
763 | ||
bc02af93 | 764 | pgdat = NODE_DATA(nid); |
1681b8e1 YG |
765 | |
766 | zone = pgdat->node_zones + ZONE_NORMAL; | |
767 | ret = __add_pages(zone, start_pfn, nr_pages); | |
768 | ||
769 | if (ret) | |
770 | printk(KERN_ERR "%s: Problem encountered in __add_pages() as ret=%d\n", |
771 | __FUNCTION__, ret); | |
772 | ||
773 | return ret; | |
774 | } | |
775 | ||
776 | int remove_memory(u64 start, u64 size) | |
777 | { | |
778 | return -EINVAL; | |
779 | } | |
9c576ff1 | 780 | EXPORT_SYMBOL_GPL(remove_memory); |
1681b8e1 | 781 | #endif |