/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
1da177e4 LT |
10 | #include <linux/mm.h> |
11 | #include <linux/swap.h> | |
1da177e4 | 12 | #include <linux/init.h> |
5a0e3ad6 | 13 | #include <linux/gfp.h> |
1da177e4 | 14 | #include <linux/bootmem.h> |
2cb7ce3b | 15 | #include <linux/proc_fs.h> |
27641dee | 16 | #include <linux/pagemap.h> |
01066625 PM |
17 | #include <linux/percpu.h> |
18 | #include <linux/io.h> | |
94c28510 | 19 | #include <linux/dma-mapping.h> |
1da177e4 | 20 | #include <asm/mmu_context.h> |
1da177e4 LT |
21 | #include <asm/tlb.h> |
22 | #include <asm/cacheflush.h> | |
07cbb41b | 23 | #include <asm/sections.h> |
1da177e4 | 24 | #include <asm/cache.h> |
b0f3ae03 | 25 | #include <asm/sizes.h> |
1da177e4 LT |
26 | |
27 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | |
28 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |
c6feb614 | 29 | |
11cbb70e | 30 | #ifdef CONFIG_MMU |
07cad4dc | 31 | static pte_t *__get_pte_phys(unsigned long addr) |
1da177e4 LT |
32 | { |
33 | pgd_t *pgd; | |
26ff6c11 | 34 | pud_t *pud; |
1da177e4 LT |
35 | pmd_t *pmd; |
36 | pte_t *pte; | |
37 | ||
99a596f9 | 38 | pgd = pgd_offset_k(addr); |
1da177e4 LT |
39 | if (pgd_none(*pgd)) { |
40 | pgd_ERROR(*pgd); | |
07cad4dc | 41 | return NULL; |
1da177e4 LT |
42 | } |
43 | ||
99a596f9 SM |
44 | pud = pud_alloc(NULL, pgd, addr); |
45 | if (unlikely(!pud)) { | |
46 | pud_ERROR(*pud); | |
07cad4dc | 47 | return NULL; |
26ff6c11 PM |
48 | } |
49 | ||
99a596f9 SM |
50 | pmd = pmd_alloc(NULL, pud, addr); |
51 | if (unlikely(!pmd)) { | |
52 | pmd_ERROR(*pmd); | |
07cad4dc | 53 | return NULL; |
1da177e4 LT |
54 | } |
55 | ||
56 | pte = pte_offset_kernel(pmd, addr); | |
07cad4dc MF |
57 | return pte; |
58 | } | |
59 | ||
60 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | |
61 | { | |
62 | pte_t *pte; | |
63 | ||
64 | pte = __get_pte_phys(addr); | |
1da177e4 LT |
65 | if (!pte_none(*pte)) { |
66 | pte_ERROR(*pte); | |
67 | return; | |
68 | } | |
69 | ||
70 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | |
997d0030 | 71 | local_flush_tlb_one(get_asid(), addr); |
07cad4dc MF |
72 | |
73 | if (pgprot_val(prot) & _PAGE_WIRED) | |
74 | tlb_wire_entry(NULL, addr, *pte); | |
75 | } | |
76 | ||
77 | static void clear_pte_phys(unsigned long addr, pgprot_t prot) | |
78 | { | |
79 | pte_t *pte; | |
80 | ||
81 | pte = __get_pte_phys(addr); | |
82 | ||
83 | if (pgprot_val(prot) & _PAGE_WIRED) | |
84 | tlb_unwire_entry(); | |
85 | ||
86 | set_pte(pte, pfn_pte(0, __pgprot(0))); | |
87 | local_flush_tlb_one(get_asid(), addr); | |
1da177e4 LT |
88 | } |
89 | ||
1da177e4 LT |
90 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) |
91 | { | |
92 | unsigned long address = __fix_to_virt(idx); | |
93 | ||
94 | if (idx >= __end_of_fixed_addresses) { | |
95 | BUG(); | |
96 | return; | |
97 | } | |
98 | ||
99 | set_pte_phys(address, phys, prot); | |
100 | } | |
2adb4e10 | 101 | |
07cad4dc MF |
102 | void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot) |
103 | { | |
104 | unsigned long address = __fix_to_virt(idx); | |
105 | ||
106 | if (idx >= __end_of_fixed_addresses) { | |
107 | BUG(); | |
108 | return; | |
109 | } | |
110 | ||
111 | clear_pte_phys(address, prot); | |
112 | } | |
113 | ||
2adb4e10 SM |
114 | void __init page_table_range_init(unsigned long start, unsigned long end, |
115 | pgd_t *pgd_base) | |
116 | { | |
117 | pgd_t *pgd; | |
118 | pud_t *pud; | |
119 | pmd_t *pmd; | |
0906a3ad PM |
120 | pte_t *pte; |
121 | int i, j, k; | |
2adb4e10 SM |
122 | unsigned long vaddr; |
123 | ||
0906a3ad PM |
124 | vaddr = start; |
125 | i = __pgd_offset(vaddr); | |
126 | j = __pud_offset(vaddr); | |
127 | k = __pmd_offset(vaddr); | |
128 | pgd = pgd_base + i; | |
129 | ||
130 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | |
131 | pud = (pud_t *)pgd; | |
132 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { | |
5d9b4b19 | 133 | #ifdef __PAGETABLE_PMD_FOLDED |
0906a3ad | 134 | pmd = (pmd_t *)pud; |
5d9b4b19 MF |
135 | #else |
136 | pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); | |
137 | pud_populate(&init_mm, pud, pmd); | |
138 | pmd += k; | |
139 | #endif | |
0906a3ad PM |
140 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { |
141 | if (pmd_none(*pmd)) { | |
142 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | |
143 | pmd_populate_kernel(&init_mm, pmd, pte); | |
144 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); | |
145 | } | |
146 | vaddr += PMD_SIZE; | |
147 | } | |
148 | k = 0; | |
2adb4e10 | 149 | } |
0906a3ad | 150 | j = 0; |
2adb4e10 SM |
151 | } |
152 | } | |
11cbb70e | 153 | #endif /* CONFIG_MMU */ |
1da177e4 | 154 | |
1da177e4 LT |
155 | /* |
156 | * paging_init() sets up the page tables | |
1da177e4 LT |
157 | */ |
158 | void __init paging_init(void) | |
159 | { | |
2de212eb | 160 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
0906a3ad | 161 | unsigned long vaddr, end; |
01066625 | 162 | int nid; |
1da177e4 | 163 | |
01066625 PM |
164 | /* We don't need to map the kernel through the TLB, as |
165 | * it is permanatly mapped using P1. So clear the | |
166 | * entire pgd. */ | |
167 | memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); | |
1da177e4 | 168 | |
6e4662ff SM |
169 | /* Set an initial value for the MMU.TTB so we don't have to |
170 | * check for a null value. */ | |
171 | set_TTB(swapper_pg_dir); | |
172 | ||
acca4f4d PM |
173 | /* |
174 | * Populate the relevant portions of swapper_pg_dir so that | |
2adb4e10 | 175 | * we can use the fixmap entries without calling kmalloc. |
acca4f4d PM |
176 | * pte's will be filled in by __set_fixmap(). |
177 | */ | |
178 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | |
0906a3ad PM |
179 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; |
180 | page_table_range_init(vaddr, end, swapper_pg_dir); | |
acca4f4d PM |
181 | |
182 | kmap_coherent_init(); | |
2adb4e10 | 183 | |
2de212eb PM |
184 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
185 | ||
01066625 PM |
186 | for_each_online_node(nid) { |
187 | pg_data_t *pgdat = NODE_DATA(nid); | |
01066625 PM |
188 | unsigned long low, start_pfn; |
189 | ||
3560e249 | 190 | start_pfn = pgdat->bdata->node_min_pfn; |
01066625 PM |
191 | low = pgdat->bdata->node_low_pfn; |
192 | ||
2de212eb PM |
193 | if (max_zone_pfns[ZONE_NORMAL] < low) |
194 | max_zone_pfns[ZONE_NORMAL] = low; | |
01066625 PM |
195 | |
196 | printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", | |
197 | nid, start_pfn, low); | |
01066625 | 198 | } |
2de212eb PM |
199 | |
200 | free_area_init_nodes(max_zone_pfns); | |
1da177e4 LT |
201 | } |
202 | ||
94c28510 PM |
203 | /* |
204 | * Early initialization for any I/O MMUs we might have. | |
205 | */ | |
206 | static void __init iommu_init(void) | |
207 | { | |
208 | no_iommu_init(); | |
209 | } | |
210 | ||
d9b9487a PM |
/* Set to 1 once mem_init() has finished. */
unsigned int mem_init_done = 0;
212 | ||
1da177e4 LT |
213 | void __init mem_init(void) |
214 | { | |
dfbb9042 | 215 | int codesize, datasize, initsize; |
01066625 | 216 | int nid; |
1da177e4 | 217 | |
94c28510 PM |
218 | iommu_init(); |
219 | ||
2de212eb PM |
220 | num_physpages = 0; |
221 | high_memory = NULL; | |
222 | ||
01066625 PM |
223 | for_each_online_node(nid) { |
224 | pg_data_t *pgdat = NODE_DATA(nid); | |
225 | unsigned long node_pages = 0; | |
226 | void *node_high_memory; | |
01066625 PM |
227 | |
228 | num_physpages += pgdat->node_present_pages; | |
229 | ||
230 | if (pgdat->node_spanned_pages) | |
231 | node_pages = free_all_bootmem_node(pgdat); | |
232 | ||
233 | totalram_pages += node_pages; | |
1da177e4 | 234 | |
2de212eb PM |
235 | node_high_memory = (void *)__va((pgdat->node_start_pfn + |
236 | pgdat->node_spanned_pages) << | |
237 | PAGE_SHIFT); | |
01066625 PM |
238 | if (node_high_memory > high_memory) |
239 | high_memory = node_high_memory; | |
240 | } | |
1da177e4 | 241 | |
37443ef3 PM |
242 | /* Set this up early, so we can take care of the zero page */ |
243 | cpu_cache_init(); | |
244 | ||
1da177e4 LT |
245 | /* clear the zero-page */ |
246 | memset(empty_zero_page, 0, PAGE_SIZE); | |
247 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | |
248 | ||
35f99c0d PM |
249 | vsyscall_init(); |
250 | ||
1da177e4 LT |
251 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
252 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | |
253 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | |
254 | ||
2cb7ce3b | 255 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " |
dfbb9042 | 256 | "%dk data, %dk init)\n", |
cc013a88 | 257 | nr_free_pages() << (PAGE_SHIFT-10), |
2de212eb | 258 | num_physpages << (PAGE_SHIFT-10), |
1da177e4 | 259 | codesize >> 10, |
1da177e4 LT |
260 | datasize >> 10, |
261 | initsize >> 10); | |
262 | ||
35f99c0d PM |
263 | printk(KERN_INFO "virtual kernel memory layout:\n" |
264 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
265 | #ifdef CONFIG_HIGHMEM | |
266 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
267 | #endif | |
268 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | |
3125ee72 | 269 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n" |
b0f3ae03 | 270 | #ifdef CONFIG_UNCACHED_MAPPING |
3125ee72 | 271 | " : 0x%08lx - 0x%08lx (%4ld MB) (uncached)\n" |
b0f3ae03 | 272 | #endif |
35f99c0d PM |
273 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" |
274 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" | |
275 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", | |
276 | FIXADDR_START, FIXADDR_TOP, | |
277 | (FIXADDR_TOP - FIXADDR_START) >> 10, | |
278 | ||
279 | #ifdef CONFIG_HIGHMEM | |
280 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | |
281 | (LAST_PKMAP*PAGE_SIZE) >> 10, | |
282 | #endif | |
283 | ||
284 | (unsigned long)VMALLOC_START, VMALLOC_END, | |
285 | (VMALLOC_END - VMALLOC_START) >> 20, | |
286 | ||
287 | (unsigned long)memory_start, (unsigned long)high_memory, | |
288 | ((unsigned long)high_memory - (unsigned long)memory_start) >> 20, | |
289 | ||
b0f3ae03 | 290 | #ifdef CONFIG_UNCACHED_MAPPING |
9edef286 | 291 | uncached_start, uncached_end, uncached_size >> 20, |
b0f3ae03 | 292 | #endif |
3125ee72 | 293 | |
35f99c0d PM |
294 | (unsigned long)&__init_begin, (unsigned long)&__init_end, |
295 | ((unsigned long)&__init_end - | |
296 | (unsigned long)&__init_begin) >> 10, | |
297 | ||
298 | (unsigned long)&_etext, (unsigned long)&_edata, | |
299 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, | |
300 | ||
301 | (unsigned long)&_text, (unsigned long)&_etext, | |
302 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); | |
d9b9487a PM |
303 | |
304 | mem_init_done = 1; | |
1da177e4 LT |
305 | } |
306 | ||
307 | void free_initmem(void) | |
308 | { | |
309 | unsigned long addr; | |
65463b73 | 310 | |
1da177e4 LT |
311 | addr = (unsigned long)(&__init_begin); |
312 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | |
313 | ClearPageReserved(virt_to_page(addr)); | |
7835e98b | 314 | init_page_count(virt_to_page(addr)); |
1da177e4 LT |
315 | free_page(addr); |
316 | totalram_pages++; | |
317 | } | |
07cbb41b PM |
318 | printk("Freeing unused kernel memory: %ldk freed\n", |
319 | ((unsigned long)&__init_end - | |
320 | (unsigned long)&__init_begin) >> 10); | |
1da177e4 LT |
321 | } |
322 | ||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages that held the initrd image, [@start, @end), to the
 * page allocator once it is no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}

	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
33d63bd8 PM |
336 | |
337 | #ifdef CONFIG_MEMORY_HOTPLUG | |
33d63bd8 PM |
338 | int arch_add_memory(int nid, u64 start, u64 size) |
339 | { | |
340 | pg_data_t *pgdat; | |
341 | unsigned long start_pfn = start >> PAGE_SHIFT; | |
342 | unsigned long nr_pages = size >> PAGE_SHIFT; | |
343 | int ret; | |
344 | ||
345 | pgdat = NODE_DATA(nid); | |
346 | ||
347 | /* We only have ZONE_NORMAL, so this is easy.. */ | |
c04fc586 GH |
348 | ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL, |
349 | start_pfn, nr_pages); | |
33d63bd8 | 350 | if (unlikely(ret)) |
866e6b9e | 351 | printk("%s: Failed, __add_pages() == %d\n", __func__, ret); |
33d63bd8 PM |
352 | |
353 | return ret; | |
354 | } | |
355 | EXPORT_SYMBOL_GPL(arch_add_memory); | |
356 | ||
357d5946 | 357 | #ifdef CONFIG_NUMA |
33d63bd8 PM |
358 | int memory_add_physaddr_to_nid(u64 addr) |
359 | { | |
360 | /* Node 0 for now.. */ | |
361 | return 0; | |
362 | } | |
363 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | |
364 | #endif | |
1f69b6af | 365 | |
3159e7d6 | 366 | #endif /* CONFIG_MEMORY_HOTPLUG */ |