]>
Commit | Line | Data |
---|---|---|
01066625 PM |
1 | /* |
2 | * linux/arch/sh/mm/init.c | |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | |
01066625 | 5 | * Copyright (C) 2002 - 2007 Paul Mundt |
1da177e4 LT |
6 | * |
7 | * Based on linux/arch/i386/mm/init.c: | |
8 | * Copyright (C) 1995 Linus Torvalds | |
9 | */ | |
1da177e4 LT |
10 | #include <linux/mm.h> |
11 | #include <linux/swap.h> | |
1da177e4 | 12 | #include <linux/init.h> |
1da177e4 | 13 | #include <linux/bootmem.h> |
2cb7ce3b | 14 | #include <linux/proc_fs.h> |
27641dee | 15 | #include <linux/pagemap.h> |
01066625 PM |
16 | #include <linux/percpu.h> |
17 | #include <linux/io.h> | |
1da177e4 | 18 | #include <asm/mmu_context.h> |
1da177e4 LT |
19 | #include <asm/tlb.h> |
20 | #include <asm/cacheflush.h> | |
07cbb41b | 21 | #include <asm/sections.h> |
1da177e4 LT |
22 | #include <asm/cache.h> |
23 | ||
24 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | |
25 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |
c6feb614 SM |
26 | |
27 | #ifdef CONFIG_SUPERH32 | |
28 | /* | |
29 | * Handle trivial transitions between cached and uncached | |
30 | * segments, making use of the 1:1 mapping relationship in | |
31 | * 512MB lowmem. | |
32 | * | |
33 | * This is the offset of the uncached section from its cached alias. | |
34 | * Default value only valid in 29 bit mode, in 32bit mode will be | |
35 | * overridden in pmb_init. | |
36 | */ | |
37 | unsigned long cached_to_uncached = P2SEG - P1SEG; | |
38 | #endif | |
1da177e4 | 39 | |
11cbb70e | 40 | #ifdef CONFIG_MMU |
1da177e4 LT |
41 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) |
42 | { | |
43 | pgd_t *pgd; | |
26ff6c11 | 44 | pud_t *pud; |
1da177e4 LT |
45 | pmd_t *pmd; |
46 | pte_t *pte; | |
47 | ||
99a596f9 | 48 | pgd = pgd_offset_k(addr); |
1da177e4 LT |
49 | if (pgd_none(*pgd)) { |
50 | pgd_ERROR(*pgd); | |
51 | return; | |
52 | } | |
53 | ||
99a596f9 SM |
54 | pud = pud_alloc(NULL, pgd, addr); |
55 | if (unlikely(!pud)) { | |
56 | pud_ERROR(*pud); | |
57 | return; | |
26ff6c11 PM |
58 | } |
59 | ||
99a596f9 SM |
60 | pmd = pmd_alloc(NULL, pud, addr); |
61 | if (unlikely(!pmd)) { | |
62 | pmd_ERROR(*pmd); | |
63 | return; | |
1da177e4 LT |
64 | } |
65 | ||
66 | pte = pte_offset_kernel(pmd, addr); | |
67 | if (!pte_none(*pte)) { | |
68 | pte_ERROR(*pte); | |
69 | return; | |
70 | } | |
71 | ||
72 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | |
997d0030 | 73 | local_flush_tlb_one(get_asid(), addr); |
1da177e4 LT |
74 | } |
75 | ||
76 | /* | |
77 | * As a performance optimization, other platforms preserve the fixmap mapping | |
78 | * across a context switch, we don't presently do this, but this could be done | |
79 | * in a similar fashion as to the wired TLB interface that sh64 uses (by way | |
e868d612 | 80 | * of the memory mapped UTLB configuration) -- this unfortunately forces us to |
1da177e4 LT |
81 | * give up a TLB entry for each mapping we want to preserve. While this may be |
82 | * viable for a small number of fixmaps, it's not particularly useful for | |
83 | * everything and needs to be carefully evaluated. (ie, we may want this for | |
84 | * the vsyscall page). | |
85 | * | |
86 | * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass | |
87 | * in at __set_fixmap() time to determine the appropriate behavior to follow. | |
88 | * | |
89 | * -- PFM. | |
90 | */ | |
91 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | |
92 | { | |
93 | unsigned long address = __fix_to_virt(idx); | |
94 | ||
95 | if (idx >= __end_of_fixed_addresses) { | |
96 | BUG(); | |
97 | return; | |
98 | } | |
99 | ||
100 | set_pte_phys(address, phys, prot); | |
101 | } | |
2adb4e10 SM |
102 | |
103 | void __init page_table_range_init(unsigned long start, unsigned long end, | |
104 | pgd_t *pgd_base) | |
105 | { | |
106 | pgd_t *pgd; | |
107 | pud_t *pud; | |
108 | pmd_t *pmd; | |
0906a3ad PM |
109 | pte_t *pte; |
110 | int i, j, k; | |
2adb4e10 SM |
111 | unsigned long vaddr; |
112 | ||
0906a3ad PM |
113 | vaddr = start; |
114 | i = __pgd_offset(vaddr); | |
115 | j = __pud_offset(vaddr); | |
116 | k = __pmd_offset(vaddr); | |
117 | pgd = pgd_base + i; | |
118 | ||
119 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | |
120 | pud = (pud_t *)pgd; | |
121 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { | |
122 | pmd = (pmd_t *)pud; | |
123 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { | |
124 | if (pmd_none(*pmd)) { | |
125 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | |
126 | pmd_populate_kernel(&init_mm, pmd, pte); | |
127 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); | |
128 | } | |
129 | vaddr += PMD_SIZE; | |
130 | } | |
131 | k = 0; | |
2adb4e10 | 132 | } |
0906a3ad | 133 | j = 0; |
2adb4e10 SM |
134 | } |
135 | } | |
11cbb70e | 136 | #endif /* CONFIG_MMU */ |
1da177e4 | 137 | |
1da177e4 LT |
138 | /* |
139 | * paging_init() sets up the page tables | |
1da177e4 LT |
140 | */ |
141 | void __init paging_init(void) | |
142 | { | |
2de212eb | 143 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
0906a3ad | 144 | unsigned long vaddr, end; |
01066625 | 145 | int nid; |
1da177e4 | 146 | |
01066625 PM |
147 | /* We don't need to map the kernel through the TLB, as |
148 | * it is permanatly mapped using P1. So clear the | |
149 | * entire pgd. */ | |
150 | memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); | |
1da177e4 | 151 | |
6e4662ff SM |
152 | /* Set an initial value for the MMU.TTB so we don't have to |
153 | * check for a null value. */ | |
154 | set_TTB(swapper_pg_dir); | |
155 | ||
acca4f4d PM |
156 | /* |
157 | * Populate the relevant portions of swapper_pg_dir so that | |
2adb4e10 | 158 | * we can use the fixmap entries without calling kmalloc. |
acca4f4d PM |
159 | * pte's will be filled in by __set_fixmap(). |
160 | */ | |
161 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | |
0906a3ad PM |
162 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; |
163 | page_table_range_init(vaddr, end, swapper_pg_dir); | |
acca4f4d PM |
164 | |
165 | kmap_coherent_init(); | |
2adb4e10 | 166 | |
2de212eb PM |
167 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
168 | ||
01066625 PM |
169 | for_each_online_node(nid) { |
170 | pg_data_t *pgdat = NODE_DATA(nid); | |
01066625 PM |
171 | unsigned long low, start_pfn; |
172 | ||
3560e249 | 173 | start_pfn = pgdat->bdata->node_min_pfn; |
01066625 PM |
174 | low = pgdat->bdata->node_low_pfn; |
175 | ||
2de212eb PM |
176 | if (max_zone_pfns[ZONE_NORMAL] < low) |
177 | max_zone_pfns[ZONE_NORMAL] = low; | |
01066625 PM |
178 | |
179 | printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", | |
180 | nid, start_pfn, low); | |
01066625 | 181 | } |
2de212eb PM |
182 | |
183 | free_area_init_nodes(max_zone_pfns); | |
cbaa118e SM |
184 | |
185 | /* Set up the uncached fixmap */ | |
186 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); | |
1da177e4 LT |
187 | } |
188 | ||
a0614da8 | 189 | static struct kcore_list kcore_mem; |
2cb7ce3b | 190 | |
1da177e4 LT |
191 | void __init mem_init(void) |
192 | { | |
dfbb9042 | 193 | int codesize, datasize, initsize; |
01066625 | 194 | int nid; |
1da177e4 | 195 | |
2de212eb PM |
196 | num_physpages = 0; |
197 | high_memory = NULL; | |
198 | ||
01066625 PM |
199 | for_each_online_node(nid) { |
200 | pg_data_t *pgdat = NODE_DATA(nid); | |
201 | unsigned long node_pages = 0; | |
202 | void *node_high_memory; | |
01066625 PM |
203 | |
204 | num_physpages += pgdat->node_present_pages; | |
205 | ||
206 | if (pgdat->node_spanned_pages) | |
207 | node_pages = free_all_bootmem_node(pgdat); | |
208 | ||
209 | totalram_pages += node_pages; | |
1da177e4 | 210 | |
2de212eb PM |
211 | node_high_memory = (void *)__va((pgdat->node_start_pfn + |
212 | pgdat->node_spanned_pages) << | |
213 | PAGE_SHIFT); | |
01066625 PM |
214 | if (node_high_memory > high_memory) |
215 | high_memory = node_high_memory; | |
216 | } | |
1da177e4 | 217 | |
37443ef3 PM |
218 | /* Set this up early, so we can take care of the zero page */ |
219 | cpu_cache_init(); | |
220 | ||
1da177e4 LT |
221 | /* clear the zero-page */ |
222 | memset(empty_zero_page, 0, PAGE_SIZE); | |
223 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | |
224 | ||
1da177e4 LT |
225 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
226 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | |
227 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | |
228 | ||
c30bb2a2 | 229 | kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT, KCORE_RAM); |
2cb7ce3b PM |
230 | |
231 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " | |
dfbb9042 | 232 | "%dk data, %dk init)\n", |
cc013a88 | 233 | nr_free_pages() << (PAGE_SHIFT-10), |
2de212eb | 234 | num_physpages << (PAGE_SHIFT-10), |
1da177e4 | 235 | codesize >> 10, |
1da177e4 LT |
236 | datasize >> 10, |
237 | initsize >> 10); | |
238 | ||
19f9a34f PM |
239 | /* Initialize the vDSO */ |
240 | vsyscall_init(); | |
1da177e4 LT |
241 | } |
242 | ||
243 | void free_initmem(void) | |
244 | { | |
245 | unsigned long addr; | |
65463b73 | 246 | |
1da177e4 LT |
247 | addr = (unsigned long)(&__init_begin); |
248 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | |
249 | ClearPageReserved(virt_to_page(addr)); | |
7835e98b | 250 | init_page_count(virt_to_page(addr)); |
1da177e4 LT |
251 | free_page(addr); |
252 | totalram_pages++; | |
253 | } | |
07cbb41b PM |
254 | printk("Freeing unused kernel memory: %ldk freed\n", |
255 | ((unsigned long)&__init_end - | |
256 | (unsigned long)&__init_begin) >> 10); | |
1da177e4 LT |
257 | } |
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Release the pages of the initial ramdisk, [start, end), back to the
 * page allocator once its contents are no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	/* Fix: informational message was missing its printk log level. */
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
	       (end - start) >> 10);
}
#endif
33d63bd8 | 272 | |
#if THREAD_SHIFT < PAGE_SHIFT
/*
 * When thread stacks are smaller than a page, allocate them from a
 * dedicated slab cache instead of whole pages.
 */
static struct kmem_cache *thread_info_cache;

/* Allocate a thread_info for @tsk; returns NULL on allocation failure. */
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the stack so usage accounting can find the watermark. */
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

/* Return a thread_info previously obtained from alloc_thread_info(). */
void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

/* Create the thread_info slab cache; called once during boot. */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
301 | ||
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Hot-add @size bytes of memory at physical address @start to node @nid.
 * Returns 0 on success or the negative error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		/* Fix: error message was missing its printk log level. */
		printk(KERN_ERR "%s: Failed, __add_pages() == %d\n",
		       __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
/* Map a hot-added physical address to its node. */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */