/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995 Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>          /* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>     /* for node_online_map */
#include <linux/pagemap.h>      /* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
        .name   = "Kernel data",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
        .name   = "PDC data (Page Zero)",
        .start  = 0,
        .end    = 0x9ff,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

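/*
 * Hand-rolled scan of boot_command_line for a "mem=" argument, needed
 * before the regular __setup() machinery has run.  The size itself is
 * decoded by memparse(), which understands size suffixes such as 'k',
 * 'M' and 'G'.
 */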
static void __init mem_limit_func(void)
{
        char *cp, *end;
        unsigned long limit;

        /* We need this before __setup() functions are called */

        limit = MAX_MEM;
        for (cp = boot_command_line; *cp; ) {
                if (memcmp(cp, "mem=", 4) == 0) {
                        cp += 4;
                        limit = memparse(cp, &end);
                        if (end != cp)
                                break;
                        cp = end;
                } else {
                        while (*cp != ' ' && *cp)
                                ++cp;
                        while (*cp == ' ')
                                ++cp;
                }
        }

        if (limit < mem_limit)
                mem_limit = limit;
}

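/* 1 GB (0x40000000 bytes), expressed as a number of page frames. */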
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

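/*
 * setup_bootmem() takes the firmware-reported pmem_ranges[] (filled in
 * from kernel/inventory.c), sorts and trims them, registers them as
 * "System RAM" resources, and seeds the bootmem allocator with the
 * result.
 */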
static void __init setup_bootmem(void)
{
        unsigned long bootmap_size;
        unsigned long mem_max;
        unsigned long bootmap_pages;
        unsigned long bootmap_start_pfn;
        unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
        physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
        int npmem_holes;
#endif
        int i, sysram_resource_count;

        disable_sr_hashing(); /* Turn off space register hashing */

        /*
         * Sort the ranges. Since the number of ranges is typically
         * small, and performance is not an issue here, just do
         * a simple insertion sort.
         */

        for (i = 1; i < npmem_ranges; i++) {
                int j;

                for (j = i; j > 0; j--) {
                        unsigned long tmp;

                        if (pmem_ranges[j-1].start_pfn <
                            pmem_ranges[j].start_pfn) {
                                break;
                        }
                        tmp = pmem_ranges[j-1].start_pfn;
                        pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
                        pmem_ranges[j].start_pfn = tmp;
                        tmp = pmem_ranges[j-1].pages;
                        pmem_ranges[j-1].pages = pmem_ranges[j].pages;
                        pmem_ranges[j].pages = tmp;
                }
        }

#ifndef CONFIG_DISCONTIGMEM
        /*
         * Throw out ranges that are too far apart (controlled by
         * MAX_GAP).
         */

        for (i = 1; i < npmem_ranges; i++) {
                if (pmem_ranges[i].start_pfn -
                        (pmem_ranges[i-1].start_pfn +
                         pmem_ranges[i-1].pages) > MAX_GAP) {
                        npmem_ranges = i;
                        printk("Large gap in memory detected (%ld pages). "
                               "Consider turning on CONFIG_DISCONTIGMEM\n",
                               pmem_ranges[i].start_pfn -
                               (pmem_ranges[i-1].start_pfn +
                                pmem_ranges[i-1].pages));
                        break;
                }
        }
#endif

        if (npmem_ranges > 1) {

                /* Print the memory ranges */

                printk(KERN_INFO "Memory Ranges:\n");

                for (i = 0; i < npmem_ranges; i++) {
                        unsigned long start;
                        unsigned long size;

                        size = (pmem_ranges[i].pages << PAGE_SHIFT);
                        start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
                        printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
                                i, start, start + (size - 1), size >> 20);
                }
        }

        sysram_resource_count = npmem_ranges;
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                res->name = "System RAM";
                res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
                res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
        }

        /*
         * For 32 bit kernels we limit the amount of memory we can
         * support, in order to preserve enough kernel address space
         * for other purposes. For 64 bit kernels we don't normally
         * limit the memory, but this mechanism can be used to
         * artificially limit the amount of memory (and it is written
         * to work with multiple memory ranges).
         */

        mem_limit_func();       /* check for "mem=" argument */

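        /*
         * Walk the (now sorted) ranges, accumulating pages until mem_limit
         * is reached.  If the limit lands inside a range, that range is
         * shrunk so the total comes out to exactly mem_limit, and any
         * ranges beyond it are dropped.
         */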
        mem_max = 0;
        num_physpages = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long rsize;

                rsize = pmem_ranges[i].pages << PAGE_SHIFT;
                if ((mem_max + rsize) > mem_limit) {
                        printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
                        if (mem_max == mem_limit)
                                npmem_ranges = i;
                        else {
                                pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
                                                     - (mem_max >> PAGE_SHIFT);
                                npmem_ranges = i + 1;
                                mem_max = mem_limit;
                        }
                        num_physpages += pmem_ranges[i].pages;
                        break;
                }
                num_physpages += pmem_ranges[i].pages;
                mem_max += rsize;
        }

        printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
        /* Merge the ranges, keeping track of the holes */

        {
                unsigned long end_pfn;
                unsigned long hole_pages;

                npmem_holes = 0;
                end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
                for (i = 1; i < npmem_ranges; i++) {

                        hole_pages = pmem_ranges[i].start_pfn - end_pfn;
                        if (hole_pages) {
                                pmem_holes[npmem_holes].start_pfn = end_pfn;
                                pmem_holes[npmem_holes++].pages = hole_pages;
                                end_pfn += hole_pages;
                        }
                        end_pfn += pmem_ranges[i].pages;
                }

                pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
                npmem_ranges = 1;
        }
#endif

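        /*
         * Size the boot-time bitmaps: bootmem_bootmap_pages(n) returns
         * the number of whole pages needed for a bitmap that carries one
         * bit per page frame in a range of n pages.
         */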
        bootmap_pages = 0;
        for (i = 0; i < npmem_ranges; i++)
                bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

        bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
        for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
                memset(NODE_DATA(i), 0, sizeof(pg_data_t));
                NODE_DATA(i)->bdata = &bootmem_node_data[i];
        }
        memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

        for (i = 0; i < npmem_ranges; i++)
                node_set_online(i);
#endif

        /*
         * Initialize and free the full range of memory in each range.
         * Note that the only writing these routines do is to the bootmap,
         * and we've made sure to locate the bootmap properly so that they
         * won't be writing over anything important.
         */

        bootmap_pfn = bootmap_start_pfn;
        max_pfn = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long start_pfn;
                unsigned long npages;

                start_pfn = pmem_ranges[i].start_pfn;
                npages = pmem_ranges[i].pages;

                bootmap_size = init_bootmem_node(NODE_DATA(i),
                                                 bootmap_pfn,
                                                 start_pfn,
                                                 (start_pfn + npages));
                free_bootmem_node(NODE_DATA(i),
                                  (start_pfn << PAGE_SHIFT),
                                  (npages << PAGE_SHIFT));
                bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if ((start_pfn + npages) > max_pfn)
                        max_pfn = start_pfn + npages;
        }

        /* IOMMU is always used to access "high mem" on those boxes
         * that can support enough mem that a PCI device couldn't
         * directly DMA to any physical addresses.
         * ISA DMA support will need to revisit this.
         */
        max_low_pfn = max_pfn;

        /* bootmap sizing messed up? */
        BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

        /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

        reserve_bootmem_node(NODE_DATA(0), 0UL,
                        (unsigned long)(PAGE0->mem_free +
                                PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
        reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
                        (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
        reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
                        ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
                        BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

        /* reserve the holes */

        for (i = 0; i < npmem_holes; i++) {
                reserve_bootmem_node(NODE_DATA(0),
                                (pmem_holes[i].start_pfn << PAGE_SHIFT),
                                (pmem_holes[i].pages << PAGE_SHIFT),
                                BOOTMEM_DEFAULT);
        }
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
                if (__pa(initrd_start) < mem_max) {
                        unsigned long initrd_reserve;

                        if (__pa(initrd_end) > mem_max) {
                                initrd_reserve = mem_max - __pa(initrd_start);
                        } else {
                                initrd_reserve = initrd_end - initrd_start;
                        }
                        initrd_below_start_ok = 1;
                        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
                                __pa(initrd_start),
                                __pa(initrd_start) + initrd_reserve, mem_max);

                        reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
                                        initrd_reserve, BOOTMEM_DEFAULT);
                }
        }
#endif

        data_resource.start = virt_to_phys(&data_start);
        data_resource.end = virt_to_phys(_end) - 1;
        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(&data_start) - 1;

        /* We don't know which region the kernel will be in, so try
         * all of them.
         */
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
        request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
        unsigned long addr, init_begin, init_end;

        printk(KERN_INFO "Freeing unused kernel memory: ");

#ifdef CONFIG_DEBUG_KERNEL
        /* Attempt to catch anyone trying to execute code here
         * by filling the page with BRK insns.
         *
         * If we disable interrupts for all CPUs, then IPI stops working.
         * Kinda breaks the global cache flushing.
         */
        local_irq_disable();

        memset(__init_begin, 0x00,
                (unsigned long)__init_end - (unsigned long)__init_begin);

        flush_data_cache();
        asm volatile("sync" : : );
        flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
        asm volatile("sync" : : );

        local_irq_enable();
#endif

        /* align __init_begin and __init_end to page size,
           ignoring the linker script where we might have tried to save RAM */
        init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
        init_end   = PAGE_ALIGN((unsigned long)(__init_end));
        for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                num_physpages++;
                totalram_pages++;
        }

        /* set up a new LED state on systems shipped with an LED state panel */
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

        printk("%luk freed\n", (init_end - init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
        /* rodata memory was already mapped with KERNEL_RO access rights by
           pagetable_init() and map_pages(). No need to do additional stuff here */
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                (unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

        /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))
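/* Illustrative example: SET_MAP_OFFSET(0x1000) yields 0x8000 and
 * SET_MAP_OFFSET(0x8000) yields 0x10000 -- the result is always the next
 * 32k boundary strictly above the argument, guaranteeing a hole. */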
void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        /* Do sanity checks on page table constants */
        BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
        BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
        BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
        BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
                        > BITS_PER_LONG);

        high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
        totalram_pages += free_all_bootmem();
#else
        {
                int i;

                for (i = 0; i < npmem_ranges; i++)
                        totalram_pages += free_all_bootmem_node(NODE_DATA(i));
        }
#endif

        codesize = (unsigned long)_etext - (unsigned long)_text;
        datasize = (unsigned long)_edata - (unsigned long)_etext;
        initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

        reservedpages = 0;
{
        unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
        int i;

        for (i = 0; i < npmem_ranges; i++) {
                for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
                        if (PageReserved(pfn_to_page(pfn)))
                                reservedpages++;
                }
        }
#else /* !CONFIG_DISCONTIGMEM */
        for (pfn = 0; pfn < max_pfn; pfn++) {
                /*
                 * Only count reserved RAM pages
                 */
                if (PageReserved(pfn_to_page(pfn)))
                        reservedpages++;
        }
#endif
}

#ifdef CONFIG_PA11
        if (hppa_dma_ops == &pcxl_dma_ops) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
                vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
        } else {
                pcxl_dma_start = 0;
                vmalloc_start = SET_MAP_OFFSET(MAP_START);
        }
#else
        vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10
        );

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
               "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
               "      .init : 0x%p - 0x%p   (%4ld kB)\n"
               "      .data : 0x%p - 0x%p   (%4ld kB)\n"
               "      .text : 0x%p - 0x%p   (%4ld kB)\n",

               (void*)VMALLOC_START, (void*)VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               __va(0), high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               __init_begin, __init_end,
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

               _etext, _edata,
               ((unsigned long)_edata - (unsigned long)_etext) >> 10,

               _text, _etext,
               ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

void show_mem(void)
{
        int i, free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
#ifndef CONFIG_DISCONTIGMEM
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (!page_count(&mem_map[i]))
                        free++;
                else
                        shared += page_count(&mem_map[i]) - 1;
        }
#else
        for (i = 0; i < npmem_ranges; i++) {
                int j;

                for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
                        struct page *p;
                        unsigned long flags;

                        pgdat_resize_lock(NODE_DATA(i), &flags);
                        p = nid_page_nr(i, j) - node_start_pfn(i);

                        total++;
                        if (PageReserved(p))
                                reserved++;
                        else if (PageSwapCache(p))
                                cached++;
                        else if (!page_count(p))
                                free++;
                        else
                                shared += page_count(p) - 1;
                        pgdat_resize_unlock(NODE_DATA(i), &flags);
                }
        }
#endif
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
        {
                struct zonelist *zl;
                int i, j;

                for (i = 0; i < npmem_ranges; i++) {
                        zl = node_zonelist(i, 0);
                        for (j = 0; j < MAX_NR_ZONES; j++) {
                                struct zoneref *z;
                                struct zone *zone;

                                printk("Zone list for zone %d on node %d: ", j, i);
                                for_each_zone_zonelist(zone, z, zl, j)
                                        printk("[%d/%s] ", zone_to_nid(zone),
                                                zone->name);
                                printk("\n");
                        }
                }
        }
#endif
}

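/*
 * Build kernel mappings for [start_vaddr, start_vaddr + size) onto
 * [start_paddr, start_paddr + size), allocating any missing pmd/pte
 * tables from bootmem.  On 4kB page kernels the kernel text region is
 * mapped read-only, except for the fault vector (so the HPMC checksum
 * can still be written) and the gateway page.
 */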
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
                             unsigned long size, pgprot_t pgprot)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long end_paddr;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long tmp1;
        unsigned long tmp2;
        unsigned long address;
        unsigned long ro_start;
        unsigned long ro_end;
        unsigned long fv_addr;
        unsigned long gw_addr;
        extern const unsigned long fault_vector_20;
        extern void * const linux_gateway_page;

        ro_start = __pa((unsigned long)_text);
        ro_end   = __pa((unsigned long)&data_start);
        fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
        gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

        end_paddr = start_paddr + size;

        pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = start_paddr;
        while (address < end_paddr) {
#if PTRS_PER_PMD == 1
                pmd = (pmd_t *)__pa(pg_dir);
#else
                pmd = (pmd_t *)pgd_address(*pg_dir);

                /*
                 * pmd is physical at this point
                 */

                if (!pmd) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),
                                        PAGE_SIZE << PMD_ORDER);
                        pmd = (pmd_t *) __pa(pmd);
                }

                pgd_populate(NULL, pg_dir, __va(pmd));
#endif
                pg_dir++;

                /* now change pmd to kernel virtual addresses */

                pmd = (pmd_t *)__va(pmd) + start_pmd;
                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

                        /*
                         * pg_table is physical at this point
                         */

                        pg_table = (pte_t *)pmd_address(*pmd);
                        if (!pg_table) {
                                pg_table = (pte_t *)
                                        alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
                                pg_table = (pte_t *) __pa(pg_table);
                        }

                        pmd_populate_kernel(NULL, pmd, __va(pg_table));

                        /* now change pg_table to kernel virtual addresses */

                        pg_table = (pte_t *) __va(pg_table) + start_pte;
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                                pte_t pte;

                                /*
                                 * Map the fault vector writable so we can
                                 * write the HPMC checksum.
                                 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
                                if (address >= ro_start && address < ro_end
                                                && address != fv_addr
                                                && address != gw_addr)
                                        pte = __mk_pte(address, PAGE_KERNEL_RO);
                                else
#endif
                                        pte = __mk_pte(address, pgprot);

                                if (address >= end_paddr)
                                        pte_val(pte) = 0;

                                set_pte(pg_table, pte);

                                address += PAGE_SIZE;
                        }
                        start_pte = 0;

                        if (address >= end_paddr)
                                break;
                }
                start_pmd = 0;
        }
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
        int range;

        /* Map each physical memory range to its kernel vaddr */

        for (range = 0; range < npmem_ranges; range++) {
                unsigned long start_paddr;
                unsigned long end_paddr;
                unsigned long size;

                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
                end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
                size = pmem_ranges[range].pages << PAGE_SHIFT;

                map_pages((unsigned long)__va(start_paddr), start_paddr,
                        size, PAGE_KERNEL);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_end && initrd_end > mem_limit) {
                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                map_pages(initrd_start, __pa(initrd_start),
                        initrd_end - initrd_start, PAGE_KERNEL);
        }
#endif

        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
        memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
        unsigned long linux_gateway_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const linux_gateway_page;

        linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup Linux Gateway page.
         *
         * The Linux gateway page will reside in kernel space (on virtual
         * page 0), so it doesn't need to be aliased into user space.
         */

        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
                PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long address;
        unsigned long hpux_gw_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const hpux_gateway_page;

        hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup HP-UX Gateway page.
         *
         * The HP-UX gateway page resides in the user address space,
         * so it needs to be aliased into each process.
         */

        pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
        pmd = (pmd_t *)__pa(pg_dir);
#else
        pmd = (pmd_t *) pgd_address(*pg_dir);

        /*
         * pmd is physical at this point
         */

        if (!pmd) {
                pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
                pmd = (pmd_t *) __pa(pmd);
        }

        __pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
        /* now change pmd to kernel virtual addresses */

        pmd = (pmd_t *)__va(pmd) + start_pmd;

        /*
         * pg_table is physical at this point
         */

        pg_table = (pte_t *) pmd_address(*pmd);
        if (!pg_table)
                pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

        __pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

        /* now change pg_table to kernel virtual addresses */

        pg_table = (pte_t *) __va(pg_table) + start_pte;
        set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
        int i;

        setup_bootmem();
        pagetable_init();
        gateway_init();
        flush_cache_all_local(); /* start with known state */
        flush_tlb_all_local(NULL);

        for (i = 0; i < npmem_ranges; i++) {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, };

                zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
                /* Need to initialize the pfnnid_map before we can initialize
                   the zone */
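                /* Each pfnnid_map[] slot covers 1 << PFNNID_SHIFT pfns and
                   records the node that owns that slice of the physical
                   address space. */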
                {
                        int j;
                        for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
                             j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
                             j++) {
                                pfnnid_map[j] = i;
                        }
                }
#endif

                free_area_init_node(i, zones_size,
                                pmem_ranges[i].start_pfn, NULL);
        }
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

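/*
 * Once more than half of all space IDs are sitting in the dirty set,
 * flush_tlb_all() below gathers them up and recycles them in one batch.
 */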
#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
        unsigned long index;

        spin_lock(&sid_lock);

        if (free_space_ids == 0) {
                if (dirty_space_ids != 0) {
                        spin_unlock(&sid_lock);
                        flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
                        spin_lock(&sid_lock);
                }
                BUG_ON(free_space_ids == 0);
        }

        free_space_ids--;

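        /*
         * Next-fit search: resume the bitmap scan at the previously
         * allocated index instead of always starting from bit 0.
         */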
        index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
        space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
        space_id_index = index;

        spin_unlock(&sid_lock);

        return index << SPACEID_SHIFT;
}

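/*
 * free_sid() never returns an ID straight to the free pool; it only
 * marks it dirty.  The ID becomes allocatable again once the next full
 * TLB flush has run recycle_sids().
 */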
void free_sid(unsigned long spaceid)
{
        unsigned long index = spaceid >> SPACEID_SHIFT;
        unsigned long *dirty_space_offset;

        dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
        index &= (BITS_PER_LONG - 1);

        spin_lock(&sid_lock);

        BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

        *dirty_space_offset |= (1L << index);
        dirty_space_ids++;

        spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        *ndirtyptr = dirty_space_ids;
        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        dirty_array[i] = dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }
                dirty_space_ids = 0;
        }

        return;
}

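/*
 * recycle_sids() runs once the TLB has been flushed everywhere, so every
 * ID in the dirty array can be returned to the free pool.  A dirty bit is
 * only ever set while the matching space_id[] bit is set, which is why
 * the XOR below simply clears those bits.
 */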
static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (ndirty != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_array[i];
                }

                free_space_ids += ndirty;
                space_id_index = 0;
        }
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }

                free_space_ids += dirty_space_ids;
                dirty_space_ids = 0;
                space_id_index = 0;
        }
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
        int do_recycle;

        do_recycle = 0;
        spin_lock(&sid_lock);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
                BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
                get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
                recycle_inuse++;
                do_recycle++;
        }
        spin_unlock(&sid_lock);
        on_each_cpu(flush_tlb_all_local, NULL, 1);
        if (do_recycle) {
                spin_lock(&sid_lock);
                recycle_sids(recycle_ndirty, recycle_dirty_array);
                recycle_inuse = 0;
                spin_unlock(&sid_lock);
        }
}
#else
void flush_tlb_all(void)
{
        spin_lock(&sid_lock);
        flush_tlb_all_local(NULL);
        recycle_sids();
        spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;
        printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                num_physpages++;
                totalram_pages++;
        }
}
#endif