/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>

extern void device_scan(void);

#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
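/* With MAX_PHYS_ADDRESS = 2^42 and 256MB (2^28) chunks, the bitmap
 * below covers 2^14 chunks: 16384 bits, i.e. 2048 bytes.
 */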

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

/* A special kernel TSB for 4MB and 256MB linear mappings. */
struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int pavail_rescan_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	*num_ents = ents;

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}
	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base __read_mostly;
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
unsigned long pfn_base __read_mostly;

/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS	(1UL << (CTX_NR_BITS - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to special section boundaries */
extern char _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int bigkernel = 0;

kmem_cache_t *pgtable_cache __read_mostly;

static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	clear_page(addr);
}

void pgtable_cache_init(void)
{
	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_MUST_HWCACHE_ALIGN,
					  zero_ctor,
					  NULL);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}
}

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	24
#define PG_dcache_cpu_mask	(256 - 1)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

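/* set_dcache_dirty() and clear_dcache_dirty_cpu() maintain the deferred
 * D-cache flush state locklessly: the owning cpu number and the dirty
 * flag are merged into page->flags with a casx compare-and-swap loop
 * that retries until it wins.
 */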
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

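/* cheetah_plus and sun4v TSB accesses are patched to use physical
 * addresses (see tsb_phys_patch() below), so convert the entry pointer
 * with __pa() before handing it to __tsb_insert().
 */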
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

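/* Called once a fault has been resolved: preload the new translation
 * into this mm's TSB and, on non-hypervisor chips, perform (or IPI
 * for) any D-cache flush that flush_dcache_page() deferred.
 */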
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;
	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
			       (mm->context.tsb_nentries - 1UL)];
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long) ((page - mem_map) + pfn_base);
}

struct page *pfn_to_page(unsigned long pfn)
{
	return (mem_map + (pfn - pfn_base));
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read the OBP translations property into 'prom_trans[]', sort it by
 * virtual address, and compact it so that only the entries inside the
 * LOW_OBP_ADDRESS -> HI_OBP_ADDRESS window remain, starting at index 0.
 */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}

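/* sun4v hypervisor fast trap convention: the function number goes in
 * %o5 and arguments in %o0-%o3, then "ta 0x80" (HV_FAST_TRAP) enters
 * the hypervisor; the error status comes back in %o0.
 */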
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	register unsigned long arg2 asm("%o2");
	register unsigned long arg3 asm("%o3");

	func = HV_FAST_MMU_MAP_PERM_ADDR;
	arg0 = vaddr;
	arg1 = 0;
	arg2 = pte;
	arg3 = mmu;
	__asm__ __volatile__("ta 0x80"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2),
			       "=&r" (arg3)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "3" (arg2), "4" (arg3));
	if (arg0 != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, arg0);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

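/* Re-establish the kernel's locked 4MB text/data mapping (two 4MB
 * mappings for a "bigkernel"), via hypervisor calls on sun4v or OBP
 * services on sun4u, and record the TTE for SMP bootup.
 */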
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		if (bigkernel) {
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		}
	} else {
		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
		if (bigkernel) {
			tlb_ent -= 1;
			prom_dtlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
			prom_itlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	read_obp_translations();

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();
	prom_printf("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

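/* The spitfire D-cache is flushed by hand below: 512 lines of 32 bytes
 * (16K, direct mapped, as the loop assumes) are tag-invalidated, which
 * bounds the loop at 512 iterations.  Cheetah chips instead invalidate
 * by physical address through ASI_DCACHE_INVALIDATE.
 */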
#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus), nor ever use a CTX
 * version of zero (otherwise NO_CONTEXT would not be caught
 * by the version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk (" ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}

void sparc_ultra_dump_dtlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk (" ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}

extern unsigned long cmdline_memory_size;

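/* Set up the boot-time allocator: honor any "mem=" limit recorded in
 * cmdline_memory_size, place the bootmap after the kernel image (and
 * past the initrd, if one was loaded), hand all available physical
 * memory to bootmem, then reserve the kernel, the initrd, and the
 * bootmap itself.
 */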
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan pavail, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; i < pavail_ents; i++) {
		end_of_phys_memory = pavail[i].phys_addr +
			pavail[i].reg_size;
		bytes_avail += pavail[i].reg_size;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				pavail[i].reg_size -= slack;
				if ((long)pavail[i].reg_size <= 0L) {
					pavail[i].phys_addr = 0xdeadbeefUL;
					pavail[i].reg_size = 0UL;
					pavail_ents = i;
				} else {
					pavail[i+1].reg_size = 0UL;
					pavail[i+1].phys_addr = 0xdeadbeefUL;
					pavail_ents = i + 1;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
			    i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

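/* With CONFIG_DEBUG_PAGEALLOC the linear kernel mapping is also backed
 * by real page tables built from base pages, so that kernel_map_pages()
 * below can map and unmap individual pages to catch stray references.
 */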
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

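/* Mark each fully covered, 256MB-aligned chunk of [start, end) in
 * kpte_linear_bitmap; the kernel TLB miss handler in ktlb.S then uses
 * the 256MB kern_linear_pte_xor[1] encoding for those chunks and the
 * 4MB kern_linear_pte_xor[0] encoding for everything else.
 */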
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init kernel_physical_mapping_init(void)
{
	unsigned long i;
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long mem_alloced = 0UL;
#endif

	read_obp_memory("reg", &pall[0], &pall_ents);

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);

#ifdef CONFIG_DEBUG_PAGEALLOC
		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
#endif
	}

#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor. */
static struct hv_tsb_descr ktsb_descr[2];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings. */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

	/* Second KTSB for 4MB/256MB mappings. */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
}

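/* Register both kernel TSB descriptors with the hypervisor for
 * context zero via the HV_FAST_MMU_TSB_CTX0 fast trap (arg0 is the
 * descriptor count, arg1 the real address of the descriptor array).
 */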
void __cpuinit sun4v_ktsb_register(void)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	unsigned long pa;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	func = HV_FAST_MMU_TSB_CTX0;
	arg0 = 2;
	arg1 = pa;
	__asm__ __volatile__("ta %6"
			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "i" (HV_FAST_TRAP));
}

/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift;
	unsigned long real_end, i;

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs. */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory... */
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++)
		phys_base = min(phys_base, pavail[i].phys_addr);

	pfn_base = phys_base >> PAGE_SHIFT;

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely. */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

	kernel_physical_mapping_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}

static void __init taint_real_pages(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	/* Find changes discovered in the physmem available rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start +
			pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U	(_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V	(_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

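/* protection_map[] is indexed by the low four VM_{READ,WRITE,EXEC,SHARED}
 * bits of vma->vm_flags: entries 0x0-0x7 are the private (copy-on-write)
 * protections, 0x8-0xf the shared ones.  prot_init_common() fills both
 * halves from the chip-specific protection bits passed in.
 */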
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000;
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000;
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000;
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	unsigned long mask;
	int i;

	mask = _PAGE_PADDR_4U;
	if (tlb_type == hypervisor)
		mask = _PAGE_PADDR_4V;

	for (i = 0; i < prom_trans_ents; i++) {
		struct linux_prom_translation *p = &prom_trans[i];

		if (promva >= p->virt &&
		    promva < (p->virt + p->size)) {
			unsigned long base = p->data & mask;

			if (error)
				*error = 0;
			return base + (promva & (8192 - 1));
		}
	}
	if (error)
		*error = 1;
	return 0UL;
}

/* XXX We should kill off this ugly thing at some point. XXX */
unsigned long sun4u_get_pte(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long mask = _PAGE_PADDR_4U;

	if (tlb_type == hypervisor)
		mask = _PAGE_PADDR_4V;

	if (addr >= PAGE_OFFSET)
		return addr & mask;

	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
		return prom_virt_to_phys(addr, NULL);

	pgdp = pgd_offset_k(addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset_kernel(pmdp, addr);

	return pte_val(*ptep) & mask;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}