/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

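/*
 * from_address and to_address are fixed kernel virtual windows, each
 * large enough for one mapping per cache colour (16K, hence the SHMLBA
 * check above).  The aliasing copy/clear paths below remap pages into
 * these windows, serialised by the v6_lock defined next.
 */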
static DEFINE_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

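	/* Both pages may live in highmem, so map them with atomic kmaps. */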
	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
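	/*
	 * Flush the new copy out of the D-cache, presumably so that a
	 * Harvard I-cache does not pick up stale data if the page is
	 * later executed (e.g. a copied page of program text).
	 */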
	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: this needs the MCRR instruction to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
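	/*
	 * "mcrr p15, 0, <end>, <start>, c6" is the ARMv6 invalidate data
	 * cache range operation: any stale lines covering this page are
	 * dropped without being written back, since the page is about to
	 * be overwritten anyway.
	 */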
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
		:
		: "r" (kto),
		  "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
		: "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

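	/*
	 * If the source page may still have dirty lines in the cache from
	 * its kernel mapping (PG_dcache_clean not yet set), flush them out
	 * first: the copy below reads the page through a differently
	 * coloured alias and would otherwise miss that data.
	 */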
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);

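	/*
	 * Install temporary kernel mappings for both pages in the window
	 * slot whose colour matches the user address, and make sure no
	 * stale TLB entries remain for those addresses before using them.
	 */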
	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto = to_address + (offset << PAGE_SHIFT);

	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}

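/*
 * Default to the non-aliasing versions; v6_userpage_init() switches to
 * the aliasing implementations at boot when the cache is found to be
 * VIPT aliasing.
 */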
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);