]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #include <linux/module.h> |
2 | #include <linux/highmem.h> | |
52ab320a | 3 | #include <linux/sched.h> |
631330f5 | 4 | #include <linux/smp.h> |
bb86bf28 | 5 | #include <asm/fixmap.h> |
1da177e4 LT |
6 | #include <asm/tlbflush.h> |
7 | ||
bb86bf28 RB |
8 | static pte_t *kmap_pte; |
9 | ||
10 | unsigned long highstart_pfn, highend_pfn; | |
11 | ||
/*
 * Map a page into the kernel's permanent kmap area and return its
 * virtual address. Low-memory pages are returned directly; highmem
 * pages go through kmap_high() and get a TLB flush for the new
 * mapping. May sleep — must not be called from atomic context.
 */
void *kmap(struct page *page)
{
	unsigned long vaddr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);

	vaddr = (unsigned long)kmap_high(page);
	flush_tlb_one(vaddr);
	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap);
1da177e4 | 25 | |
/*
 * Release a mapping established by kmap(). Only highmem pages have a
 * kmap entry to drop; low-memory pages are permanently mapped, so this
 * is a no-op for them. Must not be called from interrupt context.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
1da177e4 LT |
34 | |
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
/*
 * Atomically map @page into one of this CPU's per-CPU fixmap kmap slots.
 * Disables pagefaults for the duration of the mapping; the caller must
 * not sleep until the matching __kunmap_atomic(). Low-memory pages are
 * returned directly via page_address() (pagefaults stay disabled — the
 * matching __kunmap_atomic() re-enables them).
 */
void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Grab the next free atomic-kmap slot type for this CPU. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must be empty — a live pte here means a missing kunmap_atomic. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	/* Install the mapping and flush only this CPU's TLB entry for it. */
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
1da177e4 | 66 | |
/*
 * Tear down a mapping created by __kmap_atomic() and re-enable
 * pagefaults. Addresses below FIXADDR_START were low-memory pages that
 * were never remapped, so only the pagefault state is restored for them.
 * In non-debug builds the pte is intentionally left in place — the slot
 * is simply recycled on the next __kmap_atomic().
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	/* Low-memory address: nothing was mapped, just undo pagefault_disable(). */
	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	/* Pop the slot type pushed by the matching __kmap_atomic(). */
	type = kmap_atomic_idx_pop();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		/* The address handed back must match the slot we are popping. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
1da177e4 | 95 | |
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	/* Grab the next free atomic-kmap slot for this CPU (see __kmap_atomic). */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	/*
	 * NOTE(review): this uses the global flush_tlb_one() where
	 * __kmap_atomic() uses local_flush_tlb_one() for the same kind of
	 * per-CPU slot — verify whether the global flush is intentional.
	 */
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}
115 | ||
3e4d3af5 | 116 | struct page *kmap_atomic_to_page(void *ptr) |
1da177e4 LT |
117 | { |
118 | unsigned long idx, vaddr = (unsigned long)ptr; | |
119 | pte_t *pte; | |
120 | ||
121 | if (vaddr < FIXADDR_START) | |
122 | return virt_to_page(ptr); | |
123 | ||
124 | idx = virt_to_fix(vaddr); | |
125 | pte = kmap_pte - (idx - FIX_KMAP_BEGIN); | |
126 | return pte_page(*pte); | |
127 | } | |
128 | ||
bb86bf28 RB |
129 | void __init kmap_init(void) |
130 | { | |
131 | unsigned long kmap_vstart; | |
132 | ||
133 | /* cache the first kmap pte */ | |
134 | kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); | |
135 | kmap_pte = kmap_get_fixmap_pte(kmap_vstart); | |
136 | } |