#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

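/*
 * Usage sketch (an assumption, not part of the original file): the sleeping
 * kmap()/kunmap() pair suits longer-lived mappings made from process
 * context. example_zero_page() is a hypothetical name used only for
 * illustration.
 */
static void __maybe_unused example_zero_page(struct page *page)
{
        void *addr = kmap(page);        /* may sleep until a kmap slot is free */

        memset(addr, 0, PAGE_SIZE);
        kunmap(page);                   /* note: takes the page, not the address */
}
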
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only (see the usage
 * sketch after kmap_atomic() below).
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();

        if (!PageHighMem(page))
                return page_address(page);

        debug_kmap_atomic(type);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
        return kmap_atomic_prot(page, type, kmap_prot);
}
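
/*
 * Usage sketch (an assumption, not part of the original file): a typical
 * short, atomic copy out of a (possibly highmem) page. The function name
 * example_copy_from_page() and the KM_USER0 slot are illustrative; any
 * slot works as long as map/unmap pair up and nothing sleeps in between.
 */
static void __maybe_unused example_copy_from_page(struct page *page, void *dst)
{
        void *src = kmap_atomic(page, KM_USER0);

        memcpy(dst, src, PAGE_SIZE);    /* no sleeping while the mapping is held */
        kunmap_atomic(src, KM_USER0);
}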

void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        /*
         * Force other mappings to Oops if they'll try to access this pte
         * without first remapping it. Keeping stale mappings around is also
         * a bad idea, in case the page changes cacheability attributes or
         * becomes a protected page in a hypervisor.
         */
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
                kpte_clear_flush(kmap_pte-idx, vaddr);
        else {
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
        }

        arch_flush_lazy_mmu_mode();
        pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
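
/*
 * Usage sketch (an assumption, not part of the original file): peeking at
 * one word of a frame that has no struct page, e.g. a reserved firmware
 * frame. example_peek_pfn() is a hypothetical name; KM_USER0 is arbitrary.
 */
static unsigned long __maybe_unused example_peek_pfn(unsigned long pfn)
{
        unsigned long *p = kmap_atomic_pfn(pfn, KM_USER0);
        unsigned long val = *p;

        kunmap_atomic(p, KM_USER0);     /* tear down before anything can sleep */
        return val;
}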

/* Resolve an address returned by kmap_atomic() back to its struct page;
 * lowmem linear addresses are handled directly, fixmap slots via the pte. */
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);

/*
 * Hand every highmem zone's pages to the page allocator and fold the
 * highmem page count into the total RAM accounting.
 */
void __init set_highmem_pages_init(void)
{
        struct zone *zone;
        int nid;

        for_each_zone(zone) {
                unsigned long zone_start_pfn, zone_end_pfn;

                if (!is_highmem(zone))
                        continue;

                zone_start_pfn = zone->zone_start_pfn;
                zone_end_pfn = zone_start_pfn + zone->spanned_pages;

                nid = zone_to_nid(zone);
                printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
                        zone->name, nid, zone_start_pfn, zone_end_pfn);

                add_highpages_with_active_regions(nid, zone_start_pfn,
                                                  zone_end_pfn);
        }
        totalram_pages += totalhigh_pages;
}