/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"
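
/*
 * Temporary (atomic) highmem mappings go through a small window of
 * per-CPU fixmap slots: each CPU owns KM_TYPE_NR consecutive page
 * slots starting at FIX_KMAP_BEGIN, indexed by the km_type argument
 * passed to the kmap_atomic() variants below.  TOP_PTE(), provided by
 * "mm.h", yields the kernel PTE covering such a fixmap address.
 * Sleepable mappings set up with kmap() use the generic kmap_high()
 * pool instead.
 */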

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
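
/*
 * A minimal usage sketch for the sleepable pair above, assuming "page"
 * is a (possibly highmem) page obtained elsewhere, e.g. from
 * alloc_page(GFP_HIGHUSER):
 *
 *	void *addr = kmap(page);
 *	memset(addr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * kmap() may sleep, so it cannot be called from interrupt or other
 * atomic context; use kmap_atomic() there instead.
 */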

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		__cpuc_flush_dcache_page((void *)vaddr);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	}
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
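
/*
 * A minimal usage sketch for the atomic pair above, assuming "page" is
 * a (possibly highmem) page and "buf" a hypothetical kernel buffer of
 * at least PAGE_SIZE bytes:
 *
 *	void *addr = kmap_atomic(page, KM_USER0);
 *	memcpy(buf, addr, PAGE_SIZE);
 *	kunmap_atomic(addr, KM_USER0);
 *
 * Page faults are disabled between the two calls and the mapping is
 * strictly per-CPU, so the code in between must not sleep.  Note that
 * kunmap_atomic() takes the virtual address returned by kmap_atomic(),
 * not the struct page, and the km_type slot must match.
 */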

/*
 * Like kmap_atomic(), but takes a page frame number rather than a
 * struct page pointer, so the frame being mapped does not need to
 * have a struct page behind it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}

struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}