/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                        (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
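        /*
         * Worked example of the alignment above (illustrative values only,
         * not taken from the original source): a caller asking for
         * phys_addr = 0x10000004 and size = 8 ends up with offset = 0x4,
         * phys_addr = 0x10000000 and, with 4KiB pages,
         * size = PAGE_ALIGN(0x1000000b + 1) - 0x10000000 = 0x1000.
         * The pointer returned at the end of this function is the
         * page-aligned mapping plus the saved offset, so the caller still
         * sees the exact address it asked for.
         */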

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

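/*
 * Illustrative sketch, not compiled in: a minimal driver-style use of the
 * ioremap_nocache()/readl()/iounmap() pattern described above. The base
 * address 0xfebf0000, the 4KiB length and the register offset are
 * hypothetical values chosen only for illustration.
 */
#if 0
static int example_read_device_status(void)
{
        void __iomem *regs;
        u32 status;

        regs = ioremap_nocache(0xfebf0000, 0x1000);     /* map 4KiB uncached */
        if (!regs)
                return -ENOMEM;

        status = readl(regs + 0x10);    /* MMIO read through the mapping */
        iounmap(regs);                  /* every successful ioremap needs an iounmap */

        return status ? 0 : -EIO;
}
#endif
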
/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

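/*
 * Illustrative sketch, not compiled in: a hypothetical write-combined
 * framebuffer mapping (the base address and size are made-up values)
 * showing the behaviour documented above -- when PAT is not enabled the
 * call silently degrades to an uncached mapping via ioremap_nocache().
 */
#if 0
static void __iomem *example_map_framebuffer(void)
{
        /* 8 MiB of linear framebuffer at a hypothetical PCI BAR address */
        return ioremap_wc(0xd0000000, 8 * 1024 * 1024);
}
#endif
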
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

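/*
 * Illustrative sketch, not compiled in: the xlate/unxlate pair above is
 * meant to bracket a temporary access to an arbitrary physical page, as the
 * /dev/mem read path does. The caller, buffer and copy size here are
 * hypothetical.
 */
#if 0
static void example_peek_phys(unsigned long phys, void *buf, size_t count)
{
        void *ptr = xlate_dev_mem_ptr(phys);    /* __va() or a temporary ioremap */

        if (ptr) {
                memcpy(buf, ptr, count);
                unxlate_dev_mem_ptr(phys, ptr); /* iounmaps only if it was remapped */
        }
}
#endif
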
#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
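
/*
 * Illustrative sketch, not compiled in: early_ioremap()/early_iounmap() are
 * intended for boot-time code that runs before the normal ioremap machinery
 * is available, e.g. peeking at a firmware table. The caller, table address
 * and the 64-byte length are hypothetical.
 */
#if 0
static void __init example_peek_firmware_table(unsigned long table_phys)
{
        void *table;

        table = early_ioremap(table_phys, 64);  /* temporary fixmap-based mapping */
        if (table) {
                /* ... inspect the first bytes of the table here ... */
                early_iounmap(table, 64);       /* must balance the early_ioremap */
        }
}
#endif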

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */