[POWERPC] Merge pcibios_resource_to_bus/bus_to_resource
arch/powerpc/kernel/pci-common.c

/*
 * Contains common pci routines for ALL ppc platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *    Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

static DEFINE_SPINLOCK(hose_spinlock);

/* XXX kill that some day ... */
static int global_phb_number;           /* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
        struct pci_controller *phb;

        phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
        if (phb == NULL)
                return NULL;
        spin_lock(&hose_spinlock);
        phb->global_number = global_phb_number++;
        list_add_tail(&phb->list_node, &hose_list);
        spin_unlock(&hose_spinlock);
        phb->dn = dev;
        phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
        if (dev) {
                int nid = of_node_to_nid(dev);

                if (nid < 0 || !node_online(nid))
                        nid = -1;

                PHB_SET_NODE(phb, nid);
        }
#endif
        return phb;
}

void pcibios_free_controller(struct pci_controller *phb)
{
        spin_lock(&hose_spinlock);
        list_del(&phb->list_node);
        spin_unlock(&hose_spinlock);

        if (phb->is_dynamic)
                kfree(phb);
}

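/*
 * Illustrative sketch, not part of the original file: a platform's
 * bridge discovery code might pair pcibios_alloc_controller() with the
 * ranges parser below roughly as follows. The function name
 * example_setup_phbs() is made up for this example.
 */
#if 0   /* example only */
static void __init example_setup_phbs(void)
{
        struct device_node *np;

        for_each_node_by_type(np, "pci") {
                struct pci_controller *hose = pcibios_alloc_controller(np);

                if (hose == NULL)
                        continue;
                /* the platform then fills in config-space ops and parses
                 * the bridge resources from the device tree */
                pci_process_bridge_OF_ranges(hose, np, 0);
        }
}
#endif
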
int pcibios_vaddr_is_ioport(void __iomem *address)
{
        int ret = 0;
        struct pci_controller *hose;
        unsigned long size;

        spin_lock(&hose_spinlock);
        list_for_each_entry(hose, &hose_list, list_node) {
#ifdef CONFIG_PPC64
                size = hose->pci_io_size;
#else
                size = hose->io_resource.end - hose->io_resource.start + 1;
#endif
                if (address >= hose->io_base_virt &&
                    address < (hose->io_base_virt + size)) {
                        ret = 1;
                        break;
                }
        }
        spin_unlock(&hose_spinlock);
        return ret;
}

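/*
 * Illustrative sketch, not part of the original file: the helper above
 * lets an unmap path distinguish a cookie that points into a hose's IO
 * window (which must not be unmapped) from a real ioremap() mapping.
 * example_iounmap() is a made-up name.
 */
#if 0   /* example only */
static void example_iounmap(void __iomem *addr)
{
        if (pcibios_vaddr_is_ioport(addr))
                return;         /* lives in a PHB IO window, leave it */
        iounmap(addr);
}
#endif
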
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);

        return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

#ifdef CONFIG_PPC_OF

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
        if (!have_of)
                return NULL;
        while (node) {
                struct pci_controller *hose, *tmp;
                list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
                        if (hose->dn == node)
                                return hose;
                node = node->parent;
        }
        return NULL;
}

static ssize_t pci_show_devspec(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev;
        struct device_node *np;

        pdev = to_pci_dev(dev);
        np = pci_device_to_OF_node(pdev);
        if (np == NULL || np->full_name == NULL)
                return 0;
        return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_OF */

/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_OF
        return device_create_file(&pdev->dev, &dev_attr_devspec);
#else
        return 0;
#endif /* CONFIG_PPC_OF */
}

char __devinit *pcibios_setup(char *str)
{
        return str;
}

/*
 * Reads the interrupt pin to determine if the interrupt is used by the
 * card. If the interrupt is used, then gets the interrupt line from
 * Open Firmware and sets it in the pci_dev and pci config line.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
        struct of_irq oirq;
        unsigned int virq;

        DBG("Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
        memset(&oirq, 0xff, sizeof(oirq));
#endif
        /* Try to get a mapping from the device-tree */
        if (of_irq_map_pci(pci_dev, &oirq)) {
                u8 line, pin;

                /* If that fails, let's fall back to what is in the config
                 * space and map that through the default controller. We
                 * also set the type to level low since that's what PCI
                 * interrupts are. If your platform does differently, then
                 * either provide a proper interrupt tree or don't use this
                 * function.
                 */
                if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
                        return -1;
                if (pin == 0)
                        return -1;
                if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
                    line == 0xff) {
                        return -1;
                }
                DBG(" -> no map ! Using irq line %d from PCI config\n", line);

                virq = irq_create_mapping(NULL, line);
                if (virq != NO_IRQ)
                        set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
        } else {
                DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
                    oirq.size, oirq.specifier[0], oirq.specifier[1],
                    oirq.controller->full_name);

                virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
                                             oirq.size);
        }
        if (virq == NO_IRQ) {
                DBG(" -> failed to map !\n");
                return -1;
        }

        DBG(" -> mapped to linux irq %d\n", virq);

        pci_dev->irq = virq;

        return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);

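/*
 * Illustrative sketch, not part of the original file: a driver or
 * platform fixup would typically call pci_read_irq_line() so that
 * pci_dev->irq holds a valid Linux virq before requesting it.
 * example_init_one() and example_interrupt() are made-up names.
 */
#if 0   /* example only */
static int example_init_one(struct pci_dev *pdev)
{
        if (pci_read_irq_line(pdev))
                return -ENODEV;         /* no usable interrupt */
        return request_irq(pdev->irq, example_interrupt, IRQF_SHARED,
                           "example", pdev);
}
#endif
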
/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, users find the base address of the device they wish
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns the matching resource on success, NULL on failure.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
                                               resource_size_t *offset,
                                               enum pci_mmap_state mmap_state)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        unsigned long io_offset = 0;
        int i, res_bit;

        if (hose == NULL)
                return NULL;            /* should never happen */

        /* If memory, add on the PCI bridge address offset */
        if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
                *offset += hose->pci_mem_offset;
#endif
                res_bit = IORESOURCE_MEM;
        } else {
                io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                *offset += io_offset;
                res_bit = IORESOURCE_IO;
        }

        /*
         * Check that the offset requested corresponds to one of the
         * resources of the device.
         */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                struct resource *rp = &dev->resource[i];
                int flags = rp->flags;

                /* treat ROM as memory (should be already) */
                if (i == PCI_ROM_RESOURCE)
                        flags |= IORESOURCE_MEM;

                /* Active and same type? */
                if ((flags & res_bit) == 0)
                        continue;

                /* In the range of this resource? */
                if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
                        continue;

                /* found it! construct the final physical address */
                if (mmap_state == pci_mmap_io)
                        *offset += hose->io_base_phys - io_offset;
                return rp;
        }

        return NULL;
}

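/*
 * Illustrative sketch, not part of the original file: the user-space
 * side of the procedure described above. The device path, BAR value
 * and length are made up; PCIIOC_MMAP_IS_MEM selects memory space.
 */
#if 0   /* example only, user-space code */
        int fd = open("/proc/bus/pci/00/0c.0", O_RDWR);

        ioctl(fd, PCIIOC_MMAP_IS_MEM);  /* map memory space, not IO */
        /* offset = 32-bit BAR value (flag bits masked off), plus any
         * PAGE_SIZE multiple the user wants into the region */
        void *p = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, 0x80000000);
#endif
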
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
                                      pgprot_t protection,
                                      enum pci_mmap_state mmap_state,
                                      int write_combine)
{
        unsigned long prot = pgprot_val(protection);

        /* Write combine is always 0 on non-memory space mappings. On
         * memory space, if the user didn't pass 1, we check for a
         * "prefetchable" resource. This is a bit hackish, but we use
         * this to work around the inability of /sysfs to provide a
         * write combine bit
         */
        if (mmap_state != pci_mmap_mem)
                write_combine = 0;
        else if (write_combine == 0) {
                if (rp->flags & IORESOURCE_PREFETCH)
                        write_combine = 1;
        }

        /* XXX would be nice to have a way to ask for write-through */
        prot |= _PAGE_NO_CACHE;
        if (write_combine)
                prot &= ~_PAGE_GUARDED;
        else
                prot |= _PAGE_GUARDED;

        return __pgprot(prot);
}

/*
 * This one is used by /dev/mem and fbdev, which have no clue about the
 * PCI device; it tries to find the PCI device first and calls the
 * above routine.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
                                  unsigned long pfn,
                                  unsigned long size,
                                  pgprot_t protection)
{
        struct pci_dev *pdev = NULL;
        struct resource *found = NULL;
        unsigned long prot = pgprot_val(protection);
        unsigned long offset = pfn << PAGE_SHIFT;
        int i;

        if (page_is_ram(pfn))
                return __pgprot(prot);

        prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

        for_each_pci_dev(pdev) {
                for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                        struct resource *rp = &pdev->resource[i];
                        int flags = rp->flags;

                        /* Active and same type? */
                        if ((flags & IORESOURCE_MEM) == 0)
                                continue;
                        /* In the range of this resource? */
                        if (offset < (rp->start & PAGE_MASK) ||
                            offset > rp->end)
                                continue;
                        found = rp;
                        break;
                }
                if (found)
                        break;
        }
        if (found) {
                if (found->flags & IORESOURCE_PREFETCH)
                        prot &= ~_PAGE_GUARDED;
                pci_dev_put(pdev);
        }

        DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

        return __pgprot(prot);
}

/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture. The region in the process to map
 * is described by vm_start and vm_end members of VMA; the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make
 * mapping decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
        struct resource *rp;
        int ret;

        rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
        if (rp == NULL)
                return -EINVAL;

        vma->vm_pgoff = offset >> PAGE_SHIFT;
        vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
                                                  vma->vm_page_prot,
                                                  mmap_state, write_combine);

        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                              vma->vm_end - vma->vm_start, vma->vm_page_prot);

        return ret;
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          const struct resource *rsrc,
                          resource_size_t *start, resource_size_t *end)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        resource_size_t offset = 0;

        if (hose == NULL)
                return;

        if (rsrc->flags & IORESOURCE_IO)
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;

        /* We pass a fully fixed up address to userland for MMIO instead of
         * a BAR value because X is lame and expects to be able to use that
         * to pass to /dev/mem !
         *
         * That means that we'll have potentially 64-bit values where some
         * userland apps only expect 32 (like X itself since it thinks only
         * Sparc has 64-bit MMIO) but if we don't do that, we break it on
         * 32-bit CHRPs :-(
         *
         * Hopefully, the sysfs interface is immune to that gunk. Once X
         * has been fixed (and the fix spread enough), we can re-enable the
         * 2 lines below and pass down a BAR value to userland. In that case
         * we'll also have to re-enable the matching code in
         * __pci_mmap_make_offset().
         *
         * BenH.
         */
#if 0
        else if (rsrc->flags & IORESOURCE_MEM)
                offset = hose->pci_mem_offset;
#endif

        *start = rsrc->start - offset;
        *end = rsrc->end - offset;
}

/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - We can only cope with all memory ranges having the same offset
 *     between CPU addresses and PCI addresses. Unfortunately, some bridges
 *     are set up for a large 1:1 mapping along with a small "window" which
 *     maps PCI address 0 to some arbitrary high address of the CPU space in
 *     order to give access to the ISA memory hole.
 *     The way out of here that I've chosen for now is to always set the
 *     offset based on the first resource found, then override it if we
 *     have a different offset and the previous was set by an ISA hole.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32-bit platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64-bit values for the parsing
 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                            struct device_node *dev,
                                            int primary)
{
        const u32 *ranges;
        int rlen;
        int pna = of_n_addr_cells(dev);
        int np = pna + 5;
        int memno = 0, isa_hole = -1;
        u32 pci_space;
        unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
        unsigned long long isa_mb = 0;
        struct resource *res;

        printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
               dev->full_name, primary ? "(primary)" : "");

        /* Get ranges property */
        ranges = of_get_property(dev, "ranges", &rlen);
        if (ranges == NULL)
                return;

        /* Parse it */
        while ((rlen -= np * 4) >= 0) {
                /* Read next ranges element */
                pci_space = ranges[0];
                pci_addr = of_read_number(ranges + 1, 2);
                cpu_addr = of_translate_address(dev, ranges + 3);
                size = of_read_number(ranges + pna + 3, 2);
                ranges += np;
                if (cpu_addr == OF_BAD_ADDR || size == 0)
                        continue;

                /* Now consume following elements while they are contiguous */
                for (; rlen >= np * sizeof(u32);
                     ranges += np, rlen -= np * 4) {
                        if (ranges[0] != pci_space)
                                break;
                        pci_next = of_read_number(ranges + 1, 2);
                        cpu_next = of_translate_address(dev, ranges + 3);
                        if (pci_next != pci_addr + size ||
                            cpu_next != cpu_addr + size)
                                break;
                        size += of_read_number(ranges + pna + 3, 2);
                }

                /* Act based on address space type */
                res = NULL;
                switch ((pci_space >> 24) & 0x3) {
                case 1:         /* PCI IO space */
                        printk(KERN_INFO
                               " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
                               cpu_addr, cpu_addr + size - 1, pci_addr);

                        /* We support only one IO range */
                        if (hose->pci_io_size) {
                                printk(KERN_INFO
                                       " \\--> Skipped (too many) !\n");
                                continue;
                        }
#ifdef CONFIG_PPC32
                        /* On 32 bits, limit I/O space to 16MB */
                        if (size > 0x01000000)
                                size = 0x01000000;

                        /* 32 bits needs to map IOs here */
                        hose->io_base_virt = ioremap(cpu_addr, size);

                        /* Expect trouble if pci_addr is not 0 */
                        if (primary)
                                isa_io_base =
                                        (unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
                        /* pci_io_size and io_base_phys always represent IO
                         * space starting at 0 so we factor in pci_addr
                         */
                        hose->pci_io_size = pci_addr + size;
                        hose->io_base_phys = cpu_addr - pci_addr;

                        /* Build resource */
                        res = &hose->io_resource;
                        res->flags = IORESOURCE_IO;
                        res->start = pci_addr;
                        break;
                case 2:         /* PCI Memory space */
                        printk(KERN_INFO
                               " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
                               cpu_addr, cpu_addr + size - 1, pci_addr,
                               (pci_space & 0x40000000) ? "Prefetch" : "");

                        /* We support only 3 memory ranges */
                        if (memno >= 3) {
                                printk(KERN_INFO
                                       " \\--> Skipped (too many) !\n");
                                continue;
                        }
                        /* Handle ISA memory hole space here */
                        if (pci_addr == 0) {
                                isa_mb = cpu_addr;
                                isa_hole = memno;
                                if (primary || isa_mem_base == 0)
                                        isa_mem_base = cpu_addr;
                        }

                        /* We get the PCI/Mem offset from the first range or
                         * the current one if the offset came from an ISA
                         * hole. If they don't match, bugger.
                         */
                        if (memno == 0 ||
                            (isa_hole >= 0 && pci_addr != 0 &&
                             hose->pci_mem_offset == isa_mb))
                                hose->pci_mem_offset = cpu_addr - pci_addr;
                        else if (pci_addr != 0 &&
                                 hose->pci_mem_offset != cpu_addr - pci_addr) {
                                printk(KERN_INFO
                                       " \\--> Skipped (offset mismatch) !\n");
                                continue;
                        }

                        /* Build resource */
                        res = &hose->mem_resources[memno++];
                        res->flags = IORESOURCE_MEM;
                        if (pci_space & 0x40000000)
                                res->flags |= IORESOURCE_PREFETCH;
                        res->start = cpu_addr;
                        break;
                }
                if (res != NULL) {
                        res->name = dev->full_name;
                        res->end = res->start + size - 1;
                        res->parent = NULL;
                        res->sibling = NULL;
                        res->child = NULL;
                }
        }

        /* Out of paranoia, let's put the ISA hole last if any */
        if (isa_hole >= 0 && memno > 0 && isa_hole != (memno - 1)) {
                struct resource tmp = hose->mem_resources[isa_hole];
                hose->mem_resources[isa_hole] = hose->mem_resources[memno-1];
                hose->mem_resources[memno-1] = tmp;
        }
}
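
/*
 * Illustrative sketch, not part of the original file: the shape of a
 * "ranges" property the parser above consumes, in device-tree source
 * form with made-up values and #address-cells = <1> in the parent
 * (so pna = 1 and each entry is np = 6 cells):
 *
 *      ranges = <0x01000000 0x0 0x00000000  0xe8000000  0x0 0x00100000
 *                0x02000000 0x0 0x80000000  0x80000000  0x0 0x10000000>;
 *
 * The first entry ((pci_space >> 24) & 0x3 == 1) is a 1MB IO window at
 * CPU 0xe8000000; the second (type 2) is a 256MB 1:1 memory range.
 */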

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
#ifdef CONFIG_PPC64
        return hose->buid != 0;
#else
        if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
                return 0;
        if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
                return hose->global_number != 0;
        return 1;
#endif
}

void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
                             struct resource *res)
{
        resource_size_t offset = 0, mask = (resource_size_t)-1;
        struct pci_controller *hose = pci_bus_to_host(dev->bus);

        if (!hose)
                return;
        if (res->flags & IORESOURCE_IO) {
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                mask = 0xffffffffu;
        } else if (res->flags & IORESOURCE_MEM)
                offset = hose->pci_mem_offset;

        region->start = (res->start - offset) & mask;
        region->end = (res->end - offset) & mask;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
                             struct pci_bus_region *region)
{
        resource_size_t offset = 0, mask = (resource_size_t)-1;
        struct pci_controller *hose = pci_bus_to_host(dev->bus);

        if (!hose)
                return;
        if (res->flags & IORESOURCE_IO) {
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                mask = 0xffffffffu;
        } else if (res->flags & IORESOURCE_MEM)
                offset = hose->pci_mem_offset;
        res->start = (region->start + offset) & mask;
        res->end = (region->end + offset) & mask;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);
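
/*
 * Illustrative sketch, not part of the original file: a worked example
 * of the two translations above, with made-up numbers. Assume a hose
 * with pci_mem_offset = 0x80000000 and a memory BAR resource starting
 * at CPU address 0xc0000000.
 */
#if 0   /* example only */
        struct pci_bus_region region;

        pcibios_resource_to_bus(dev, &region, res);
        /* region.start == 0x40000000: the address the device decodes
         * on the PCI bus (0xc0000000 - 0x80000000) */

        pcibios_bus_to_resource(dev, res, &region);
        /* res->start == 0xc0000000 again: the inverse translation */
#endif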