/*
 * Contains common pci routines for ALL ppc platform
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <linux/io.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>

static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

unsigned long isa_io_base;
static int pci_bus_count;

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
	return phb;
}

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}

static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
	return resource_size(&hose->io_resource);
}

int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
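/*
 * Illustrative sketch (not from the original file): a platform driver
 * that only knows the CPU physical address of an I/O window can recover
 * the port token that inb()/outb() expect.  The function name and the
 * IORESOURCE_IO resource "res" are hypothetical.
 */
#if 0	/* example only, not built */
static int example_use_pio(struct resource *res)
{
	unsigned long port = pci_address_to_pio(res->start);

	if (port == (unsigned long)~0)	/* no hose covers that address */
		return -ENODEV;
	outb(0, port);		/* port is now usable with outb()/inb() */
	return 0;
}
#endif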
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns a pointer to the matching resource on success, NULL on failure.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (!hose)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
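/*
 * Illustrative user-space sketch (hypothetical paths and BAR, not part
 * of the original file) of the protocol described above: read a memory
 * BAR from config space, then feed a page-multiple offset based on it
 * to mmap() on the /proc/bus/pci node:
 *
 *	int fd = open("/proc/bus/pci/00/10.0", O_RDWR);
 *	uint32_t bar0;
 *	pread(fd, &bar0, 4, PCI_BASE_ADDRESS_0);
 *	bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
 *	ioctl(fd, PCIIOC_MMAP_IS_MEM);
 *	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, bar0);
 */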
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	pgprot_t prot = protection;

	/* Write combine is always 0 on non-memory space mappings.  On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource.  This is a bit hackish, but we use
	 * this to work around the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	return pgprot_noncached(prot);
}

/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
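/*
 * Illustrative sketch (an assumption about the wiring, not code from
 * this file): pci_phys_mem_access_prot() is meant to back the
 * architecture's phys_mem_access_prot() hook, so a /dev/mem mmap() of
 * PCI space would reach it roughly like this:
 *
 *	pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 *				      unsigned long size, pgprot_t vma_prot)
 *	{
 *		return pci_phys_mem_access_prot(file, pfn, size, vma_prot);
 *	}
 */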
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus.  We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here.  So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus.  We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here.  So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic.  It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
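/*
 * Illustrative sketch (hypothetical helper, not from the original file):
 * the sysfs "legacy_io" machinery calls the two helpers above.  A size-1
 * read such as the VGA input status register at legacy port 0x3da would
 * go roughly like this; note the byte lands at the lowest address of the
 * u32, matching the in_8() store in pci_legacy_read().
 */
#if 0	/* example only, not built */
static u8 example_read_vga_status(struct pci_bus *bus)
{
	u32 val = 0;

	if (pci_legacy_read(bus, 0x3da, &val, 1) != 1)
		return 0xff;
	return *(u8 *)&val;
}
#endif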
/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error
		 * trying to mmap legacy_mem (instead of just moving on without
		 * legacy memory access) we fake it here by giving it anonymous
		 * memory, effectively behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			pr_debug("Process %s (pid:%d) mapped non-existing PCI",
				 current->comm, current->pid);
			pr_debug(" legacy memory for %04x:%02x\n",
				 pci_domain_nr(bus), bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
								_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk.  Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland.  In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
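/*
 * Worked example (illustrative numbers, not from the original file): for
 * an I/O BAR, the subtraction above undoes the hose remapping.  Assuming
 * io_base_virt - _IO_BASE == 0x1000 and a kernel-side BAR resource of
 * 0x1400-0x141f, userland is shown 0x400-0x41f, i.e. the bus port range
 * it can pass back through the mmap offset protocol.
 */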
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges.  However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges.  So we have to coalesce.
 *
 *   - We can only cope with all memory ranges having the same offset
 *     between CPU addresses and PCI addresses.  Unfortunately, some bridges
 *     are setup for a large 1:1 mapping along with a small "window" which
 *     maps PCI address 0 to some arbitrary high address of the CPU space in
 *     order to give access to the ISA memory hole.
 *     The way out of here that I've chosen for now is to always set the
 *     offset based on the first resource found, then override it if we
 *     have a different offset and the previous was set by an ISA hole.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering.  The code somewhat deals
 *     with it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0, isa_hole = -1;
	unsigned long long isa_mb = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	pr_info("PCI host bridge %s %s ranges:\n",
		dev->full_name, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
			 range.pci_space, range.pci_addr);
		pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
			 range.cpu_addr, range.size);

		/* If we failed translation or got a zero-sized region
		 * (some firmware tries to feed us with nonsensical zero-sized
		 * regions, such as power3, which look like some kind of
		 * attempt at exposing the VGA memory hole), skip it.
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pr_info("  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
				range.cpu_addr,
				range.cpu_addr + range.size - 1,
				range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;

			break;
		case IORESOURCE_MEM:
			pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
				range.cpu_addr,
				range.cpu_addr + range.size - 1,
				range.pci_addr,
				(range.pci_space & 0x40000000) ?
				"Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				isa_mb = range.cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the current one if the offset came from an ISA
			 * hole.  If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && range.pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = range.cpu_addr -
							range.pci_addr;
			else if (range.pci_addr != 0 &&
				 hose->pci_mem_offset != range.cpu_addr -
							range.pci_addr) {
				pr_info(" \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}

	/* If there's an ISA hole and the pci_mem_offset is -not- matching
	 * the ISA hole offset, then we need to remove the ISA hole from
	 * the resource list for that bridge
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
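/*
 * Illustrative sketch (hypothetical values, not from the original file):
 * a host bridge node that the parser above accepts could carry a
 * "ranges" property with one IO window and one 32-bit memory window:
 *
 *	ranges = <0x01000000 0x0 0x00000000  0xe8000000  0x0 0x00010000
 *		  0x02000000 0x0 0x80000000  0x80000000  0x0 0x20000000>;
 *
 * Per entry: the first cell encodes the space (0x01000000 IO, 0x02000000
 * 32-bit MEM), the next two are the PCI address, then the CPU address
 * (one cell on this hypothetical 32-bit parent), then a two-cell size.
 */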
/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	return 0;
}

/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		pr_err("No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->start == 0) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] ",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			pr_debug("is unassigned\n");
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not.  It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right.  It should cover cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned
 * bridges
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	u16 command;
	int i;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		/* If the BAR is non-0 (res != pci_mem_offset) then it's
		 * probably been initialized by somebody
		 */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge.  If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0.  If any of
		 * the bridge resources covers that starting address (0),
		 * then it's good enough for us for memory
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start ==
						hose->pci_mem_offset)
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may
		 * as well consider it as unassigned.  That covers the Apple
		 * case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid.  What we do
		 * instead is that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge,
		 * consider it unassigned
		 */
		return 1;
	}
}

/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;
		if (!res->flags)
			continue;
		if (i >= 3 && bus->self->transparent)
			continue;

		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		/* Try to detect uninitialized P2P bridge resources,
		 * and clear them out so they get re-assigned later
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s            (unassigned)\n",
				 pci_name(dev));
		} else {
			pr_debug("PCI:%s            %016llx-%016llx\n",
				 pci_name(dev),
				 (unsigned long long)res->start,
				 (unsigned long long)res->end);
		}
	}
}

void pcibios_setup_bus_self(struct pci_bus *bus)
{
	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);
}
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Setup OF node pointer in archdata */
		dev->dev.of_node = pci_device_to_OF_node(dev);

		/* Fixup NUMA node as it may not be setup yet by the generic
		 * code and is needed by the DMA init
		 */
		set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

		/* Read default IRQs and fixup if necessary */
		dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
	}
}

void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases.  This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	if (bus->self != NULL)
		pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);

	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	return 0;
}

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address.  The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
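/*
 * Worked example (illustrative, not from the original file): with parent
 * [0x1000-0x7fff] holding children A [0x1000-0x1fff] and B [0x3000-0x3fff],
 * reparent_resources(parent, res) for res [0x1000-0x4fff] produces:
 *
 *	parent [0x1000-0x7fff]
 *	    res [0x1000-0x4fff]
 *	        A [0x1000-0x1fff]
 *	        B [0x3000-0x3fff]
 *
 * A child straddling the end of res would instead make the function
 * return -1, since it cannot be completely contained.
 */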
/*
 * Handle resources of PCI devices.  If the world were perfect, we could
 * just allocate all the resource regions and do nothing more.  It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals.  So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
 *
 * Known BIOS problems we have to work around:
 *	- I/O or memory regions not configured
 *	- regions configured, but not enabled in the command register
 *	- bogus I/O addresses above 64K used
 *	- expansion ROMs left enabled (this may sound harmless, but given
 *	  the fact the PCI specs explicitly allow address decoders to be
 *	  shared between expansion ROMs and other resource regions, it's
 *	  at least dangerous)
 *
 * Our solution:
 *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
 *	    This gives us fixed barriers on where we can allocate.
 *	(2) Allocate resources for all enabled devices.  If there is
 *	    a collision, just mark the resource as unallocated.  Also
 *	    disable expansion ROMs during this step.
 *	(3) Try to allocate resources for disabled devices.  If the
 *	    resources were assigned correctly, everything goes well,
 *	    if they weren't, they won't disturb allocation of other
 *	    resources.
 *	(4) Assign new addresses to resources which were either
 *	    not configured at all or misconfigured.  If explicitly
 *	    requested by the user, configure expansion ROM address
 *	    as well.
 */

static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			/* Don't bother with non-root busses when
			 * re-assigning all resources.  We clear the
			 * resource flags as if they were colliding
			 * and as such ensure proper re-allocation
			 * later.
			 */
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent -- paulus
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
		pr_debug("[0x%x], parent %p (%s)\n",
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
					i + PCI_BRIDGE_RESOURCES) == 0)
				continue;

		}
		pr_warn("PCI: Cannot allocate resource region ");
		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
		res->start = res->end = 0;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		pr_warn("PCI: Cannot allocate resource region %d ", idx);
		pr_cont("of device %s, will remap\n", pci_name(dev));
		if (pr)
			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* We only allocate ROMs on pass 1 just in case they
			 * have been screwed up by firmware
			 */
			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}

static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}

void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources.  If we re-assign everything, then
	 * we skip the allocate phase
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);

	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Before we start assigning unassigned resource, we try to reserve
	 * the low IO area and the VGA memory area if they intersect the
	 * bus available resources to avoid allocating things on top of them
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_reserve_legacy_regions(b);

	/* Now proceed to assigning things that were left unassigned */
	pr_debug("PCI: Assigning unassigned resources...\n");
	pci_assign_unassigned_resources();
}
/* This is used by the PCI hotplug driver to allocate resources
 * of newly plugged busses.  We can try to consolidate with the
 * rest of the code later, for now, keep it as-is as our main
 * resource allocation function doesn't deal with sub-trees yet.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: ", pci_name(dev));
			pr_debug("Resource %d: %016llx..%016llx [%x]\n",
				 i, (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);


/* pcibios_finish_adding_to_bus
 *
 * This is to be called by the hotplug code after devices have been
 * added to a bus; this includes calling it for a PHB that is just
 * being added
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* Fixup EEH */
	/* eeh_add_device_tree_late(bus); */
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
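/*
 * Illustrative sketch (hypothetical hotplug path, not from the original
 * file): a platform that brings up a PHB at runtime would roughly do the
 * sequence below.  pcibios_scan_phb() is static further down this file,
 * so real callers go through their own scan path; this only shows the
 * intended ordering.
 */
#if 0	/* example only, not built */
static void example_add_phb(struct device_node *np)
{
	struct pci_controller *hose = pcibios_alloc_controller(np);

	if (!hose)
		return;
	pci_process_bridge_OF_ranges(hose, np, 0);
	pcibios_scan_phb(hose);
	pcibios_finish_adding_to_bus(hose->bus);
}
#endif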
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	unsigned long io_offset;
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	/* Fixup IO space offset */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	if (!res->flags) {
		pr_warn("PCI: I/O resource not set for host ");
		pr_cont("bridge %s (domain %d)\n",
			hose->dn->full_name, hose->global_number);
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	pci_add_resource_offset(resources, res,
		(__force resource_size_t)(hose->io_base_virt - _IO_BASE));

	pr_debug("PCI: PHB IO resource    = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			pr_err("PCI: Memory resource 0 not set for ");
			pr_cont("host bridge %s (domain %d)\n",
				hose->dn->full_name, hose->global_number);

			/* Workaround for lack of MEM resource only on
			 * 32-bit
			 */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;

		}
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset     = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO  offset     = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}

struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}

static void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	pcibios_setup_phb_resources(hose, &resources);

	bus = pci_scan_root_bus(hose->parent, hose->first_busno,
				hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	bus->busn_res.start = hose->first_busno;
	hose->bus = bus;

	hose->last_busno = bus->busn_res.end;
}

static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	int next_busno = 0;

	pr_info("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		hose->last_busno = 0xff;
		pcibios_scan_phb(hose);
		if (next_busno <= hose->last_busno)
			next_busno = hose->last_busno + 1;
	}
	pci_bus_count = next_busno;

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (hose->bus)
			pci_bus_add_devices(hose->bus);
	}

	return 0;
}

subsys_initcall(pcibios_init);

static struct pci_controller *pci_bus_to_hose(int bus)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		if (bus >= hose->first_busno && bus <= hose->last_busno)
			return hose;
	return NULL;
}

/* Provide information on locations of various I/O regions in physical
 * memory.  Do this on a per-card basis so that we choose the right
 * root bridge.
 * Note that the returned IO or memory base is a physical address
 */

long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller *hose;
	long result = -EOPNOTSUPP;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}
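/*
 * Illustrative user-space sketch (an assumption, not part of the
 * original file): on platforms that wire this syscall up, libc exposes
 * it as pciconfig_iobase(), so querying the ISA I/O base could look
 * like:
 *
 *	long base = pciconfig_iobase(IOBASE_ISA_IO, 0, 0);
 */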
/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};

/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (!hose)
		pr_err("Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}

#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
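/*
 * Illustrative sketch (hypothetical board-setup code, not from the
 * original file): before the root bus is scanned, platform code can use
 * the accessors generated above, e.g. to probe device 0 function 0 on
 * the hose's first bus:
 */
#if 0	/* example only, not built */
static void __init example_early_probe(struct pci_controller *hose)
{
	u16 vendor;

	early_read_config_word(hose, hose->first_busno, PCI_DEVFN(0, 0),
			       PCI_VENDOR_ID, &vendor);
	if (vendor != 0xffff)
		pr_info("PCI: device present, vendor 0x%04x\n", vendor);
}
#endif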