root/drivers/pci/setup-bus.c

DEFINITIONS

This source file includes the following definitions.
  1. free_list
  2. add_to_list
  3. remove_from_list
  4. res_to_dev_res
  5. get_res_add_size
  6. get_res_add_align
  7. pdev_sort_resources
  8. __dev_sort_resources
  9. reset_resource
  10. reassign_resources_sorted
  11. assign_requested_resources_sorted
  12. pci_fail_res_type_mask
  13. pci_need_to_release
  14. __assign_resources_sorted
  15. pdev_assign_resources_sorted
  16. pbus_assign_resources_sorted
  17. pci_setup_cardbus
  18. pci_setup_bridge_io
  19. pci_setup_bridge_mmio
  20. pci_setup_bridge_mmio_pref
  21. __pci_setup_bridge
  22. pcibios_setup_bridge
  23. pci_setup_bridge
  24. pci_claim_bridge_resource
  25. pci_bridge_check_ranges
  26. find_free_bus_resource
  27. calculate_iosize
  28. calculate_memsize
  29. pcibios_window_alignment
  30. window_alignment
  31. pbus_size_io
  32. calculate_mem_align
  33. pbus_size_mem
  34. pci_cardbus_resource_alignment
  35. pci_bus_size_cardbus
  36. __pci_bus_size_bridges
  37. pci_bus_size_bridges
  38. assign_fixed_resource_on_bus
  39. pdev_assign_fixed_resources
  40. __pci_bus_assign_resources
  41. pci_bus_assign_resources
  42. pci_claim_device_resources
  43. pci_claim_bridge_resources
  44. pci_bus_allocate_dev_resources
  45. pci_bus_allocate_resources
  46. pci_bus_claim_resources
  47. __pci_bridge_assign_resources
  48. pci_bridge_release_resources
  49. pci_bus_release_bridge_resources
  50. pci_bus_dump_res
  51. pci_bus_dump_resources
  52. pci_bus_get_depth
  53. pci_realloc_get_opt
  54. pci_realloc_enabled
  55. iov_resources_unassigned
  56. pci_realloc_detect
  57. pci_realloc_detect
  58. pci_assign_unassigned_root_bus_resources
  59. pci_assign_unassigned_resources
  60. extend_bridge_window
  61. pci_bus_distribute_available_resources
  62. pci_bridge_distribute_available_resources
  63. pci_assign_unassigned_bridge_resources
  64. pci_reassign_bridge_resources
  65. pci_assign_unassigned_bus_resources

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Support routines for initializing a PCI subsystem
   4  *
   5  * Extruded from code written by
   6  *      Dave Rusling (david.rusling@reo.mts.dec.com)
   7  *      David Mosberger (davidm@cs.arizona.edu)
   8  *      David Miller (davem@redhat.com)
   9  *
  10  * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  11  *           PCI-PCI bridges cleanup, sorted resource allocation.
  12  * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  13  *           Converted to allocation in 3 passes, which gives
  14  *           tighter packing. Prefetchable range support.
  15  */
  16 
  17 #include <linux/init.h>
  18 #include <linux/kernel.h>
  19 #include <linux/module.h>
  20 #include <linux/pci.h>
  21 #include <linux/errno.h>
  22 #include <linux/ioport.h>
  23 #include <linux/cache.h>
  24 #include <linux/slab.h>
  25 #include <linux/acpi.h>
  26 #include "pci.h"
  27 
  28 unsigned int pci_flags;
  29 
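/*
 * One entry on a temporary resource tracking list: the resource, its
 * owning device, a snapshot of the original start/end/flags (so a failed
 * speculative assignment can be rolled back), and the optional
 * add_size/min_align used when trying to enlarge the resource later.
 */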
  30 struct pci_dev_resource {
  31         struct list_head list;
  32         struct resource *res;
  33         struct pci_dev *dev;
  34         resource_size_t start;
  35         resource_size_t end;
  36         resource_size_t add_size;
  37         resource_size_t min_align;
  38         unsigned long flags;
  39 };
  40 
  41 static void free_list(struct list_head *head)
  42 {
  43         struct pci_dev_resource *dev_res, *tmp;
  44 
  45         list_for_each_entry_safe(dev_res, tmp, head, list) {
  46                 list_del(&dev_res->list);
  47                 kfree(dev_res);
  48         }
  49 }
  50 
  51 /**
  52  * add_to_list() - Add a new resource tracker to the list
  53  * @head:       Head of the list
  54  * @dev:        Device to which the resource belongs
  55  * @res:        Resource to be tracked
  56  * @add_size:   Additional size to be optionally added to the resource
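 * @min_align:  Minimum alignment to use when the resource is later reassigned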
  57  */
  58 static int add_to_list(struct list_head *head, struct pci_dev *dev,
  59                        struct resource *res, resource_size_t add_size,
  60                        resource_size_t min_align)
  61 {
  62         struct pci_dev_resource *tmp;
  63 
  64         tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
  65         if (!tmp)
  66                 return -ENOMEM;
  67 
  68         tmp->res = res;
  69         tmp->dev = dev;
  70         tmp->start = res->start;
  71         tmp->end = res->end;
  72         tmp->flags = res->flags;
  73         tmp->add_size = add_size;
  74         tmp->min_align = min_align;
  75 
  76         list_add(&tmp->list, head);
  77 
  78         return 0;
  79 }
  80 
  81 static void remove_from_list(struct list_head *head, struct resource *res)
  82 {
  83         struct pci_dev_resource *dev_res, *tmp;
  84 
  85         list_for_each_entry_safe(dev_res, tmp, head, list) {
  86                 if (dev_res->res == res) {
  87                         list_del(&dev_res->list);
  88                         kfree(dev_res);
  89                         break;
  90                 }
  91         }
  92 }
  93 
  94 static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
  95                                                struct resource *res)
  96 {
  97         struct pci_dev_resource *dev_res;
  98 
  99         list_for_each_entry(dev_res, head, list) {
 100                 if (dev_res->res == res)
 101                         return dev_res;
 102         }
 103 
 104         return NULL;
 105 }
 106 
 107 static resource_size_t get_res_add_size(struct list_head *head,
 108                                         struct resource *res)
 109 {
 110         struct pci_dev_resource *dev_res;
 111 
 112         dev_res = res_to_dev_res(head, res);
 113         return dev_res ? dev_res->add_size : 0;
 114 }
 115 
 116 static resource_size_t get_res_add_align(struct list_head *head,
 117                                          struct resource *res)
 118 {
 119         struct pci_dev_resource *dev_res;
 120 
 121         dev_res = res_to_dev_res(head, res);
 122         return dev_res ? dev_res->min_align : 0;
 123 }
 124 
 125 
  126 /* Sort device resources by alignment, largest alignment first */
 127 static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
 128 {
 129         int i;
 130 
 131         for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 132                 struct resource *r;
 133                 struct pci_dev_resource *dev_res, *tmp;
 134                 resource_size_t r_align;
 135                 struct list_head *n;
 136 
 137                 r = &dev->resource[i];
 138 
 139                 if (r->flags & IORESOURCE_PCI_FIXED)
 140                         continue;
 141 
 142                 if (!(r->flags) || r->parent)
 143                         continue;
 144 
 145                 r_align = pci_resource_alignment(dev, r);
 146                 if (!r_align) {
 147                         pci_warn(dev, "BAR %d: %pR has bogus alignment\n",
 148                                  i, r);
 149                         continue;
 150                 }
 151 
 152                 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 153                 if (!tmp)
  154                         panic("pdev_sort_resources(): kzalloc() failed!\n");
 155                 tmp->res = r;
 156                 tmp->dev = dev;
 157 
  158                 /* Fall back to the tail: smallest alignment or empty list */
 159                 n = head;
 160                 list_for_each_entry(dev_res, head, list) {
 161                         resource_size_t align;
 162 
 163                         align = pci_resource_alignment(dev_res->dev,
 164                                                          dev_res->res);
 165 
 166                         if (r_align > align) {
 167                                 n = &dev_res->list;
 168                                 break;
 169                         }
 170                 }
 171                 /* Insert it just before n */
 172                 list_add_tail(&tmp->list, n);
 173         }
 174 }
 175 
 176 static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
 177 {
 178         u16 class = dev->class >> 8;
 179 
 180         /* Don't touch classless devices or host bridges or IOAPICs */
 181         if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
 182                 return;
 183 
 184         /* Don't touch IOAPIC devices already enabled by firmware */
 185         if (class == PCI_CLASS_SYSTEM_PIC) {
 186                 u16 command;
 187                 pci_read_config_word(dev, PCI_COMMAND, &command);
 188                 if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
 189                         return;
 190         }
 191 
 192         pdev_sort_resources(dev, head);
 193 }
 194 
 195 static inline void reset_resource(struct resource *res)
 196 {
 197         res->start = 0;
 198         res->end = 0;
 199         res->flags = 0;
 200 }
 201 
 202 /**
 203  * reassign_resources_sorted() - Satisfy any additional resource requests
 204  *
 205  * @realloc_head:       Head of the list tracking requests requiring
 206  *                      additional resources
 207  * @head:               Head of the list tracking requests with allocated
 208  *                      resources
 209  *
 210  * Walk through each element of the realloc_head and try to procure additional
 211  * resources for the element, provided the element is in the head list.
 212  */
 213 static void reassign_resources_sorted(struct list_head *realloc_head,
 214                                       struct list_head *head)
 215 {
 216         struct resource *res;
 217         struct pci_dev_resource *add_res, *tmp;
 218         struct pci_dev_resource *dev_res;
 219         resource_size_t add_size, align;
 220         int idx;
 221 
 222         list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
 223                 bool found_match = false;
 224 
 225                 res = add_res->res;
 226                 /* Skip resource that has been reset */
 227                 if (!res->flags)
 228                         goto out;
 229 
 230                 /* Skip this resource if not found in head list */
 231                 list_for_each_entry(dev_res, head, list) {
 232                         if (dev_res->res == res) {
 233                                 found_match = true;
 234                                 break;
 235                         }
 236                 }
 237                 if (!found_match) /* Just skip */
 238                         continue;
 239 
 240                 idx = res - &add_res->dev->resource[0];
 241                 add_size = add_res->add_size;
 242                 align = add_res->min_align;
 243                 if (!resource_size(res)) {
 244                         res->start = align;
 245                         res->end = res->start + add_size - 1;
 246                         if (pci_assign_resource(add_res->dev, idx))
 247                                 reset_resource(res);
 248                 } else {
 249                         res->flags |= add_res->flags &
 250                                  (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
 251                         if (pci_reassign_resource(add_res->dev, idx,
 252                                                   add_size, align))
 253                                 pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
 254                                          (unsigned long long) add_size, idx,
 255                                          res);
 256                 }
 257 out:
 258                 list_del(&add_res->list);
 259                 kfree(add_res);
 260         }
 261 }
 262 
 263 /**
 264  * assign_requested_resources_sorted() - Satisfy resource requests
 265  *
 266  * @head:       Head of the list tracking requests for resources
 267  * @fail_head:  Head of the list tracking requests that could not be
 268  *              allocated
 269  *
 270  * Satisfy resource requests of each element in the list.  Add requests that
  271  * could not be satisfied to the fail_head list.
 272  */
 273 static void assign_requested_resources_sorted(struct list_head *head,
 274                                  struct list_head *fail_head)
 275 {
 276         struct resource *res;
 277         struct pci_dev_resource *dev_res;
 278         int idx;
 279 
 280         list_for_each_entry(dev_res, head, list) {
 281                 res = dev_res->res;
 282                 idx = res - &dev_res->dev->resource[0];
 283                 if (resource_size(res) &&
 284                     pci_assign_resource(dev_res->dev, idx)) {
 285                         if (fail_head) {
 286                                 /*
 287                                  * If the failed resource is a ROM BAR and
 288                                  * it will be enabled later, don't add it
 289                                  * to the list.
 290                                  */
 291                                 if (!((idx == PCI_ROM_RESOURCE) &&
 292                                       (!(res->flags & IORESOURCE_ROM_ENABLE))))
 293                                         add_to_list(fail_head,
 294                                                     dev_res->dev, res,
 295                                                     0 /* don't care */,
 296                                                     0 /* don't care */);
 297                         }
 298                         reset_resource(res);
 299                 }
 300         }
 301 }
 302 
 303 static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
 304 {
 305         struct pci_dev_resource *fail_res;
 306         unsigned long mask = 0;
 307 
 308         /* Check failed type */
 309         list_for_each_entry(fail_res, fail_head, list)
 310                 mask |= fail_res->flags;
 311 
 312         /*
 313          * One pref failed resource will set IORESOURCE_MEM, as we can
 314          * allocate pref in non-pref range.  Will release all assigned
 315          * non-pref sibling resources according to that bit.
 316          */
 317         return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
 318 }
 319 
 320 static bool pci_need_to_release(unsigned long mask, struct resource *res)
 321 {
 322         if (res->flags & IORESOURCE_IO)
 323                 return !!(mask & IORESOURCE_IO);
 324 
 325         /* Check pref at first */
 326         if (res->flags & IORESOURCE_PREFETCH) {
 327                 if (mask & IORESOURCE_PREFETCH)
 328                         return true;
 329                 /* Count pref if its parent is non-pref */
 330                 else if ((mask & IORESOURCE_MEM) &&
 331                          !(res->parent->flags & IORESOURCE_PREFETCH))
 332                         return true;
 333                 else
 334                         return false;
 335         }
 336 
 337         if (res->flags & IORESOURCE_MEM)
 338                 return !!(mask & IORESOURCE_MEM);
 339 
 340         return false;   /* Should not get here */
 341 }
 342 
 343 static void __assign_resources_sorted(struct list_head *head,
 344                                       struct list_head *realloc_head,
 345                                       struct list_head *fail_head)
 346 {
  347         /*
  348          * Don't assign just the requested resources first.  They could be
  349          * adjacent, in which case a later reassignment could not grow
  350          * them one by one within the parent resource window.
  351          *
  352          * Instead, try to assign requested + add_size up front.  If that
  353          * succeeds, we're done.  If it fails, fall back to assigning just
  354          * the requested size, then try to reassign add_size afterwards.
  355          *
  356          * Check the three resource types separately to decide which
  357          * assigned resources to release after a failed requested +
  358          * add_size attempt:
  359          *      1. If I/O port assignment fails, release the assigned I/O
  360          *         ports.
  361          *      2. If pref MMIO assignment fails, release the assigned pref
  362          *         MMIO.  If an assigned pref MMIO's parent is non-pref
  363          *         MMIO and non-pref MMIO assignment fails, release that
  364          *         pref MMIO as well.
  365          *      3. If non-pref MMIO assignment fails or pref MMIO
  366          *         assignment fails, release the assigned non-pref MMIO.
  367          */
 368         LIST_HEAD(save_head);
 369         LIST_HEAD(local_fail_head);
 370         struct pci_dev_resource *save_res;
 371         struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
 372         unsigned long fail_type;
 373         resource_size_t add_align, align;
 374 
 375         /* Check if optional add_size is there */
 376         if (!realloc_head || list_empty(realloc_head))
 377                 goto requested_and_reassign;
 378 
 379         /* Save original start, end, flags etc at first */
 380         list_for_each_entry(dev_res, head, list) {
 381                 if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
 382                         free_list(&save_head);
 383                         goto requested_and_reassign;
 384                 }
 385         }
 386 
 387         /* Update res in head list with add_size in realloc_head list */
 388         list_for_each_entry_safe(dev_res, tmp_res, head, list) {
 389                 dev_res->res->end += get_res_add_size(realloc_head,
 390                                                         dev_res->res);
 391 
 392                 /*
 393                  * There are two kinds of additional resources in the list:
 394                  * 1. bridge resource  -- IORESOURCE_STARTALIGN
 395                  * 2. SR-IOV resource  -- IORESOURCE_SIZEALIGN
 396                  * Here just fix the additional alignment for bridge
 397                  */
 398                 if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
 399                         continue;
 400 
 401                 add_align = get_res_add_align(realloc_head, dev_res->res);
 402 
 403                 /*
 404                  * The "head" list is sorted by alignment so resources with
 405                  * bigger alignment will be assigned first.  After we
 406                  * change the alignment of a dev_res in "head" list, we
 407                  * need to reorder the list by alignment to make it
 408                  * consistent.
 409                  */
 410                 if (add_align > dev_res->res->start) {
 411                         resource_size_t r_size = resource_size(dev_res->res);
 412 
 413                         dev_res->res->start = add_align;
 414                         dev_res->res->end = add_align + r_size - 1;
 415 
 416                         list_for_each_entry(dev_res2, head, list) {
 417                                 align = pci_resource_alignment(dev_res2->dev,
 418                                                                dev_res2->res);
 419                                 if (add_align > align) {
 420                                         list_move_tail(&dev_res->list,
 421                                                        &dev_res2->list);
 422                                         break;
 423                                 }
 424                         }
 425                 }
 426 
 427         }
 428 
 429         /* Try updated head list with add_size added */
 430         assign_requested_resources_sorted(head, &local_fail_head);
 431 
 432         /* All assigned with add_size? */
 433         if (list_empty(&local_fail_head)) {
 434                 /* Remove head list from realloc_head list */
 435                 list_for_each_entry(dev_res, head, list)
 436                         remove_from_list(realloc_head, dev_res->res);
 437                 free_list(&save_head);
 438                 free_list(head);
 439                 return;
 440         }
 441 
 442         /* Check failed type */
 443         fail_type = pci_fail_res_type_mask(&local_fail_head);
  444         /* Remove assigned resources that don't need to be released from the lists */
 445         list_for_each_entry_safe(dev_res, tmp_res, head, list)
 446                 if (dev_res->res->parent &&
 447                     !pci_need_to_release(fail_type, dev_res->res)) {
 448                         /* Remove it from realloc_head list */
 449                         remove_from_list(realloc_head, dev_res->res);
 450                         remove_from_list(&save_head, dev_res->res);
 451                         list_del(&dev_res->list);
 452                         kfree(dev_res);
 453                 }
 454 
 455         free_list(&local_fail_head);
 456         /* Release assigned resource */
 457         list_for_each_entry(dev_res, head, list)
 458                 if (dev_res->res->parent)
 459                         release_resource(dev_res->res);
 460         /* Restore start/end/flags from saved list */
 461         list_for_each_entry(save_res, &save_head, list) {
 462                 struct resource *res = save_res->res;
 463 
 464                 res->start = save_res->start;
 465                 res->end = save_res->end;
 466                 res->flags = save_res->flags;
 467         }
 468         free_list(&save_head);
 469 
 470 requested_and_reassign:
 471         /* Satisfy the must-have resource requests */
 472         assign_requested_resources_sorted(head, fail_head);
 473 
 474         /* Try to satisfy any additional optional resource requests */
 475         if (realloc_head)
 476                 reassign_resources_sorted(realloc_head, head);
 477         free_list(head);
 478 }
 479 
 480 static void pdev_assign_resources_sorted(struct pci_dev *dev,
 481                                          struct list_head *add_head,
 482                                          struct list_head *fail_head)
 483 {
 484         LIST_HEAD(head);
 485 
 486         __dev_sort_resources(dev, &head);
 487         __assign_resources_sorted(&head, add_head, fail_head);
 488 
 489 }
 490 
 491 static void pbus_assign_resources_sorted(const struct pci_bus *bus,
 492                                          struct list_head *realloc_head,
 493                                          struct list_head *fail_head)
 494 {
 495         struct pci_dev *dev;
 496         LIST_HEAD(head);
 497 
 498         list_for_each_entry(dev, &bus->devices, bus_list)
 499                 __dev_sort_resources(dev, &head);
 500 
 501         __assign_resources_sorted(&head, realloc_head, fail_head);
 502 }
 503 
 504 void pci_setup_cardbus(struct pci_bus *bus)
 505 {
 506         struct pci_dev *bridge = bus->self;
 507         struct resource *res;
 508         struct pci_bus_region region;
 509 
 510         pci_info(bridge, "CardBus bridge to %pR\n",
 511                  &bus->busn_res);
 512 
 513         res = bus->resource[0];
 514         pcibios_resource_to_bus(bridge->bus, &region, res);
 515         if (res->flags & IORESOURCE_IO) {
 516                 /*
 517                  * The IO resource is allocated a range twice as large as it
 518                  * would normally need.  This allows us to set both IO regs.
 519                  */
 520                 pci_info(bridge, "  bridge window %pR\n", res);
 521                 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
 522                                         region.start);
 523                 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
 524                                         region.end);
 525         }
 526 
 527         res = bus->resource[1];
 528         pcibios_resource_to_bus(bridge->bus, &region, res);
 529         if (res->flags & IORESOURCE_IO) {
 530                 pci_info(bridge, "  bridge window %pR\n", res);
 531                 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
 532                                         region.start);
 533                 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
 534                                         region.end);
 535         }
 536 
 537         res = bus->resource[2];
 538         pcibios_resource_to_bus(bridge->bus, &region, res);
 539         if (res->flags & IORESOURCE_MEM) {
 540                 pci_info(bridge, "  bridge window %pR\n", res);
 541                 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
 542                                         region.start);
 543                 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
 544                                         region.end);
 545         }
 546 
 547         res = bus->resource[3];
 548         pcibios_resource_to_bus(bridge->bus, &region, res);
 549         if (res->flags & IORESOURCE_MEM) {
 550                 pci_info(bridge, "  bridge window %pR\n", res);
 551                 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
 552                                         region.start);
 553                 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
 554                                         region.end);
 555         }
 556 }
 557 EXPORT_SYMBOL(pci_setup_cardbus);
 558 
 559 /*
 560  * Initialize bridges with base/limit values we have collected.  PCI-to-PCI
 561  * Bridge Architecture Specification rev. 1.1 (1998) requires that if there
 562  * are no I/O ports or memory behind the bridge, the corresponding range
 563  * must be turned off by writing base value greater than limit to the
 564  * bridge's base/limit registers.
 565  *
 566  * Note: care must be taken when updating I/O base/limit registers of
 567  * bridges which support 32-bit I/O.  This update requires two config space
 568  * writes, so it's quite possible that an I/O window of the bridge will
 569  * have some undesirable address (e.g. 0) after the first write.  Ditto
 570  * 64-bit prefetchable MMIO.
 571  */
 572 static void pci_setup_bridge_io(struct pci_dev *bridge)
 573 {
 574         struct resource *res;
 575         struct pci_bus_region region;
 576         unsigned long io_mask;
 577         u8 io_base_lo, io_limit_lo;
 578         u16 l;
 579         u32 io_upper16;
 580 
 581         io_mask = PCI_IO_RANGE_MASK;
 582         if (bridge->io_window_1k)
 583                 io_mask = PCI_IO_1K_RANGE_MASK;
 584 
 585         /* Set up the top and bottom of the PCI I/O segment for this bus */
 586         res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
 587         pcibios_resource_to_bus(bridge->bus, &region, res);
 588         if (res->flags & IORESOURCE_IO) {
 589                 pci_read_config_word(bridge, PCI_IO_BASE, &l);
 590                 io_base_lo = (region.start >> 8) & io_mask;
 591                 io_limit_lo = (region.end >> 8) & io_mask;
 592                 l = ((u16) io_limit_lo << 8) | io_base_lo;
 593                 /* Set up upper 16 bits of I/O base/limit */
 594                 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
 595                 pci_info(bridge, "  bridge window %pR\n", res);
 596         } else {
 597                 /* Clear upper 16 bits of I/O base/limit */
 598                 io_upper16 = 0;
 599                 l = 0x00f0;
 600         }
 601         /* Temporarily disable the I/O range before updating PCI_IO_BASE */
 602         pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
 603         /* Update lower 16 bits of I/O base/limit */
 604         pci_write_config_word(bridge, PCI_IO_BASE, l);
 605         /* Update upper 16 bits of I/O base/limit */
 606         pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
 607 }
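/*
 * Worked example (hypothetical values): for a bus-address window
 * 0x2000-0x2fff on a bridge without the 1K I/O extension,
 * io_base_lo = (0x2000 >> 8) & ~0x0f = 0x20 and
 * io_limit_lo = (0x2fff >> 8) & ~0x0f = 0x20, so l = 0x2020 and
 * io_upper16 = 0.  The sequence above first writes 0x0000ffff to
 * PCI_IO_BASE_UPPER16 (base > limit, window disabled), then the new
 * lower 16 bits, then the new upper 16 bits, so the window never decodes
 * a partially updated range.  In the "no I/O behind the bridge" case,
 * l = 0x00f0 encodes base 0xf000 > limit 0x0fff, which keeps the window
 * disabled.
 */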
 608 
 609 static void pci_setup_bridge_mmio(struct pci_dev *bridge)
 610 {
 611         struct resource *res;
 612         struct pci_bus_region region;
 613         u32 l;
 614 
 615         /* Set up the top and bottom of the PCI Memory segment for this bus */
 616         res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
 617         pcibios_resource_to_bus(bridge->bus, &region, res);
 618         if (res->flags & IORESOURCE_MEM) {
 619                 l = (region.start >> 16) & 0xfff0;
 620                 l |= region.end & 0xfff00000;
 621                 pci_info(bridge, "  bridge window %pR\n", res);
 622         } else {
 623                 l = 0x0000fff0;
 624         }
 625         pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
 626 }
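/*
 * Worked example (hypothetical values): for a bus-address window
 * 0x90000000-0x901fffff, l = ((0x90000000 >> 16) & 0xfff0) |
 * (0x901fffff & 0xfff00000) = 0x90109000; the low word 0x9000 is the
 * memory base (0x90000000) and the high word 0x9010 is the memory limit
 * (0x901fffff, 1MB granularity).  With no memory behind the bridge,
 * l = 0x0000fff0 encodes base 0xfff00000 > limit 0x000fffff, disabling
 * the window.
 */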
 627 
 628 static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
 629 {
 630         struct resource *res;
 631         struct pci_bus_region region;
 632         u32 l, bu, lu;
 633 
 634         /*
 635          * Clear out the upper 32 bits of PREF limit.  If
 636          * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables
 637          * PREF range, which is ok.
 638          */
 639         pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
 640 
 641         /* Set up PREF base/limit */
 642         bu = lu = 0;
 643         res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
 644         pcibios_resource_to_bus(bridge->bus, &region, res);
 645         if (res->flags & IORESOURCE_PREFETCH) {
 646                 l = (region.start >> 16) & 0xfff0;
 647                 l |= region.end & 0xfff00000;
 648                 if (res->flags & IORESOURCE_MEM_64) {
 649                         bu = upper_32_bits(region.start);
 650                         lu = upper_32_bits(region.end);
 651                 }
 652                 pci_info(bridge, "  bridge window %pR\n", res);
 653         } else {
 654                 l = 0x0000fff0;
 655         }
 656         pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
 657 
 658         /* Set the upper 32 bits of PREF base & limit */
 659         pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
 660         pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
 661 }
 662 
 663 static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
 664 {
 665         struct pci_dev *bridge = bus->self;
 666 
 667         pci_info(bridge, "PCI bridge to %pR\n",
 668                  &bus->busn_res);
 669 
 670         if (type & IORESOURCE_IO)
 671                 pci_setup_bridge_io(bridge);
 672 
 673         if (type & IORESOURCE_MEM)
 674                 pci_setup_bridge_mmio(bridge);
 675 
 676         if (type & IORESOURCE_PREFETCH)
 677                 pci_setup_bridge_mmio_pref(bridge);
 678 
 679         pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 680 }
 681 
 682 void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
 683 {
 684 }
 685 
 686 void pci_setup_bridge(struct pci_bus *bus)
 687 {
 688         unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
 689                                   IORESOURCE_PREFETCH;
 690 
 691         pcibios_setup_bridge(bus, type);
 692         __pci_setup_bridge(bus, type);
 693 }
 694 
 695 
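/*
 * Claim a bridge window resource as-is if possible.  Otherwise, clip the
 * window to fit the parent's available range, reprogram the bridge
 * base/limit registers to match, and try to claim the smaller window.
 */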
 696 int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
 697 {
 698         if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
 699                 return 0;
 700 
 701         if (pci_claim_resource(bridge, i) == 0)
 702                 return 0;       /* Claimed the window */
 703 
 704         if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
 705                 return 0;
 706 
 707         if (!pci_bus_clip_resource(bridge, i))
 708                 return -EINVAL; /* Clipping didn't change anything */
 709 
 710         switch (i - PCI_BRIDGE_RESOURCES) {
 711         case 0:
 712                 pci_setup_bridge_io(bridge);
 713                 break;
 714         case 1:
 715                 pci_setup_bridge_mmio(bridge);
 716                 break;
 717         case 2:
 718                 pci_setup_bridge_mmio_pref(bridge);
 719                 break;
 720         default:
 721                 return -EINVAL;
 722         }
 723 
 724         if (pci_claim_resource(bridge, i) == 0)
 725                 return 0;       /* Claimed a smaller window */
 726 
 727         return -EINVAL;
 728 }
 729 
 730 /*
 731  * Check whether the bridge supports optional I/O and prefetchable memory
 732  * ranges.  If not, the respective base/limit registers must be read-only
 733  * and read as 0.
 734  */
 735 static void pci_bridge_check_ranges(struct pci_bus *bus)
 736 {
 737         struct pci_dev *bridge = bus->self;
 738         struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
 739 
 740         b_res[1].flags |= IORESOURCE_MEM;
 741 
 742         if (bridge->io_window)
 743                 b_res[0].flags |= IORESOURCE_IO;
 744 
 745         if (bridge->pref_window) {
 746                 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
 747                 if (bridge->pref_64_window) {
 748                         b_res[2].flags |= IORESOURCE_MEM_64;
 749                         b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
 750                 }
 751         }
 752 }
 753 
 754 /*
 755  * Helper function for sizing routines: find first available bus resource
 756  * of a given type.  Note: we intentionally skip the bus resources which
 757  * have already been assigned (that is, have non-NULL parent resource).
 758  */
 759 static struct resource *find_free_bus_resource(struct pci_bus *bus,
 760                                                unsigned long type_mask,
 761                                                unsigned long type)
 762 {
 763         int i;
 764         struct resource *r;
 765 
 766         pci_bus_for_each_resource(bus, r, i) {
 767                 if (r == &ioport_resource || r == &iomem_resource)
 768                         continue;
 769                 if (r && (r->flags & type_mask) == type && !r->parent)
 770                         return r;
 771         }
 772         return NULL;
 773 }
 774 
 775 static resource_size_t calculate_iosize(resource_size_t size,
 776                                         resource_size_t min_size,
 777                                         resource_size_t size1,
 778                                         resource_size_t add_size,
 779                                         resource_size_t children_add_size,
 780                                         resource_size_t old_size,
 781                                         resource_size_t align)
 782 {
 783         if (size < min_size)
 784                 size = min_size;
 785         if (old_size == 1)
 786                 old_size = 0;
 787         /*
  788          * To be fixed in 2.5: we should have some sort of HAVE_ISA flag in the
 789          * struct pci_bus.
 790          */
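        /*
         * ISA aliasing: legacy ISA decodes only 10 address bits, so in
         * every 1K of I/O space only the low 256 bytes are safely
         * usable.  The expression below therefore keeps the low 256
         * bytes of the request and quadruples the rest, e.g. a
         * hypothetical 0x300 request becomes
         * (0x300 & 0xff) + ((0x300 & ~0xff) << 2) = 0xc00.
         */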
 791 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
 792         size = (size & 0xff) + ((size & ~0xffUL) << 2);
 793 #endif
 794         size = size + size1;
 795         if (size < old_size)
 796                 size = old_size;
 797 
 798         size = ALIGN(max(size, add_size) + children_add_size, align);
 799         return size;
 800 }
 801 
 802 static resource_size_t calculate_memsize(resource_size_t size,
 803                                          resource_size_t min_size,
 804                                          resource_size_t add_size,
 805                                          resource_size_t children_add_size,
 806                                          resource_size_t old_size,
 807                                          resource_size_t align)
 808 {
 809         if (size < min_size)
 810                 size = min_size;
 811         if (old_size == 1)
 812                 old_size = 0;
 813         if (size < old_size)
 814                 size = old_size;
 815 
 816         size = ALIGN(max(size, add_size) + children_add_size, align);
 817         return size;
 818 }
 819 
 820 resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
 821                                                 unsigned long type)
 822 {
 823         return 1;
 824 }
 825 
 826 #define PCI_P2P_DEFAULT_MEM_ALIGN       0x100000        /* 1MiB */
 827 #define PCI_P2P_DEFAULT_IO_ALIGN        0x1000          /* 4KiB */
 828 #define PCI_P2P_DEFAULT_IO_ALIGN_1K     0x400           /* 1KiB */
 829 
 830 static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
 831 {
 832         resource_size_t align = 1, arch_align;
 833 
 834         if (type & IORESOURCE_MEM)
 835                 align = PCI_P2P_DEFAULT_MEM_ALIGN;
 836         else if (type & IORESOURCE_IO) {
 837                 /*
 838                  * Per spec, I/O windows are 4K-aligned, but some bridges have
 839                  * an extension to support 1K alignment.
 840                  */
 841                 if (bus->self->io_window_1k)
 842                         align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
 843                 else
 844                         align = PCI_P2P_DEFAULT_IO_ALIGN;
 845         }
 846 
 847         arch_align = pcibios_window_alignment(bus, type);
 848         return max(align, arch_align);
 849 }
 850 
 851 /**
 852  * pbus_size_io() - Size the I/O window of a given bus
 853  *
 854  * @bus:                The bus
 855  * @min_size:           The minimum I/O window that must be allocated
 856  * @add_size:           Additional optional I/O window
 857  * @realloc_head:       Track the additional I/O window on this list
 858  *
 859  * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these
 860  * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI
 861  * devices are limited to 256 bytes.  We must be careful with the ISA
 862  * aliasing though.
 863  */
 864 static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
 865                          resource_size_t add_size,
 866                          struct list_head *realloc_head)
 867 {
 868         struct pci_dev *dev;
 869         struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
 870                                                         IORESOURCE_IO);
 871         resource_size_t size = 0, size0 = 0, size1 = 0;
 872         resource_size_t children_add_size = 0;
 873         resource_size_t min_align, align;
 874 
 875         if (!b_res)
 876                 return;
 877 
 878         min_align = window_alignment(bus, IORESOURCE_IO);
 879         list_for_each_entry(dev, &bus->devices, bus_list) {
 880                 int i;
 881 
 882                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 883                         struct resource *r = &dev->resource[i];
 884                         unsigned long r_size;
 885 
 886                         if (r->parent || !(r->flags & IORESOURCE_IO))
 887                                 continue;
 888                         r_size = resource_size(r);
 889 
 890                         if (r_size < 0x400)
 891                                 /* Might be re-aligned for ISA */
 892                                 size += r_size;
 893                         else
 894                                 size1 += r_size;
 895 
 896                         align = pci_resource_alignment(dev, r);
 897                         if (align > min_align)
 898                                 min_align = align;
 899 
 900                         if (realloc_head)
 901                                 children_add_size += get_res_add_size(realloc_head, r);
 902                 }
 903         }
 904 
 905         size0 = calculate_iosize(size, min_size, size1, 0, 0,
 906                         resource_size(b_res), min_align);
 907         size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
 908                 calculate_iosize(size, min_size, size1, add_size, children_add_size,
 909                         resource_size(b_res), min_align);
 910         if (!size0 && !size1) {
 911                 if (b_res->start || b_res->end)
 912                         pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
 913                                  b_res, &bus->busn_res);
 914                 b_res->flags = 0;
 915                 return;
 916         }
 917 
 918         b_res->start = min_align;
 919         b_res->end = b_res->start + size0 - 1;
 920         b_res->flags |= IORESOURCE_STARTALIGN;
 921         if (size1 > size0 && realloc_head) {
 922                 add_to_list(realloc_head, bus->self, b_res, size1-size0,
 923                             min_align);
 924                 pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
 925                          b_res, &bus->busn_res,
 926                          (unsigned long long) size1 - size0);
 927         }
 928 }
 929 
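/*
 * aligns[] is filled in by pbus_size_mem(): aligns[order] accumulates the
 * alignment of each resource in the (1MB << order) alignment bucket whose
 * size does not exceed its alignment.  From those per-order totals,
 * estimate the minimum alignment needed for the bridge window so the
 * sorted resources can still be packed into it.
 */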
 930 static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
 931                                                   int max_order)
 932 {
 933         resource_size_t align = 0;
 934         resource_size_t min_align = 0;
 935         int order;
 936 
 937         for (order = 0; order <= max_order; order++) {
 938                 resource_size_t align1 = 1;
 939 
 940                 align1 <<= (order + 20);
 941 
 942                 if (!align)
 943                         min_align = align1;
 944                 else if (ALIGN(align + min_align, min_align) < align1)
 945                         min_align = align1 >> 1;
 946                 align += aligns[order];
 947         }
 948 
 949         return min_align;
 950 }
 951 
 952 /**
 953  * pbus_size_mem() - Size the memory window of a given bus
 954  *
 955  * @bus:                The bus
 956  * @mask:               Mask the resource flag, then compare it with type
 957  * @type:               The type of free resource from bridge
 958  * @type2:              Second match type
 959  * @type3:              Third match type
 960  * @min_size:           The minimum memory window that must be allocated
 961  * @add_size:           Additional optional memory window
 962  * @realloc_head:       Track the additional memory window on this list
 963  *
 964  * Calculate the size of the bus and minimal alignment which guarantees
 965  * that all child resources fit in this size.
 966  *
 967  * Return -ENOSPC if there's no available bus resource of the desired
 968  * type.  Otherwise, set the bus resource start/end to indicate the
 969  * required size, add things to realloc_head (if supplied), and return 0.
 970  */
 971 static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 972                          unsigned long type, unsigned long type2,
 973                          unsigned long type3, resource_size_t min_size,
 974                          resource_size_t add_size,
 975                          struct list_head *realloc_head)
 976 {
 977         struct pci_dev *dev;
 978         resource_size_t min_align, align, size, size0, size1;
 979         resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
 980         int order, max_order;
 981         struct resource *b_res = find_free_bus_resource(bus,
 982                                         mask | IORESOURCE_PREFETCH, type);
 983         resource_size_t children_add_size = 0;
 984         resource_size_t children_add_align = 0;
 985         resource_size_t add_align = 0;
 986 
 987         if (!b_res)
 988                 return -ENOSPC;
 989 
 990         memset(aligns, 0, sizeof(aligns));
 991         max_order = 0;
 992         size = 0;
 993 
 994         list_for_each_entry(dev, &bus->devices, bus_list) {
 995                 int i;
 996 
 997                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 998                         struct resource *r = &dev->resource[i];
 999                         resource_size_t r_size;
1000 
1001                         if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
1002                             ((r->flags & mask) != type &&
1003                              (r->flags & mask) != type2 &&
1004                              (r->flags & mask) != type3))
1005                                 continue;
1006                         r_size = resource_size(r);
1007 #ifdef CONFIG_PCI_IOV
 1008                         /* Put SR-IOV requested resources on the optional list */
1009                         if (realloc_head && i >= PCI_IOV_RESOURCES &&
1010                                         i <= PCI_IOV_RESOURCE_END) {
1011                                 add_align = max(pci_resource_alignment(dev, r), add_align);
1012                                 r->end = r->start - 1;
1013                                 add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
1014                                 children_add_size += r_size;
1015                                 continue;
1016                         }
1017 #endif
1018                         /*
1019                          * aligns[0] is for 1MB (since bridge memory
1020                          * windows are always at least 1MB aligned), so
1021                          * keep "order" from being negative for smaller
1022                          * resources.
1023                          */
1024                         align = pci_resource_alignment(dev, r);
1025                         order = __ffs(align) - 20;
1026                         if (order < 0)
1027                                 order = 0;
1028                         if (order >= ARRAY_SIZE(aligns)) {
1029                                 pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
1030                                          i, r, (unsigned long long) align);
1031                                 r->flags = 0;
1032                                 continue;
1033                         }
1034                         size += max(r_size, align);
1035                         /*
1036                          * Exclude ranges with size > align from calculation of
1037                          * the alignment.
1038                          */
1039                         if (r_size <= align)
1040                                 aligns[order] += align;
1041                         if (order > max_order)
1042                                 max_order = order;
1043 
1044                         if (realloc_head) {
1045                                 children_add_size += get_res_add_size(realloc_head, r);
1046                                 children_add_align = get_res_add_align(realloc_head, r);
1047                                 add_align = max(add_align, children_add_align);
1048                         }
1049                 }
1050         }
1051 
1052         min_align = calculate_mem_align(aligns, max_order);
1053         min_align = max(min_align, window_alignment(bus, b_res->flags));
1054         size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
1055         add_align = max(min_align, add_align);
1056         size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
1057                 calculate_memsize(size, min_size, add_size, children_add_size,
1058                                 resource_size(b_res), add_align);
1059         if (!size0 && !size1) {
1060                 if (b_res->start || b_res->end)
1061                         pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
1062                                  b_res, &bus->busn_res);
1063                 b_res->flags = 0;
1064                 return 0;
1065         }
1066         b_res->start = min_align;
1067         b_res->end = size0 + min_align - 1;
1068         b_res->flags |= IORESOURCE_STARTALIGN;
1069         if (size1 > size0 && realloc_head) {
1070                 add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
1071                 pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
1072                            b_res, &bus->busn_res,
1073                            (unsigned long long) (size1 - size0),
1074                            (unsigned long long) add_align);
1075         }
1076         return 0;
1077 }
1078 
1079 unsigned long pci_cardbus_resource_alignment(struct resource *res)
1080 {
1081         if (res->flags & IORESOURCE_IO)
1082                 return pci_cardbus_io_size;
1083         if (res->flags & IORESOURCE_MEM)
1084                 return pci_cardbus_mem_size;
1085         return 0;
1086 }
1087 
1088 static void pci_bus_size_cardbus(struct pci_bus *bus,
1089                                  struct list_head *realloc_head)
1090 {
1091         struct pci_dev *bridge = bus->self;
1092         struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
1093         resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
1094         u16 ctrl;
1095 
1096         if (b_res[0].parent)
1097                 goto handle_b_res_1;
1098         /*
1099          * Reserve some resources for CardBus.  We reserve a fixed amount
1100          * of bus space for CardBus bridges.
1101          */
1102         b_res[0].start = pci_cardbus_io_size;
1103         b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
1104         b_res[0].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
1105         if (realloc_head) {
1106                 b_res[0].end -= pci_cardbus_io_size;
1107                 add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
1108                                 pci_cardbus_io_size);
1109         }
1110 
1111 handle_b_res_1:
1112         if (b_res[1].parent)
1113                 goto handle_b_res_2;
1114         b_res[1].start = pci_cardbus_io_size;
1115         b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1;
1116         b_res[1].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
1117         if (realloc_head) {
1118                 b_res[1].end -= pci_cardbus_io_size;
1119                 add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size,
1120                                  pci_cardbus_io_size);
1121         }
1122 
1123 handle_b_res_2:
1124         /* MEM1 must not be pref MMIO */
1125         pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1126         if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
1127                 ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
1128                 pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1129                 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1130         }
1131 
1132         /* Check whether prefetchable memory is supported by this bridge. */
1133         pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1134         if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
1135                 ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
1136                 pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1137                 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1138         }
1139 
1140         if (b_res[2].parent)
1141                 goto handle_b_res_3;
1142         /*
1143          * If we have prefetchable memory support, allocate two regions.
1144          * Otherwise, allocate one region of twice the size.
1145          */
1146         if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
1147                 b_res[2].start = pci_cardbus_mem_size;
1148                 b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1;
1149                 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
1150                                   IORESOURCE_STARTALIGN;
1151                 if (realloc_head) {
1152                         b_res[2].end -= pci_cardbus_mem_size;
1153                         add_to_list(realloc_head, bridge, b_res+2,
1154                                  pci_cardbus_mem_size, pci_cardbus_mem_size);
1155                 }
1156 
1157                 /* Reduce that to half */
1158                 b_res_3_size = pci_cardbus_mem_size;
1159         }
1160 
1161 handle_b_res_3:
1162         if (b_res[3].parent)
1163                 goto handle_done;
1164         b_res[3].start = pci_cardbus_mem_size;
1165         b_res[3].end = b_res[3].start + b_res_3_size - 1;
1166         b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
1167         if (realloc_head) {
1168                 b_res[3].end -= b_res_3_size;
1169                 add_to_list(realloc_head, bridge, b_res+3, b_res_3_size,
1170                                  pci_cardbus_mem_size);
1171         }
1172 
1173 handle_done:
1174         ;
1175 }
1176 
1177 void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
1178 {
1179         struct pci_dev *dev;
1180         unsigned long mask, prefmask, type2 = 0, type3 = 0;
1181         resource_size_t additional_mem_size = 0, additional_io_size = 0;
1182         struct resource *b_res;
1183         int ret;
1184 
1185         list_for_each_entry(dev, &bus->devices, bus_list) {
1186                 struct pci_bus *b = dev->subordinate;
1187                 if (!b)
1188                         continue;
1189 
1190                 switch (dev->hdr_type) {
1191                 case PCI_HEADER_TYPE_CARDBUS:
1192                         pci_bus_size_cardbus(b, realloc_head);
1193                         break;
1194 
1195                 case PCI_HEADER_TYPE_BRIDGE:
1196                 default:
1197                         __pci_bus_size_bridges(b, realloc_head);
1198                         break;
1199                 }
1200         }
1201 
1202         /* The root bus? */
1203         if (pci_is_root_bus(bus))
1204                 return;
1205 
1206         switch (bus->self->hdr_type) {
1207         case PCI_HEADER_TYPE_CARDBUS:
1208                 /* Don't size CardBuses yet */
1209                 break;
1210 
1211         case PCI_HEADER_TYPE_BRIDGE:
1212                 pci_bridge_check_ranges(bus);
1213                 if (bus->self->is_hotplug_bridge) {
1214                         additional_io_size  = pci_hotplug_io_size;
1215                         additional_mem_size = pci_hotplug_mem_size;
1216                 }
1217                 /* Fall through */
1218         default:
1219                 pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
1220                              additional_io_size, realloc_head);
1221 
1222                 /*
1223                  * If there's a 64-bit prefetchable MMIO window, compute
1224                  * the size required to put all 64-bit prefetchable
1225                  * resources in it.
1226                  */
1227                 b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES];
1228                 mask = IORESOURCE_MEM;
1229                 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
1230                 if (b_res[2].flags & IORESOURCE_MEM_64) {
1231                         prefmask |= IORESOURCE_MEM_64;
1232                         ret = pbus_size_mem(bus, prefmask, prefmask,
1233                                   prefmask, prefmask,
1234                                   realloc_head ? 0 : additional_mem_size,
1235                                   additional_mem_size, realloc_head);
1236 
1237                         /*
1238                          * If successful, all non-prefetchable resources
1239                          * and any 32-bit prefetchable resources will go in
1240                          * the non-prefetchable window.
1241                          */
1242                         if (ret == 0) {
1243                                 mask = prefmask;
1244                                 type2 = prefmask & ~IORESOURCE_MEM_64;
1245                                 type3 = prefmask & ~IORESOURCE_PREFETCH;
1246                         }
1247                 }
1248 
1249                 /*
1250                  * If there is no 64-bit prefetchable window, compute the
1251                  * size required to put all prefetchable resources in the
1252                  * 32-bit prefetchable window (if there is one).
1253                  */
1254                 if (!type2) {
1255                         prefmask &= ~IORESOURCE_MEM_64;
1256                         ret = pbus_size_mem(bus, prefmask, prefmask,
1257                                          prefmask, prefmask,
1258                                          realloc_head ? 0 : additional_mem_size,
1259                                          additional_mem_size, realloc_head);
1260 
1261                         /*
1262                          * If successful, only non-prefetchable resources
1263                          * will go in the non-prefetchable window.
1264                          */
1265                         if (ret == 0)
1266                                 mask = prefmask;
1267                         else
1268                                 additional_mem_size += additional_mem_size;
1269 
1270                         type2 = type3 = IORESOURCE_MEM;
1271                 }
1272 
1273                 /*
1274                  * Compute the size required to put everything else in the
1275                  * non-prefetchable window. This includes:
1276                  *
1277                  *   - all non-prefetchable resources
1278                  *   - 32-bit prefetchable resources if there's a 64-bit
1279                  *     prefetchable window or no prefetchable window at all
1280                  *   - 64-bit prefetchable resources if there's no prefetchable
1281                  *     window at all
1282                  *
1283                  * Note that the strategy in __pci_assign_resource() must match
1284                  * that used here. Specifically, we cannot put a 32-bit
1285                  * prefetchable resource in a 64-bit prefetchable window.
1286                  */
1287                 pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
1288                                 realloc_head ? 0 : additional_mem_size,
1289                                 additional_mem_size, realloc_head);
1290                 break;
1291         }
1292 }
1293 
1294 void pci_bus_size_bridges(struct pci_bus *bus)
1295 {
1296         __pci_bus_size_bridges(bus, NULL);
1297 }
1298 EXPORT_SYMBOL(pci_bus_size_bridges);
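
/*
 * Summary of how the masks computed in __pci_bus_size_bridges() above end
 * up for the final pbus_size_mem() call that sizes the non-prefetchable
 * window (MEM, PREFETCH and MEM_64 stand for the IORESOURCE_* flags):
 *
 *   64-bit prefetchable window present and sized successfully:
 *	mask  = MEM | PREFETCH | MEM_64
 *	type2 = MEM | PREFETCH		(32-bit prefetchable BARs fall back
 *					 into the non-prefetchable window)
 *	type3 = MEM | MEM_64		(so do 64-bit non-prefetchable BARs)
 *
 *   prefetchable window only usable as 32-bit, sized successfully:
 *	mask  = MEM | PREFETCH
 *	type2 = type3 = MEM		(all prefetchable BARs stay in the
 *					 32-bit prefetchable window)
 *
 *   no usable prefetchable window:
 *	mask = type2 = type3 = MEM	(everything shares the
 *					 non-prefetchable window)
 */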
1299 
1300 static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
1301 {
1302         int i;
1303         struct resource *parent_r;
1304         unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
1305                              IORESOURCE_PREFETCH;
1306 
1307         pci_bus_for_each_resource(b, parent_r, i) {
1308                 if (!parent_r)
1309                         continue;
1310 
1311                 if ((r->flags & mask) == (parent_r->flags & mask) &&
1312                     resource_contains(parent_r, r))
1313                         request_resource(parent_r, r);
1314         }
1315 }
1316 
1317 /*
1318  * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are
1319  * skipped by pbus_assign_resources_sorted().
1320  */
1321 static void pdev_assign_fixed_resources(struct pci_dev *dev)
1322 {
1323         int i;
1324 
1325         for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1326                 struct pci_bus *b;
1327                 struct resource *r = &dev->resource[i];
1328 
1329                 if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
1330                     !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
1331                         continue;
1332 
1333                 b = dev->bus;
1334                 while (b && !r->parent) {
1335                         assign_fixed_resource_on_bus(b, r);
1336                         b = b->parent;
1337                 }
1338         }
1339 }
1340 
1341 void __pci_bus_assign_resources(const struct pci_bus *bus,
1342                                 struct list_head *realloc_head,
1343                                 struct list_head *fail_head)
1344 {
1345         struct pci_bus *b;
1346         struct pci_dev *dev;
1347 
1348         pbus_assign_resources_sorted(bus, realloc_head, fail_head);
1349 
1350         list_for_each_entry(dev, &bus->devices, bus_list) {
1351                 pdev_assign_fixed_resources(dev);
1352 
1353                 b = dev->subordinate;
1354                 if (!b)
1355                         continue;
1356 
1357                 __pci_bus_assign_resources(b, realloc_head, fail_head);
1358 
1359                 switch (dev->hdr_type) {
1360                 case PCI_HEADER_TYPE_BRIDGE:
1361                         if (!pci_is_enabled(dev))
1362                                 pci_setup_bridge(b);
1363                         break;
1364 
1365                 case PCI_HEADER_TYPE_CARDBUS:
1366                         pci_setup_cardbus(b);
1367                         break;
1368 
1369                 default:
1370                         pci_info(dev, "not setting up bridge for bus %04x:%02x\n",
1371                                  pci_domain_nr(b), b->number);
1372                         break;
1373                 }
1374         }
1375 }
1376 
1377 void pci_bus_assign_resources(const struct pci_bus *bus)
1378 {
1379         __pci_bus_assign_resources(bus, NULL, NULL);
1380 }
1381 EXPORT_SYMBOL(pci_bus_assign_resources);
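
/*
 * A minimal sketch of how a native host-bridge driver typically uses the
 * two helpers above once its root bus has been scanned.  The function name
 * is illustrative only (not part of this file); pci_bus_add_devices() is
 * the PCI core helper that makes the scanned devices visible to drivers.
 * Error handling is omitted.
 */
#if 0	/* illustrative sketch, not built */
static void example_host_enumerate(struct pci_bus *root_bus)
{
	/* Depth-first pass: compute window sizes for every bridge below */
	pci_bus_size_bridges(root_bus);

	/* Allocate the resources and program the bridge windows */
	pci_bus_assign_resources(root_bus);

	/* Finally hand the scanned devices to the driver core */
	pci_bus_add_devices(root_bus);
}
#endif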
1382 
1383 static void pci_claim_device_resources(struct pci_dev *dev)
1384 {
1385         int i;
1386 
1387         for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
1388                 struct resource *r = &dev->resource[i];
1389 
1390                 if (!r->flags || r->parent)
1391                         continue;
1392 
1393                 pci_claim_resource(dev, i);
1394         }
1395 }
1396 
1397 static void pci_claim_bridge_resources(struct pci_dev *dev)
1398 {
1399         int i;
1400 
1401         for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
1402                 struct resource *r = &dev->resource[i];
1403 
1404                 if (!r->flags || r->parent)
1405                         continue;
1406 
1407                 pci_claim_bridge_resource(dev, i);
1408         }
1409 }
1410 
1411 static void pci_bus_allocate_dev_resources(struct pci_bus *b)
1412 {
1413         struct pci_dev *dev;
1414         struct pci_bus *child;
1415 
1416         list_for_each_entry(dev, &b->devices, bus_list) {
1417                 pci_claim_device_resources(dev);
1418 
1419                 child = dev->subordinate;
1420                 if (child)
1421                         pci_bus_allocate_dev_resources(child);
1422         }
1423 }
1424 
1425 static void pci_bus_allocate_resources(struct pci_bus *b)
1426 {
1427         struct pci_bus *child;
1428 
1429         /*
1430          * Carry out a depth-first search on the PCI bus tree to allocate
1431          * bridge apertures.  Read the programmed bridge bases and
1432          * recursively claim the respective bridge resources.
1433          */
1434         if (b->self) {
1435                 pci_read_bridge_bases(b);
1436                 pci_claim_bridge_resources(b->self);
1437         }
1438 
1439         list_for_each_entry(child, &b->children, node)
1440                 pci_bus_allocate_resources(child);
1441 }
1442 
1443 void pci_bus_claim_resources(struct pci_bus *b)
1444 {
1445         pci_bus_allocate_resources(b);
1446         pci_bus_allocate_dev_resources(b);
1447 }
1448 EXPORT_SYMBOL(pci_bus_claim_resources);
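
/*
 * A minimal sketch of the alternative, firmware-driven flow: when the boot
 * firmware has already programmed bridge windows and BARs (for example on
 * an ACPI-described host), a caller can claim that setup instead of
 * computing a new assignment.  The function name is illustrative only.
 */
#if 0	/* illustrative sketch, not built */
static void example_claim_firmware_setup(struct pci_bus *root_bus)
{
	/* Read back and insert what firmware assigned, bridges first */
	pci_bus_claim_resources(root_bus);

	/* Devices keep their firmware-assigned BARs */
	pci_bus_add_devices(root_bus);
}
#endif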
1449 
1450 static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
1451                                           struct list_head *add_head,
1452                                           struct list_head *fail_head)
1453 {
1454         struct pci_bus *b;
1455 
1456         pdev_assign_resources_sorted((struct pci_dev *)bridge,
1457                                          add_head, fail_head);
1458 
1459         b = bridge->subordinate;
1460         if (!b)
1461                 return;
1462 
1463         __pci_bus_assign_resources(b, add_head, fail_head);
1464 
1465         switch (bridge->class >> 8) {
1466         case PCI_CLASS_BRIDGE_PCI:
1467                 pci_setup_bridge(b);
1468                 break;
1469 
1470         case PCI_CLASS_BRIDGE_CARDBUS:
1471                 pci_setup_cardbus(b);
1472                 break;
1473 
1474         default:
1475                 pci_info(bridge, "not setting up bridge for bus %04x:%02x\n",
1476                          pci_domain_nr(b), b->number);
1477                 break;
1478         }
1479 }
1480 
1481 #define PCI_RES_TYPE_MASK \
1482         (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
1483          IORESOURCE_MEM_64)
1484 
1485 static void pci_bridge_release_resources(struct pci_bus *bus,
1486                                          unsigned long type)
1487 {
1488         struct pci_dev *dev = bus->self;
1489         struct resource *r;
1490         unsigned old_flags = 0;
1491         struct resource *b_res;
1492         int idx = 1;
1493 
1494         b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
1495 
1496         /*
1497          * 1. If I/O port assignment fails, release bridge I/O port window.
1498          * 2. If non-pref MMIO assignment fails, release bridge non-pref MMIO.
1499          * 3. If 64-bit pref MMIO assignment fails, and bridge pref window
1500          *    is 64-bit, release bridge pref MMIO.
1501          * 4. If pref MMIO assignment fails, and bridge pref window is
1502          *    32-bit, release bridge pref MMIO.
1503          * 5. If pref MMIO assignment fails, and bridge pref window is not
1504          *    assigned, release bridge non-pref MMIO.
1505          */
1506         if (type & IORESOURCE_IO)
1507                 idx = 0;
1508         else if (!(type & IORESOURCE_PREFETCH))
1509                 idx = 1;
1510         else if ((type & IORESOURCE_MEM_64) &&
1511                  (b_res[2].flags & IORESOURCE_MEM_64))
1512                 idx = 2;
1513         else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
1514                  (b_res[2].flags & IORESOURCE_PREFETCH))
1515                 idx = 2;
1516         else
1517                 idx = 1;
1518 
1519         r = &b_res[idx];
1520 
1521         if (!r->parent)
1522                 return;
1523 
1524         /* If there are children, release them all */
1525         release_child_resources(r);
1526         if (!release_resource(r)) {
1527                 type = old_flags = r->flags & PCI_RES_TYPE_MASK;
1528                 pci_info(dev, "resource %d %pR released\n",
1529                          PCI_BRIDGE_RESOURCES + idx, r);
1530                 /* Keep the old size */
1531                 r->end = resource_size(r) - 1;
1532                 r->start = 0;
1533                 r->flags = 0;
1534 
1535                 /* Avoid touching the window without PREF */
1536                 if (type & IORESOURCE_PREFETCH)
1537                         type = IORESOURCE_PREFETCH;
1538                 __pci_setup_bridge(bus, type);
1539                 /* Keep the flags for the next child res under the same bridge */
1540                 r->flags = old_flags;
1541         }
1542 }
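
/*
 * Worked example for the rules above: if assignment of a 64-bit
 * prefetchable resource failed, "type" carries IORESOURCE_MEM |
 * IORESOURCE_PREFETCH | IORESOURCE_MEM_64.  On a bridge whose b_res[2]
 * window is 64-bit capable, rule 3 selects idx = 2 and the prefetchable
 * window is released.  On a bridge with no prefetchable window at all,
 * neither rule 3 nor rule 4 matches, so rule 5 leaves idx = 1 and the
 * non-prefetchable window is released instead.
 */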
1543 
1544 enum release_type {
1545         leaf_only,
1546         whole_subtree,
1547 };
1548 
1549 /*
1550  * Try to release PCI bridge resources from leaf bridge, so we can allocate
1551  * a larger window later.
1552  */
1553 static void pci_bus_release_bridge_resources(struct pci_bus *bus,
1554                                              unsigned long type,
1555                                              enum release_type rel_type)
1556 {
1557         struct pci_dev *dev;
1558         bool is_leaf_bridge = true;
1559 
1560         list_for_each_entry(dev, &bus->devices, bus_list) {
1561                 struct pci_bus *b = dev->subordinate;
1562                 if (!b)
1563                         continue;
1564 
1565                 is_leaf_bridge = false;
1566 
1567                 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1568                         continue;
1569 
1570                 if (rel_type == whole_subtree)
1571                         pci_bus_release_bridge_resources(b, type,
1572                                                  whole_subtree);
1573         }
1574 
1575         if (pci_is_root_bus(bus))
1576                 return;
1577 
1578         if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1579                 return;
1580 
1581         if ((rel_type == whole_subtree) || is_leaf_bridge)
1582                 pci_bridge_release_resources(bus, type);
1583 }
1584 
1585 static void pci_bus_dump_res(struct pci_bus *bus)
1586 {
1587         struct resource *res;
1588         int i;
1589 
1590         pci_bus_for_each_resource(bus, res, i) {
1591                 if (!res || !res->end || !res->flags)
1592                         continue;
1593 
1594                 dev_info(&bus->dev, "resource %d %pR\n", i, res);
1595         }
1596 }
1597 
1598 static void pci_bus_dump_resources(struct pci_bus *bus)
1599 {
1600         struct pci_bus *b;
1601         struct pci_dev *dev;
1602 
1604         pci_bus_dump_res(bus);
1605 
1606         list_for_each_entry(dev, &bus->devices, bus_list) {
1607                 b = dev->subordinate;
1608                 if (!b)
1609                         continue;
1610 
1611                 pci_bus_dump_resources(b);
1612         }
1613 }
1614 
1615 static int pci_bus_get_depth(struct pci_bus *bus)
1616 {
1617         int depth = 0;
1618         struct pci_bus *child_bus;
1619 
1620         list_for_each_entry(child_bus, &bus->children, node) {
1621                 int ret;
1622 
1623                 ret = pci_bus_get_depth(child_bus);
1624                 if (ret + 1 > depth)
1625                         depth = ret + 1;
1626         }
1627 
1628         return depth;
1629 }
1630 
1631 /*
1632  * -1: undefined, will auto-detect later
1633  *  0: disabled by user
1634  *  1: disabled by auto-detect
1635  *  2: enabled by user
1636  *  3: enabled by auto-detect
1637  */
1638 enum enable_type {
1639         undefined = -1,
1640         user_disabled,
1641         auto_disabled,
1642         user_enabled,
1643         auto_enabled,
1644 };
1645 
1646 static enum enable_type pci_realloc_enable = undefined;
1647 void __init pci_realloc_get_opt(char *str)
1648 {
1649         if (!strncmp(str, "off", 3))
1650                 pci_realloc_enable = user_disabled;
1651         else if (!strncmp(str, "on", 2))
1652                 pci_realloc_enable = user_enabled;
1653 }
1654 static bool pci_realloc_enabled(enum enable_type enable)
1655 {
1656         return enable >= user_enabled;
1657 }
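
/*
 * Usage note: pci_realloc_get_opt() is fed from the "pci=realloc" kernel
 * command line parameter (see Documentation/admin-guide/kernel-parameters),
 * for example:
 *
 *	pci=realloc=on		force reallocation of bridge windows
 *	pci=realloc=off		never reallocate, keep the firmware setup
 *
 * Without an explicit setting, pci_realloc_detect() below makes the call.
 */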
1658 
1659 #if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
1660 static int iov_resources_unassigned(struct pci_dev *dev, void *data)
1661 {
1662         int i;
1663         bool *unassigned = data;
1664 
1665         for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1666                 struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
1667                 struct pci_bus_region region;
1668 
1669                 /* Not assigned or rejected by kernel? */
1670                 if (!r->flags)
1671                         continue;
1672 
1673                 pcibios_resource_to_bus(dev->bus, &region, r);
1674                 if (!region.start) {
1675                         *unassigned = true;
1676                         return 1; /* Return early from pci_walk_bus() */
1677                 }
1678         }
1679 
1680         return 0;
1681 }
1682 
1683 static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1684                                            enum enable_type enable_local)
1685 {
1686         bool unassigned = false;
1687         struct pci_host_bridge *host;
1688 
1689         if (enable_local != undefined)
1690                 return enable_local;
1691 
1692         host = pci_find_host_bridge(bus);
1693         if (host->preserve_config)
1694                 return auto_disabled;
1695 
1696         pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
1697         if (unassigned)
1698                 return auto_enabled;
1699 
1700         return enable_local;
1701 }
1702 #else
1703 static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1704                                            enum enable_type enable_local)
1705 {
1706         return enable_local;
1707 }
1708 #endif
1709 
1710 /*
1711  * The first try will not touch PCI bridge windows.
1712  * The second and later tries will release undersized leaf bridge windows.
1713  * Retries continue up to the maximum bus depth if no good fit is found.
1714  */
1715 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
1716 {
1717         LIST_HEAD(realloc_head);
1718         /* List of resources that want additional resources */
1719         struct list_head *add_list = NULL;
1720         int tried_times = 0;
1721         enum release_type rel_type = leaf_only;
1722         LIST_HEAD(fail_head);
1723         struct pci_dev_resource *fail_res;
1724         int pci_try_num = 1;
1725         enum enable_type enable_local;
1726 
1727         /* Retry with realloc only if the user or auto-detection enabled it */
1728         enable_local = pci_realloc_detect(bus, pci_realloc_enable);
1729         if (pci_realloc_enabled(enable_local)) {
1730                 int max_depth = pci_bus_get_depth(bus);
1731 
1732                 pci_try_num = max_depth + 1;
1733                 dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
1734                          max_depth, pci_try_num);
1735         }
1736 
1737 again:
1738         /*
1739          * Only the last try uses add_list; earlier tries treat optional
1740          * resources as required, so parent bridge windows can be reallocated.
1741          */
1742         if (tried_times + 1 == pci_try_num)
1743                 add_list = &realloc_head;
1744         /*
1745          * Depth first, calculate sizes and alignments of all subordinate buses.
1746          */
1747         __pci_bus_size_bridges(bus, add_list);
1748 
1749         /* Depth last, allocate resources and update the hardware. */
1750         __pci_bus_assign_resources(bus, add_list, &fail_head);
1751         if (add_list)
1752                 BUG_ON(!list_empty(add_list));
1753         tried_times++;
1754 
1755         /* Did any device fail to get its resources? */
1756         if (list_empty(&fail_head))
1757                 goto dump;
1758 
1759         if (tried_times >= pci_try_num) {
1760                 if (enable_local == undefined)
1761                         dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
1762                 else if (enable_local == auto_enabled)
1763                         dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
1764 
1765                 free_list(&fail_head);
1766                 goto dump;
1767         }
1768 
1769         dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
1770                  tried_times + 1);
1771 
1772         /* From the third try onwards, release whole subtrees, not just leaves */
1773         if ((tried_times + 1) > 2)
1774                 rel_type = whole_subtree;
1775 
1776         /*
1777          * Try to release leaf bridge resources that aren't big enough to
1778          * contain the child device resources under that bridge.
1779          */
1780         list_for_each_entry(fail_res, &fail_head, list)
1781                 pci_bus_release_bridge_resources(fail_res->dev->bus,
1782                                                  fail_res->flags & PCI_RES_TYPE_MASK,
1783                                                  rel_type);
1784 
1785         /* Restore size and flags */
1786         list_for_each_entry(fail_res, &fail_head, list) {
1787                 struct resource *res = fail_res->res;
1788                 int idx;
1789 
1790                 res->start = fail_res->start;
1791                 res->end = fail_res->end;
1792                 res->flags = fail_res->flags;
1793 
1794                 if (pci_is_bridge(fail_res->dev)) {
1795                         idx = res - &fail_res->dev->resource[0];
1796                         if (idx >= PCI_BRIDGE_RESOURCES &&
1797                             idx <= PCI_BRIDGE_RESOURCE_END)
1798                                 res->flags = 0;
1799                 }
1800         }
1801         free_list(&fail_head);
1802 
1803         goto again;
1804 
1805 dump:
1806         /* Dump the resources on all buses */
1807         pci_bus_dump_resources(bus);
1808 }
1809 
1810 void __init pci_assign_unassigned_resources(void)
1811 {
1812         struct pci_bus *root_bus;
1813 
1814         list_for_each_entry(root_bus, &pci_root_buses, node) {
1815                 pci_assign_unassigned_root_bus_resources(root_bus);
1816 
1817                 /* Make sure the root bridge has a companion ACPI device */
1818                 if (ACPI_HANDLE(root_bus->bridge))
1819                         acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
1820         }
1821 }
1822 
1823 static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
1824                                  struct list_head *add_list,
1825                                  resource_size_t available)
1826 {
1827         struct pci_dev_resource *dev_res;
1828 
1829         if (res->parent)
1830                 return;
1831 
1832         if (resource_size(res) >= available)
1833                 return;
1834 
1835         dev_res = res_to_dev_res(add_list, res);
1836         if (!dev_res)
1837                 return;
1838 
1839         /* Is there room to extend the window? */
1840         if (available - resource_size(res) <= dev_res->add_size)
1841                 return;
1842 
1843         dev_res->add_size = available - resource_size(res);
1844         pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
1845                 &dev_res->add_size);
1846 }
1847 
1848 static void pci_bus_distribute_available_resources(struct pci_bus *bus,
1849                                             struct list_head *add_list,
1850                                             resource_size_t available_io,
1851                                             resource_size_t available_mmio,
1852                                             resource_size_t available_mmio_pref)
1853 {
1854         resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
1855         unsigned int normal_bridges = 0, hotplug_bridges = 0;
1856         struct resource *io_res, *mmio_res, *mmio_pref_res;
1857         struct pci_dev *dev, *bridge = bus->self;
1858 
1859         io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
1860         mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
1861         mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
1862 
1863         /*
1864          * Update additional resource list (add_list) to fill all the
1865          * extra resource space available for this port except the space
1866          * calculated in __pci_bus_size_bridges() which covers all the
1867          * devices currently connected to the port and below.
1868          */
1869         extend_bridge_window(bridge, io_res, add_list, available_io);
1870         extend_bridge_window(bridge, mmio_res, add_list, available_mmio);
1871         extend_bridge_window(bridge, mmio_pref_res, add_list,
1872                              available_mmio_pref);
1873 
1874         /*
1875          * Calculate how many hotplug bridges and normal bridges there
1876          * are on this bus.  We will distribute the additional available
1877          * resources between hotplug bridges.
1878          */
1879         for_each_pci_bridge(dev, bus) {
1880                 if (dev->is_hotplug_bridge)
1881                         hotplug_bridges++;
1882                 else
1883                         normal_bridges++;
1884         }
1885 
1886         /*
1887          * There is only one bridge on the bus so it gets all available
1888          * resources which it can then distribute to the possible hotplug
1889          * bridges below.
1890          */
1891         if (hotplug_bridges + normal_bridges == 1) {
1892                 dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
1893                 if (dev->subordinate) {
1894                         pci_bus_distribute_available_resources(dev->subordinate,
1895                                 add_list, available_io, available_mmio,
1896                                 available_mmio_pref);
1897                 }
1898                 return;
1899         }
1900 
1901         if (hotplug_bridges == 0)
1902                 return;
1903 
1904         /*
1905          * Calculate the total amount of extra resource space we can
1906          * pass to bridges below this one.  This is basically the
1907          * extra space reduced by the minimal required space for the
1908          * non-hotplug bridges.
1909          */
1910         remaining_io = available_io;
1911         remaining_mmio = available_mmio;
1912         remaining_mmio_pref = available_mmio_pref;
1913 
1914         for_each_pci_bridge(dev, bus) {
1915                 const struct resource *res;
1916 
1917                 if (dev->is_hotplug_bridge)
1918                         continue;
1919 
1920                 /*
1921                  * Reduce the available resource space by what the
1922                  * bridge and devices below it occupy.
1923                  */
1924                 res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
1925                 if (!res->parent && available_io > resource_size(res))
1926                         remaining_io -= resource_size(res);
1927 
1928                 res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
1929                 if (!res->parent && available_mmio > resource_size(res))
1930                         remaining_mmio -= resource_size(res);
1931 
1932                 res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
1933                 if (!res->parent && available_mmio_pref > resource_size(res))
1934                         remaining_mmio_pref -= resource_size(res);
1935         }
1936 
1937         /*
1938          * Go over devices on this bus and distribute the remaining
1939          * resource space between hotplug bridges.
1940          */
1941         for_each_pci_bridge(dev, bus) {
1942                 resource_size_t align, io, mmio, mmio_pref;
1943                 struct pci_bus *b;
1944 
1945                 b = dev->subordinate;
1946                 if (!b || !dev->is_hotplug_bridge)
1947                         continue;
1948 
1949                 /*
1950                  * Distribute available extra resources equally between
1951                  * hotplug-capable downstream ports taking alignment into
1952                  * account.
1953                  */
1954                 align = pci_resource_alignment(bridge, io_res);
1955                 io = div64_ul(available_io, hotplug_bridges);
1956                 io = min(ALIGN(io, align), remaining_io);
1957                 remaining_io -= io;
1958 
1959                 align = pci_resource_alignment(bridge, mmio_res);
1960                 mmio = div64_ul(available_mmio, hotplug_bridges);
1961                 mmio = min(ALIGN(mmio, align), remaining_mmio);
1962                 remaining_mmio -= mmio;
1963 
1964                 align = pci_resource_alignment(bridge, mmio_pref_res);
1965                 mmio_pref = div64_ul(available_mmio_pref, hotplug_bridges);
1966                 mmio_pref = min(ALIGN(mmio_pref, align), remaining_mmio_pref);
1967                 remaining_mmio_pref -= mmio_pref;
1968 
1969                 pci_bus_distribute_available_resources(b, add_list, io, mmio,
1970                                                        mmio_pref);
1971         }
1972 }
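
/*
 * Worked example: assume available_mmio is 100 MB, one non-hotplug bridge
 * below this port still needs a 4 MB window, there are two hotplug bridges,
 * and the bridge window alignment is 1 MB.  Then remaining_mmio becomes
 * 100 - 4 = 96 MB, and each hotplug bridge is offered
 * div64_ul(100 MB, 2) = 50 MB (already 1 MB aligned): the first one gets
 * min(50, 96) = 50 MB, leaving 46 MB, and the second one gets
 * min(50, 46) = 46 MB.
 */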
1973 
1974 static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
1975                                                      struct list_head *add_list)
1976 {
1977         resource_size_t available_io, available_mmio, available_mmio_pref;
1978         const struct resource *res;
1979 
1980         if (!bridge->is_hotplug_bridge)
1981                 return;
1982 
1983         /* Take the initial extra resources from the hotplug port */
1984         res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
1985         available_io = resource_size(res);
1986         res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
1987         available_mmio = resource_size(res);
1988         res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
1989         available_mmio_pref = resource_size(res);
1990 
1991         pci_bus_distribute_available_resources(bridge->subordinate,
1992                                                add_list, available_io,
1993                                                available_mmio,
1994                                                available_mmio_pref);
1995 }
1996 
1997 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
1998 {
1999         struct pci_bus *parent = bridge->subordinate;
2000         /* List of resources that want additional resources */
2001         LIST_HEAD(add_list);
2002 
2003         int tried_times = 0;
2004         LIST_HEAD(fail_head);
2005         struct pci_dev_resource *fail_res;
2006         int retval;
2007 
2008 again:
2009         __pci_bus_size_bridges(parent, &add_list);
2010 
2011         /*
2012          * Distribute remaining resources (if any) equally between hotplug
2013          * bridges below.  This makes it possible to extend the hierarchy
2014          * later without running out of resources.
2015          */
2016         pci_bridge_distribute_available_resources(bridge, &add_list);
2017 
2018         __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
2019         BUG_ON(!list_empty(&add_list));
2020         tried_times++;
2021 
2022         if (list_empty(&fail_head))
2023                 goto enable_all;
2024 
2025         if (tried_times >= 2) {
2026                 /* Still failing; no point in trying again */
2027                 free_list(&fail_head);
2028                 goto enable_all;
2029         }
2030 
2031         printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
2032                          tried_times + 1);
2033 
2034         /*
2035          * Try to release leaf bridge's resources that aren't big enough
2036          * to contain child device resources.
2037          */
2038         list_for_each_entry(fail_res, &fail_head, list)
2039                 pci_bus_release_bridge_resources(fail_res->dev->bus,
2040                                                  fail_res->flags & PCI_RES_TYPE_MASK,
2041                                                  whole_subtree);
2042 
2043         /* Restore size and flags */
2044         list_for_each_entry(fail_res, &fail_head, list) {
2045                 struct resource *res = fail_res->res;
2046                 int idx;
2047 
2048                 res->start = fail_res->start;
2049                 res->end = fail_res->end;
2050                 res->flags = fail_res->flags;
2051 
2052                 if (pci_is_bridge(fail_res->dev)) {
2053                         idx = res - &fail_res->dev->resource[0];
2054                         if (idx >= PCI_BRIDGE_RESOURCES &&
2055                             idx <= PCI_BRIDGE_RESOURCE_END)
2056                                 res->flags = 0;
2057                 }
2058         }
2059         free_list(&fail_head);
2060 
2061         goto again;
2062 
2063 enable_all:
2064         retval = pci_reenable_device(bridge);
2065         if (retval)
2066                 pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
2067         pci_set_master(bridge);
2068 }
2069 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
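
/*
 * A minimal sketch of the hotplug path that relies on the helper above:
 * after a card is plugged in, the slot driver scans the new devices and
 * then sizes and assigns resources for them before binding drivers.  The
 * function name and the fixed devfn are illustrative only; pci_scan_slot(),
 * pci_hp_add_bridge(), pcie_bus_configure_settings() and
 * pci_bus_add_devices() are PCI core helpers.  Error handling is trimmed.
 */
#if 0	/* illustrative sketch, not built */
static void example_hotplug_bringup(struct pci_dev *hotplug_bridge)
{
	struct pci_bus *parent = hotplug_bridge->subordinate;
	struct pci_dev *dev;

	/* Discover whatever sits in the slot */
	if (!pci_scan_slot(parent, PCI_DEVFN(0, 0)))
		return;

	/* Scan behind any bridges on the new card, assigning bus numbers */
	for_each_pci_bridge(dev, parent)
		pci_hp_add_bridge(dev);

	/* Size and assign windows/BARs for the whole new subtree */
	pci_assign_unassigned_bridge_resources(hotplug_bridge);

	pcie_bus_configure_settings(parent);
	pci_bus_add_devices(parent);
}
#endif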
2070 
2071 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
2072 {
2073         struct pci_dev_resource *dev_res;
2074         struct pci_dev *next;
2075         LIST_HEAD(saved);
2076         LIST_HEAD(added);
2077         LIST_HEAD(failed);
2078         unsigned int i;
2079         int ret;
2080 
2081         /* Walk up toward the root bus, releasing bridge BARs when possible */
2082         next = bridge;
2083         do {
2084                 bridge = next;
2085                 for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
2086                      i++) {
2087                         struct resource *res = &bridge->resource[i];
2088 
2089                         if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
2090                                 continue;
2091 
2092                         /* Ignore BARs which are still in use */
2093                         if (res->child)
2094                                 continue;
2095 
2096                         ret = add_to_list(&saved, bridge, res, 0, 0);
2097                         if (ret)
2098                                 goto cleanup;
2099 
2100                         pci_info(bridge, "BAR %d: releasing %pR\n",
2101                                  i, res);
2102 
2103                         if (res->parent)
2104                                 release_resource(res);
2105                         res->start = 0;
2106                         res->end = 0;
2107                         break;
2108                 }
2109                 if (i == PCI_BRIDGE_RESOURCE_END)
2110                         break;
2111 
2112                 next = bridge->bus ? bridge->bus->self : NULL;
2113         } while (next);
2114 
2115         if (list_empty(&saved))
2116                 return -ENOENT;
2117 
2118         __pci_bus_size_bridges(bridge->subordinate, &added);
2119         __pci_bridge_assign_resources(bridge, &added, &failed);
2120         BUG_ON(!list_empty(&added));
2121 
2122         if (!list_empty(&failed)) {
2123                 ret = -ENOSPC;
2124                 goto cleanup;
2125         }
2126 
2127         list_for_each_entry(dev_res, &saved, list) {
2128                 /* Skip the bridge we just assigned resources for */
2129                 if (bridge == dev_res->dev)
2130                         continue;
2131 
2132                 bridge = dev_res->dev;
2133                 pci_setup_bridge(bridge->subordinate);
2134         }
2135 
2136         free_list(&saved);
2137         return 0;
2138 
2139 cleanup:
2140         /* Restore size and flags */
2141         list_for_each_entry(dev_res, &failed, list) {
2142                 struct resource *res = dev_res->res;
2143 
2144                 res->start = dev_res->start;
2145                 res->end = dev_res->end;
2146                 res->flags = dev_res->flags;
2147         }
2148         free_list(&failed);
2149 
2150         /* Revert to the old configuration */
2151         list_for_each_entry(dev_res, &saved, list) {
2152                 struct resource *res = dev_res->res;
2153 
2154                 bridge = dev_res->dev;
2155                 i = res - bridge->resource;
2156 
2157                 res->start = dev_res->start;
2158                 res->end = dev_res->end;
2159                 res->flags = dev_res->flags;
2160 
2161                 pci_claim_resource(bridge, i);
2162                 pci_setup_bridge(bridge->subordinate);
2163         }
2164         free_list(&saved);
2165 
2166         return ret;
2167 }
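
/*
 * Usage note: pci_reassign_bridge_resources() is meant for callers such as
 * the resizable BAR support, which need the enclosing bridge windows of a
 * given type released and re-sized around a grown BAR, roughly:
 *
 *	ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
 *
 * where "dev" is the endpoint whose BAR changed and "res" is that BAR
 * (illustrative names, not defined here).  On failure the previously
 * released windows are claimed back and the bridges are re-programmed.
 */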
2168 
2169 void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
2170 {
2171         struct pci_dev *dev;
2172         /* List of resources that want additional resources */
2173         LIST_HEAD(add_list);
2174 
2175         down_read(&pci_bus_sem);
2176         for_each_pci_bridge(dev, bus)
2177                 if (pci_has_subordinate(dev))
2178                         __pci_bus_size_bridges(dev->subordinate, &add_list);
2179         up_read(&pci_bus_sem);
2180         __pci_bus_assign_resources(bus, &add_list, NULL);
2181         BUG_ON(!list_empty(&add_list));
2182 }
2183 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
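
/*
 * Usage note: this is the helper used when a bus is rescanned at runtime.
 * pci_rescan_bus() in the PCI core follows roughly this sequence:
 *
 *	max = pci_scan_child_bus(bus);
 *	pci_assign_unassigned_bus_resources(bus);
 *	pci_bus_add_devices(bus);
 *
 * so that newly discovered devices get their BARs assigned before drivers
 * are bound to them.
 */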
