root/kernel/resource.c


DEFINITIONS

This source file includes the following definitions.
  1. next_resource
  2. r_next
  3. r_start
  4. r_stop
  5. r_show
  6. ioresources_init
  7. free_resource
  8. alloc_resource
  9. __request_resource
  10. __release_resource
  11. __release_child_resources
  12. release_child_resources
  13. request_resource_conflict
  14. request_resource
  15. release_resource
  16. find_next_iomem_res
  17. __walk_iomem_res_desc
  18. walk_iomem_res_desc
  19. walk_system_ram_res
  20. walk_mem_res
  21. walk_system_ram_range
  22. __is_ram
  23. page_is_ram
  24. region_intersects
  25. arch_remove_reservations
  26. simple_align_resource
  27. resource_clip
  28. __find_resource
  29. find_resource
  30. reallocate_resource
  31. allocate_resource
  32. lookup_resource
  33. __insert_resource
  34. insert_resource_conflict
  35. insert_resource
  36. insert_resource_expand_to_fit
  37. remove_resource
  38. __adjust_resource
  39. adjust_resource
  40. __reserve_region_with_split
  41. reserve_region_with_split
  42. resource_alignment
  43. __request_region
  44. __release_region
  45. release_mem_region_adjustable
  46. devm_resource_release
  47. devm_request_resource
  48. devm_resource_match
  49. devm_release_resource
  50. devm_region_release
  51. devm_region_match
  52. __devm_request_region
  53. __devm_release_region
  54. reserve_setup
  55. iomem_map_sanity_check
  56. iomem_is_exclusive
  57. resource_list_create_entry
  58. resource_list_free
  59. __request_free_mem_region
  60. devm_request_free_mem_region
  61. request_free_mem_region
  62. strict_iomem

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *      linux/kernel/resource.c
   4  *
   5  * Copyright (C) 1999   Linus Torvalds
   6  * Copyright (C) 1999   Martin Mares <mj@ucw.cz>
   7  *
   8  * Arbitrary resource management.
   9  */
  10 
  11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12 
  13 #include <linux/export.h>
  14 #include <linux/errno.h>
  15 #include <linux/ioport.h>
  16 #include <linux/init.h>
  17 #include <linux/slab.h>
  18 #include <linux/spinlock.h>
  19 #include <linux/fs.h>
  20 #include <linux/proc_fs.h>
  21 #include <linux/sched.h>
  22 #include <linux/seq_file.h>
  23 #include <linux/device.h>
  24 #include <linux/pfn.h>
  25 #include <linux/mm.h>
  26 #include <linux/resource_ext.h>
  27 #include <asm/io.h>
  28 
  29 
  30 struct resource ioport_resource = {
  31         .name   = "PCI IO",
  32         .start  = 0,
  33         .end    = IO_SPACE_LIMIT,
  34         .flags  = IORESOURCE_IO,
  35 };
  36 EXPORT_SYMBOL(ioport_resource);
  37 
  38 struct resource iomem_resource = {
  39         .name   = "PCI mem",
  40         .start  = 0,
  41         .end    = -1,
  42         .flags  = IORESOURCE_MEM,
  43 };
  44 EXPORT_SYMBOL(iomem_resource);
  45 
  46 /* constraints to be met while allocating resources */
  47 struct resource_constraint {
  48         resource_size_t min, max, align;
  49         resource_size_t (*alignf)(void *, const struct resource *,
  50                         resource_size_t, resource_size_t);
  51         void *alignf_data;
  52 };
  53 
  54 static DEFINE_RWLOCK(resource_lock);
  55 
  56 /*
  57  * For memory hotplug, there is no way to free resource entries allocated
  58  * by boot mem after the system is up. So to reuse such an entry we
  59  * need to remember it.
  60  */
  61 static struct resource *bootmem_resource_free;
  62 static DEFINE_SPINLOCK(bootmem_resource_lock);
  63 
  64 static struct resource *next_resource(struct resource *p, bool sibling_only)
  65 {
  66         /* Caller wants to traverse through siblings only */
  67         if (sibling_only)
  68                 return p->sibling;
  69 
  70         if (p->child)
  71                 return p->child;
  72         while (!p->sibling && p->parent)
  73                 p = p->parent;
  74         return p->sibling;
  75 }
  76 
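Editor's illustration (not part of the original file): with sibling_only false,
next_resource() performs a depth-first, pre-order walk over a hypothetical tree:

/*
 *      iomem_resource
 *      |-- A          visited 1st  (root->child)
 *      |   |-- A1     visited 2nd  (A->child)
 *      |   `-- A2     visited 3rd  (A1->sibling)
 *      `-- B          visited 4th  (A2 has no sibling; climb to A, take A->sibling)
 *
 * With sibling_only == true, only the top level (A, B) is visited.
 */
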
  77 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
  78 {
  79         struct resource *p = v;
  80         (*pos)++;
  81         return (void *)next_resource(p, false);
  82 }
  83 
  84 #ifdef CONFIG_PROC_FS
  85 
  86 enum { MAX_IORES_LEVEL = 5 };
  87 
  88 static void *r_start(struct seq_file *m, loff_t *pos)
  89         __acquires(resource_lock)
  90 {
  91         struct resource *p = PDE_DATA(file_inode(m->file));
  92         loff_t l = 0;
  93         read_lock(&resource_lock);
  94         for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
  95                 ;
  96         return p;
  97 }
  98 
  99 static void r_stop(struct seq_file *m, void *v)
 100         __releases(resource_lock)
 101 {
 102         read_unlock(&resource_lock);
 103 }
 104 
 105 static int r_show(struct seq_file *m, void *v)
 106 {
 107         struct resource *root = PDE_DATA(file_inode(m->file));
 108         struct resource *r = v, *p;
 109         unsigned long long start, end;
 110         int width = root->end < 0x10000 ? 4 : 8;
 111         int depth;
 112 
 113         for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
 114                 if (p->parent == root)
 115                         break;
 116 
 117         if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
 118                 start = r->start;
 119                 end = r->end;
 120         } else {
 121                 start = end = 0;
 122         }
 123 
 124         seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
 125                         depth * 2, "",
 126                         width, start,
 127                         width, end,
 128                         r->name ? r->name : "<BAD>");
 129         return 0;
 130 }
 131 
 132 static const struct seq_operations resource_op = {
 133         .start  = r_start,
 134         .next   = r_next,
 135         .stop   = r_stop,
 136         .show   = r_show,
 137 };
 138 
 139 static int __init ioresources_init(void)
 140 {
 141         proc_create_seq_data("ioports", 0, NULL, &resource_op,
 142                         &ioport_resource);
 143         proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
 144         return 0;
 145 }
 146 __initcall(ioresources_init);
 147 
 148 #endif /* CONFIG_PROC_FS */
 149 
 150 static void free_resource(struct resource *res)
 151 {
 152         if (!res)
 153                 return;
 154 
 155         if (!PageSlab(virt_to_head_page(res))) {
 156                 spin_lock(&bootmem_resource_lock);
 157                 res->sibling = bootmem_resource_free;
 158                 bootmem_resource_free = res;
 159                 spin_unlock(&bootmem_resource_lock);
 160         } else {
 161                 kfree(res);
 162         }
 163 }
 164 
 165 static struct resource *alloc_resource(gfp_t flags)
 166 {
 167         struct resource *res = NULL;
 168 
 169         spin_lock(&bootmem_resource_lock);
 170         if (bootmem_resource_free) {
 171                 res = bootmem_resource_free;
 172                 bootmem_resource_free = res->sibling;
 173         }
 174         spin_unlock(&bootmem_resource_lock);
 175 
 176         if (res)
 177                 memset(res, 0, sizeof(struct resource));
 178         else
 179                 res = kzalloc(sizeof(struct resource), flags);
 180 
 181         return res;
 182 }
 183 
 184 /* Return the conflict entry if you can't request it */
 185 static struct resource * __request_resource(struct resource *root, struct resource *new)
 186 {
 187         resource_size_t start = new->start;
 188         resource_size_t end = new->end;
 189         struct resource *tmp, **p;
 190 
 191         if (end < start)
 192                 return root;
 193         if (start < root->start)
 194                 return root;
 195         if (end > root->end)
 196                 return root;
 197         p = &root->child;
 198         for (;;) {
 199                 tmp = *p;
 200                 if (!tmp || tmp->start > end) {
 201                         new->sibling = tmp;
 202                         *p = new;
 203                         new->parent = root;
 204                         return NULL;
 205                 }
 206                 p = &tmp->sibling;
 207                 if (tmp->end < start)
 208                         continue;
 209                 return tmp;
 210         }
 211 }
 212 
 213 static int __release_resource(struct resource *old, bool release_child)
 214 {
 215         struct resource *tmp, **p, *chd;
 216 
 217         p = &old->parent->child;
 218         for (;;) {
 219                 tmp = *p;
 220                 if (!tmp)
 221                         break;
 222                 if (tmp == old) {
 223                         if (release_child || !(tmp->child)) {
 224                                 *p = tmp->sibling;
 225                         } else {
 226                                 for (chd = tmp->child;; chd = chd->sibling) {
 227                                         chd->parent = tmp->parent;
 228                                         if (!(chd->sibling))
 229                                                 break;
 230                                 }
 231                                 *p = tmp->child;
 232                                 chd->sibling = tmp->sibling;
 233                         }
 234                         old->parent = NULL;
 235                         return 0;
 236                 }
 237                 p = &tmp->sibling;
 238         }
 239         return -EINVAL;
 240 }
 241 
 242 static void __release_child_resources(struct resource *r)
 243 {
 244         struct resource *tmp, *p;
 245         resource_size_t size;
 246 
 247         p = r->child;
 248         r->child = NULL;
 249         while (p) {
 250                 tmp = p;
 251                 p = p->sibling;
 252 
 253                 tmp->parent = NULL;
 254                 tmp->sibling = NULL;
 255                 __release_child_resources(tmp);
 256 
 257                 printk(KERN_DEBUG "release child resource %pR\n", tmp);
 258                 /* need to restore size, and keep flags */
 259                 size = resource_size(tmp);
 260                 tmp->start = 0;
 261                 tmp->end = size - 1;
 262         }
 263 }
 264 
 265 void release_child_resources(struct resource *r)
 266 {
 267         write_lock(&resource_lock);
 268         __release_child_resources(r);
 269         write_unlock(&resource_lock);
 270 }
 271 
 272 /**
 273  * request_resource_conflict - request and reserve an I/O or memory resource
 274  * @root: root resource descriptor
 275  * @new: resource descriptor desired by caller
 276  *
  277  * Returns NULL on success, or the conflicting resource on error.
 278  */
 279 struct resource *request_resource_conflict(struct resource *root, struct resource *new)
 280 {
 281         struct resource *conflict;
 282 
 283         write_lock(&resource_lock);
 284         conflict = __request_resource(root, new);
 285         write_unlock(&resource_lock);
 286         return conflict;
 287 }
 288 
 289 /**
 290  * request_resource - request and reserve an I/O or memory resource
 291  * @root: root resource descriptor
 292  * @new: resource descriptor desired by caller
 293  *
 294  * Returns 0 for success, negative error code on error.
 295  */
 296 int request_resource(struct resource *root, struct resource *new)
 297 {
 298         struct resource *conflict;
 299 
 300         conflict = request_resource_conflict(root, new);
 301         return conflict ? -EBUSY : 0;
 302 }
 303 
 304 EXPORT_SYMBOL(request_resource);
 305 
 306 /**
 307  * release_resource - release a previously reserved resource
 308  * @old: resource pointer
 309  */
 310 int release_resource(struct resource *old)
 311 {
 312         int retval;
 313 
 314         write_lock(&resource_lock);
 315         retval = __release_resource(old, true);
 316         write_unlock(&resource_lock);
 317         return retval;
 318 }
 319 
 320 EXPORT_SYMBOL(release_resource);
 321 
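As a usage sketch (an editor's addition; the name and addresses are
hypothetical): a driver claims a fixed MMIO window from iomem_resource at init
and hands it back on teardown.

static struct resource example_mmio = {
        .name   = "example-mmio",       /* hypothetical name */
        .start  = 0xfed40000,           /* hypothetical base */
        .end    = 0xfed40fff,
        .flags  = IORESOURCE_MEM,
};

static int __init example_mmio_claim(void)
{
        int ret = request_resource(&iomem_resource, &example_mmio);

        if (ret)        /* -EBUSY: part of the range is already taken */
                return ret;
        /* ... use the window; later, on teardown: */
        release_resource(&example_mmio);
        return 0;
}
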
 322 /**
 323  * Finds the lowest iomem resource that covers part of [@start..@end].  The
 324  * caller must specify @start, @end, @flags, and @desc (which may be
 325  * IORES_DESC_NONE).
 326  *
 327  * If a resource is found, returns 0 and @*res is overwritten with the part
 328  * of the resource that's within [@start..@end]; if none is found, returns
 329  * -ENODEV.  Returns -EINVAL for invalid parameters.
 330  *
  331  * This function walks the whole tree, not just the first-level children,
 332  * unless @first_lvl is true.
 333  *
 334  * @start:      start address of the resource searched for
 335  * @end:        end address of same resource
 336  * @flags:      flags which the resource must have
 337  * @desc:       descriptor the resource must have
 338  * @first_lvl:  walk only the first level children, if set
 339  * @res:        return ptr, if resource found
 340  */
 341 static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 342                                unsigned long flags, unsigned long desc,
 343                                bool first_lvl, struct resource *res)
 344 {
 345         bool siblings_only = true;
 346         struct resource *p;
 347 
 348         if (!res)
 349                 return -EINVAL;
 350 
 351         if (start >= end)
 352                 return -EINVAL;
 353 
 354         read_lock(&resource_lock);
 355 
 356         for (p = iomem_resource.child; p; p = next_resource(p, siblings_only)) {
 357                 /* If we passed the resource we are looking for, stop */
 358                 if (p->start > end) {
 359                         p = NULL;
 360                         break;
 361                 }
 362 
  363                 /* Skip until we find a range that matches what we are looking for */
 364                 if (p->end < start)
 365                         continue;
 366 
 367                 /*
  368                  * Now that we have found a range that matches what we are looking
  369                  * for, check the flags and the descriptor. If we were not asked to
 370                  * use only the first level, start looking at children as well.
 371                  */
 372                 siblings_only = first_lvl;
 373 
 374                 if ((p->flags & flags) != flags)
 375                         continue;
 376                 if ((desc != IORES_DESC_NONE) && (desc != p->desc))
 377                         continue;
 378 
 379                 /* Found a match, break */
 380                 break;
 381         }
 382 
 383         if (p) {
 384                 /* copy data */
 385                 res->start = max(start, p->start);
 386                 res->end = min(end, p->end);
 387                 res->flags = p->flags;
 388                 res->desc = p->desc;
 389         }
 390 
 391         read_unlock(&resource_lock);
 392         return p ? 0 : -ENODEV;
 393 }
 394 
 395 static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
 396                                  unsigned long flags, unsigned long desc,
 397                                  bool first_lvl, void *arg,
 398                                  int (*func)(struct resource *, void *))
 399 {
 400         struct resource res;
 401         int ret = -EINVAL;
 402 
 403         while (start < end &&
 404                !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
 405                 ret = (*func)(&res, arg);
 406                 if (ret)
 407                         break;
 408 
 409                 start = res.end + 1;
 410         }
 411 
 412         return ret;
 413 }
 414 
 415 /**
  416  * Walks through iomem resources and calls @func() with matching resource
  417  * ranges. This walks the whole tree, not just the first-level children.
  418  * All memory ranges which overlap [@start..@end] and also match @flags
  419  * and @desc are valid candidates.
 420  *
 421  * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 422  * @flags: I/O resource flags
 423  * @start: start addr
 424  * @end: end addr
 425  * @arg: function argument for the callback @func
 426  * @func: callback function that is called for each qualifying resource area
 427  *
 428  * NOTE: For a new descriptor search, define a new IORES_DESC in
 429  * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 430  */
 431 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
 432                 u64 end, void *arg, int (*func)(struct resource *, void *))
 433 {
 434         return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
 435 }
 436 EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
 437 
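A minimal sketch of a walker callback (an editor's addition; the names are
hypothetical): sum the bytes of busy System RAM below 4 GiB. Each struct
resource handed to the callback is already clipped to the requested window,
and a nonzero return value aborts the walk.

static int example_count_bytes(struct resource *res, void *arg)
{
        u64 *total = arg;

        *total += resource_size(res);   /* res is clipped to [start..end] */
        return 0;                       /* nonzero would stop the walk */
}

static u64 example_sum_low_ram(void)
{
        u64 total = 0;

        walk_iomem_res_desc(IORES_DESC_NONE,
                            IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
                            0, 0xffffffffULL, &total, example_count_bytes);
        return total;
}
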
 438 /*
 439  * This function calls the @func callback against all memory ranges of type
  440  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
  441  * This function is only for System RAM; it deals with full ranges and
 442  * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 443  * ranges.
 444  */
 445 int walk_system_ram_res(u64 start, u64 end, void *arg,
 446                         int (*func)(struct resource *, void *))
 447 {
 448         unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 449 
 450         return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
 451                                      arg, func);
 452 }
 453 
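The same callback shape works here (editor's sketch; the names are
hypothetical); kexec-style code uses this walker to enumerate loadable RAM:

static int example_dump_range(struct resource *res, void *arg)
{
        pr_info("System RAM: %pR\n", res);
        return 0;
}

static void example_dump_ram(u64 start, u64 end)
{
        walk_system_ram_res(start, end, NULL, example_dump_range);
}
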
 454 /*
 455  * This function calls the @func callback against all memory ranges, which
  456  * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 457  */
 458 int walk_mem_res(u64 start, u64 end, void *arg,
 459                  int (*func)(struct resource *, void *))
 460 {
 461         unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 462 
 463         return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
 464                                      arg, func);
 465 }
 466 
 467 /*
 468  * This function calls the @func callback against all memory ranges of type
  469  * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 470  * It is to be used only for System RAM.
 471  *
 472  * This will find System RAM ranges that are children of top-level resources
 473  * in addition to top-level System RAM resources.
 474  */
 475 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 476                           void *arg, int (*func)(unsigned long, unsigned long, void *))
 477 {
 478         resource_size_t start, end;
 479         unsigned long flags;
 480         struct resource res;
 481         unsigned long pfn, end_pfn;
 482         int ret = -EINVAL;
 483 
 484         start = (u64) start_pfn << PAGE_SHIFT;
 485         end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
 486         flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 487         while (start < end &&
 488                !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
 489                                     false, &res)) {
 490                 pfn = PFN_UP(res.start);
 491                 end_pfn = PFN_DOWN(res.end + 1);
 492                 if (end_pfn > pfn)
 493                         ret = (*func)(pfn, end_pfn - pfn, arg);
 494                 if (ret)
 495                         break;
 496                 start = res.end + 1;
 497         }
 498         return ret;
 499 }
 500 
 501 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
 502 {
 503         return 1;
 504 }
 505 
 506 /*
 507  * This generic page_is_ram() returns true if specified address is
 508  * registered as System RAM in iomem_resource list.
 509  */
 510 int __weak page_is_ram(unsigned long pfn)
 511 {
 512         return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
 513 }
 514 EXPORT_SYMBOL_GPL(page_is_ram);
 515 
 516 /**
 517  * region_intersects() - determine intersection of region with known resources
 518  * @start: region start address
 519  * @size: size of region
 520  * @flags: flags of resource (in iomem_resource)
 521  * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 522  *
 523  * Check if the specified region partially overlaps or fully eclipses a
 524  * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 525  * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 526  * return REGION_MIXED if the region overlaps @flags/@desc and another
 527  * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 528  * and no other defined resource. Note that REGION_INTERSECTS is also
 529  * returned in the case when the specified region overlaps RAM and undefined
 530  * memory holes.
 531  *
  532  * region_intersects() is used by memory remapping functions to ensure
 533  * the user is not remapping RAM and is a vast speed up over walking
 534  * through the resource table page by page.
 535  */
 536 int region_intersects(resource_size_t start, size_t size, unsigned long flags,
 537                       unsigned long desc)
 538 {
 539         struct resource res;
  540         int type = 0, other = 0;
 541         struct resource *p;
 542 
 543         res.start = start;
 544         res.end = start + size - 1;
 545 
 546         read_lock(&resource_lock);
 547         for (p = iomem_resource.child; p ; p = p->sibling) {
 548                 bool is_type = (((p->flags & flags) == flags) &&
 549                                 ((desc == IORES_DESC_NONE) ||
 550                                  (desc == p->desc)));
 551 
 552                 if (resource_overlaps(p, &res))
 553                         is_type ? type++ : other++;
 554         }
 555         read_unlock(&resource_lock);
 556 
 557         if (other == 0)
 558                 return type ? REGION_INTERSECTS : REGION_DISJOINT;
 559 
 560         if (type)
 561                 return REGION_MIXED;
 562 
 563         return REGION_DISJOINT;
 564 }
 565 EXPORT_SYMBOL_GPL(region_intersects);
 566 
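A sketch of the typical caller (an editor's addition; the helper is
hypothetical): remapping code refuses to touch a range unless it is completely
disjoint from System RAM.

static bool example_range_is_mappable(resource_size_t offset, size_t size)
{
        return region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
                                 IORES_DESC_NONE) == REGION_DISJOINT;
}
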
 567 void __weak arch_remove_reservations(struct resource *avail)
 568 {
 569 }
 570 
 571 static resource_size_t simple_align_resource(void *data,
 572                                              const struct resource *avail,
 573                                              resource_size_t size,
 574                                              resource_size_t align)
 575 {
 576         return avail->start;
 577 }
 578 
 579 static void resource_clip(struct resource *res, resource_size_t min,
 580                           resource_size_t max)
 581 {
 582         if (res->start < min)
 583                 res->start = min;
 584         if (res->end > max)
 585                 res->end = max;
 586 }
 587 
 588 /*
 589  * Find empty slot in the resource tree with the given range and
 590  * alignment constraints
 591  */
 592 static int __find_resource(struct resource *root, struct resource *old,
 593                          struct resource *new,
 594                          resource_size_t  size,
 595                          struct resource_constraint *constraint)
 596 {
 597         struct resource *this = root->child;
 598         struct resource tmp = *new, avail, alloc;
 599 
 600         tmp.start = root->start;
 601         /*
 602          * Skip past an allocated resource that starts at 0, since the assignment
 603          * of this->start - 1 to tmp->end below would cause an underflow.
 604          */
 605         if (this && this->start == root->start) {
 606                 tmp.start = (this == old) ? old->start : this->end + 1;
 607                 this = this->sibling;
 608         }
 609         for(;;) {
 610                 if (this)
 611                         tmp.end = (this == old) ?  this->end : this->start - 1;
 612                 else
 613                         tmp.end = root->end;
 614 
 615                 if (tmp.end < tmp.start)
 616                         goto next;
 617 
 618                 resource_clip(&tmp, constraint->min, constraint->max);
 619                 arch_remove_reservations(&tmp);
 620 
 621                 /* Check for overflow after ALIGN() */
 622                 avail.start = ALIGN(tmp.start, constraint->align);
 623                 avail.end = tmp.end;
 624                 avail.flags = new->flags & ~IORESOURCE_UNSET;
 625                 if (avail.start >= tmp.start) {
 626                         alloc.flags = avail.flags;
 627                         alloc.start = constraint->alignf(constraint->alignf_data, &avail,
 628                                         size, constraint->align);
 629                         alloc.end = alloc.start + size - 1;
 630                         if (alloc.start <= alloc.end &&
 631                             resource_contains(&avail, &alloc)) {
 632                                 new->start = alloc.start;
 633                                 new->end = alloc.end;
 634                                 return 0;
 635                         }
 636                 }
 637 
 638 next:           if (!this || this->end == root->end)
 639                         break;
 640 
 641                 if (this != old)
 642                         tmp.start = this->end + 1;
 643                 this = this->sibling;
 644         }
 645         return -EBUSY;
 646 }
 647 
 648 /*
 649  * Find empty slot in the resource tree given range and alignment.
 650  */
 651 static int find_resource(struct resource *root, struct resource *new,
 652                         resource_size_t size,
 653                         struct resource_constraint  *constraint)
 654 {
 655         return  __find_resource(root, NULL, new, size, constraint);
 656 }
 657 
 658 /**
 659  * reallocate_resource - allocate a slot in the resource tree given range & alignment.
  660  *      The resource will be relocated if the new size cannot be
  661  *      accommodated at the current location.
 662  *
 663  * @root: root resource descriptor
 664  * @old:  resource descriptor desired by caller
 665  * @newsize: new size of the resource descriptor
 666  * @constraint: the size and alignment constraints to be met.
 667  */
 668 static int reallocate_resource(struct resource *root, struct resource *old,
 669                                resource_size_t newsize,
 670                                struct resource_constraint *constraint)
 671 {
 672         int err=0;
 673         struct resource new = *old;
 674         struct resource *conflict;
 675 
 676         write_lock(&resource_lock);
 677 
 678         if ((err = __find_resource(root, old, &new, newsize, constraint)))
 679                 goto out;
 680 
 681         if (resource_contains(&new, old)) {
 682                 old->start = new.start;
 683                 old->end = new.end;
 684                 goto out;
 685         }
 686 
 687         if (old->child) {
 688                 err = -EBUSY;
 689                 goto out;
 690         }
 691 
 692         if (resource_contains(old, &new)) {
 693                 old->start = new.start;
 694                 old->end = new.end;
 695         } else {
 696                 __release_resource(old, true);
 697                 *old = new;
 698                 conflict = __request_resource(root, old);
 699                 BUG_ON(conflict);
 700         }
 701 out:
 702         write_unlock(&resource_lock);
 703         return err;
 704 }
 705 
 706 
 707 /**
  708  * allocate_resource - allocate an empty slot in the resource tree given range & alignment.
  709  *      The resource will be reallocated with a new size if it was already allocated.
 710  * @root: root resource descriptor
 711  * @new: resource descriptor desired by caller
 712  * @size: requested resource region size
 713  * @min: minimum boundary to allocate
 714  * @max: maximum boundary to allocate
 715  * @align: alignment requested, in bytes
 716  * @alignf: alignment function, optional, called if not NULL
 717  * @alignf_data: arbitrary data to pass to the @alignf function
 718  */
 719 int allocate_resource(struct resource *root, struct resource *new,
 720                       resource_size_t size, resource_size_t min,
 721                       resource_size_t max, resource_size_t align,
 722                       resource_size_t (*alignf)(void *,
 723                                                 const struct resource *,
 724                                                 resource_size_t,
 725                                                 resource_size_t),
 726                       void *alignf_data)
 727 {
 728         int err;
 729         struct resource_constraint constraint;
 730 
 731         if (!alignf)
 732                 alignf = simple_align_resource;
 733 
 734         constraint.min = min;
 735         constraint.max = max;
 736         constraint.align = align;
 737         constraint.alignf = alignf;
 738         constraint.alignf_data = alignf_data;
 739 
  740         if (new->parent) {
  741                 /* Resource is already allocated; try reallocating
  742                  * with the new constraints. */
 743                 return reallocate_resource(root, new, size, &constraint);
 744         }
 745 
 746         write_lock(&resource_lock);
 747         err = find_resource(root, new, size, &constraint);
 748         if (err >= 0 && __request_resource(root, new))
 749                 err = -EBUSY;
 750         write_unlock(&resource_lock);
 751         return err;
 752 }
 753 
 754 EXPORT_SYMBOL(allocate_resource);
 755 
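A minimal sketch (an editor's addition; size, bounds and name are
hypothetical): ask for any free 64 KiB window below 4 GiB, aligned to 64 KiB.
On success, example_win holds the chosen range and is already linked into the
tree under iomem_resource.

static struct resource example_win = {
        .name   = "example-window",     /* hypothetical name */
        .flags  = IORESOURCE_MEM,
};

static int example_alloc_window(void)
{
        /* size, min, max, align; a NULL alignf selects simple_align_resource */
        return allocate_resource(&iomem_resource, &example_win,
                                 0x10000, 0, 0xffffffff, 0x10000,
                                 NULL, NULL);
}
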
 756 /**
 757  * lookup_resource - find an existing resource by a resource start address
 758  * @root: root resource descriptor
 759  * @start: resource start address
 760  *
 761  * Returns a pointer to the resource if found, NULL otherwise
 762  */
 763 struct resource *lookup_resource(struct resource *root, resource_size_t start)
 764 {
 765         struct resource *res;
 766 
 767         read_lock(&resource_lock);
 768         for (res = root->child; res; res = res->sibling) {
 769                 if (res->start == start)
 770                         break;
 771         }
 772         read_unlock(&resource_lock);
 773 
 774         return res;
 775 }
 776 
 777 /*
 778  * Insert a resource into the resource tree. If successful, return NULL,
 779  * otherwise return the conflicting resource (compare to __request_resource())
 780  */
 781 static struct resource * __insert_resource(struct resource *parent, struct resource *new)
 782 {
 783         struct resource *first, *next;
 784 
 785         for (;; parent = first) {
 786                 first = __request_resource(parent, new);
 787                 if (!first)
 788                         return first;
 789 
 790                 if (first == parent)
 791                         return first;
 792                 if (WARN_ON(first == new))      /* duplicated insertion */
 793                         return first;
 794 
 795                 if ((first->start > new->start) || (first->end < new->end))
 796                         break;
 797                 if ((first->start == new->start) && (first->end == new->end))
 798                         break;
 799         }
 800 
 801         for (next = first; ; next = next->sibling) {
 802                 /* Partial overlap? Bad, and unfixable */
 803                 if (next->start < new->start || next->end > new->end)
 804                         return next;
 805                 if (!next->sibling)
 806                         break;
 807                 if (next->sibling->start > new->end)
 808                         break;
 809         }
 810 
 811         new->parent = parent;
 812         new->sibling = next->sibling;
 813         new->child = first;
 814 
 815         next->sibling = NULL;
 816         for (next = first; next; next = next->sibling)
 817                 next->parent = new;
 818 
 819         if (parent->child == first) {
 820                 parent->child = new;
 821         } else {
 822                 next = parent->child;
 823                 while (next->sibling != first)
 824                         next = next->sibling;
 825                 next->sibling = new;
 826         }
 827         return NULL;
 828 }
 829 
 830 /**
 831  * insert_resource_conflict - Inserts resource in the resource tree
 832  * @parent: parent of the new resource
 833  * @new: new resource to insert
 834  *
  835  * Returns NULL on success, or the conflicting resource on failure.
 836  *
  837  * This function is equivalent to request_resource_conflict() when no
  838  * conflict happens. If a conflict happens, and the conflicting resources
 839  * entirely fit within the range of the new resource, then the new
 840  * resource is inserted and the conflicting resources become children of
 841  * the new resource.
 842  *
 843  * This function is intended for producers of resources, such as FW modules
 844  * and bus drivers.
 845  */
 846 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
 847 {
 848         struct resource *conflict;
 849 
 850         write_lock(&resource_lock);
 851         conflict = __insert_resource(parent, new);
 852         write_unlock(&resource_lock);
 853         return conflict;
 854 }
 855 
 856 /**
 857  * insert_resource - Inserts a resource in the resource tree
 858  * @parent: parent of the new resource
 859  * @new: new resource to insert
 860  *
 861  * Returns 0 on success, -EBUSY if the resource can't be inserted.
 862  *
 863  * This function is intended for producers of resources, such as FW modules
 864  * and bus drivers.
 865  */
 866 int insert_resource(struct resource *parent, struct resource *new)
 867 {
 868         struct resource *conflict;
 869 
 870         conflict = insert_resource_conflict(parent, new);
 871         return conflict ? -EBUSY : 0;
 872 }
 873 EXPORT_SYMBOL_GPL(insert_resource);
 874 
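To illustrate how this differs from request_resource() (editor's sketch; the
range is hypothetical): firmware-table code can insert a top-level region even
after drivers have claimed parts of it; the earlier claims simply become
children of the inserted resource.

static struct resource example_fw_region = {
        .name   = "Example FW region",  /* hypothetical name */
        .start  = 0x80000000,
        .end    = 0x8fffffff,
        .flags  = IORESOURCE_MEM,
};

static void example_insert_fw_region(void)
{
        /* fails only if an existing resource overlaps just partially */
        if (insert_resource(&iomem_resource, &example_fw_region))
                pr_warn("example FW region partially overlaps a neighbour\n");
}
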
 875 /**
 876  * insert_resource_expand_to_fit - Insert a resource into the resource tree
 877  * @root: root resource descriptor
 878  * @new: new resource to insert
 879  *
 880  * Insert a resource into the resource tree, possibly expanding it in order
 881  * to make it encompass any conflicting resources.
 882  */
 883 void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
 884 {
 885         if (new->parent)
 886                 return;
 887 
 888         write_lock(&resource_lock);
 889         for (;;) {
 890                 struct resource *conflict;
 891 
 892                 conflict = __insert_resource(root, new);
 893                 if (!conflict)
 894                         break;
 895                 if (conflict == root)
 896                         break;
 897 
  898                 /* OK, expand resource to cover the conflict, then try again. */
 899                 if (conflict->start < new->start)
 900                         new->start = conflict->start;
 901                 if (conflict->end > new->end)
 902                         new->end = conflict->end;
 903 
  904                 printk(KERN_INFO "Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
 905         }
 906         write_unlock(&resource_lock);
 907 }
 908 
 909 /**
 910  * remove_resource - Remove a resource in the resource tree
 911  * @old: resource to remove
 912  *
 913  * Returns 0 on success, -EINVAL if the resource is not valid.
 914  *
 915  * This function removes a resource previously inserted by insert_resource()
 916  * or insert_resource_conflict(), and moves the children (if any) up to
 917  * where they were before.  insert_resource() and insert_resource_conflict()
 918  * insert a new resource, and move any conflicting resources down to the
 919  * children of the new resource.
 920  *
 921  * insert_resource(), insert_resource_conflict() and remove_resource() are
 922  * intended for producers of resources, such as FW modules and bus drivers.
 923  */
 924 int remove_resource(struct resource *old)
 925 {
 926         int retval;
 927 
 928         write_lock(&resource_lock);
 929         retval = __release_resource(old, false);
 930         write_unlock(&resource_lock);
 931         return retval;
 932 }
 933 EXPORT_SYMBOL_GPL(remove_resource);
 934 
 935 static int __adjust_resource(struct resource *res, resource_size_t start,
 936                                 resource_size_t size)
 937 {
 938         struct resource *tmp, *parent = res->parent;
 939         resource_size_t end = start + size - 1;
 940         int result = -EBUSY;
 941 
 942         if (!parent)
 943                 goto skip;
 944 
 945         if ((start < parent->start) || (end > parent->end))
 946                 goto out;
 947 
 948         if (res->sibling && (res->sibling->start <= end))
 949                 goto out;
 950 
 951         tmp = parent->child;
 952         if (tmp != res) {
 953                 while (tmp->sibling != res)
 954                         tmp = tmp->sibling;
 955                 if (start <= tmp->end)
 956                         goto out;
 957         }
 958 
 959 skip:
 960         for (tmp = res->child; tmp; tmp = tmp->sibling)
 961                 if ((tmp->start < start) || (tmp->end > end))
 962                         goto out;
 963 
 964         res->start = start;
 965         res->end = end;
 966         result = 0;
 967 
 968  out:
 969         return result;
 970 }
 971 
 972 /**
 973  * adjust_resource - modify a resource's start and size
 974  * @res: resource to modify
 975  * @start: new start value
 976  * @size: new size
 977  *
 978  * Given an existing resource, change its start and size to match the
 979  * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 980  * Existing children of the resource are assumed to be immutable.
 981  */
 982 int adjust_resource(struct resource *res, resource_size_t start,
 983                     resource_size_t size)
 984 {
 985         int result;
 986 
 987         write_lock(&resource_lock);
 988         result = __adjust_resource(res, start, size);
 989         write_unlock(&resource_lock);
 990         return result;
 991 }
 992 EXPORT_SYMBOL(adjust_resource);
 993 
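A short sketch (an editor's addition; the helper is hypothetical): grow an
already-inserted resource in place. The call fails with -EBUSY if the new
range would collide with a sibling or leave a child outside the resource.

static int example_double_size(struct resource *res)
{
        return adjust_resource(res, res->start, 2 * resource_size(res));
}
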
 994 static void __init
 995 __reserve_region_with_split(struct resource *root, resource_size_t start,
 996                             resource_size_t end, const char *name)
 997 {
 998         struct resource *parent = root;
 999         struct resource *conflict;
1000         struct resource *res = alloc_resource(GFP_ATOMIC);
1001         struct resource *next_res = NULL;
1002         int type = resource_type(root);
1003 
1004         if (!res)
1005                 return;
1006 
1007         res->name = name;
1008         res->start = start;
1009         res->end = end;
1010         res->flags = type | IORESOURCE_BUSY;
1011         res->desc = IORES_DESC_NONE;
1012 
1013         while (1) {
1014 
1015                 conflict = __request_resource(parent, res);
1016                 if (!conflict) {
1017                         if (!next_res)
1018                                 break;
1019                         res = next_res;
1020                         next_res = NULL;
1021                         continue;
1022                 }
1023 
1024                 /* conflict covers the whole area */
1025                 if (conflict->start <= res->start &&
1026                                 conflict->end >= res->end) {
1027                         free_resource(res);
1028                         WARN_ON(next_res);
1029                         break;
1030                 }
1031 
1032                 /* failed, split and try again */
1033                 if (conflict->start > res->start) {
1034                         end = res->end;
1035                         res->end = conflict->start - 1;
1036                         if (conflict->end < end) {
1037                                 next_res = alloc_resource(GFP_ATOMIC);
1038                                 if (!next_res) {
1039                                         free_resource(res);
1040                                         break;
1041                                 }
1042                                 next_res->name = name;
1043                                 next_res->start = conflict->end + 1;
1044                                 next_res->end = end;
1045                                 next_res->flags = type | IORESOURCE_BUSY;
1046                                 next_res->desc = IORES_DESC_NONE;
1047                         }
1048                 } else {
1049                         res->start = conflict->end + 1;
1050                 }
1051         }
1052 
1053 }
1054 
1055 void __init
1056 reserve_region_with_split(struct resource *root, resource_size_t start,
1057                           resource_size_t end, const char *name)
1058 {
1059         int abort = 0;
1060 
1061         write_lock(&resource_lock);
1062         if (root->start > start || root->end < end) {
1063                 pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
1064                        (unsigned long long)start, (unsigned long long)end,
1065                        root);
1066                 if (start > root->end || end < root->start)
1067                         abort = 1;
1068                 else {
1069                         if (end > root->end)
1070                                 end = root->end;
1071                         if (start < root->start)
1072                                 start = root->start;
1073                         pr_err("fixing request to [0x%llx-0x%llx]\n",
1074                                (unsigned long long)start,
1075                                (unsigned long long)end);
1076                 }
1077                 dump_stack();
1078         }
1079         if (!abort)
1080                 __reserve_region_with_split(root, start, end, name);
1081         write_unlock(&resource_lock);
1082 }
1083 
1084 /**
1085  * resource_alignment - calculate resource's alignment
1086  * @res: resource pointer
1087  *
1088  * Returns alignment on success, 0 (invalid alignment) on failure.
1089  */
1090 resource_size_t resource_alignment(struct resource *res)
1091 {
1092         switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
1093         case IORESOURCE_SIZEALIGN:
1094                 return resource_size(res);
1095         case IORESOURCE_STARTALIGN:
1096                 return res->start;
1097         default:
1098                 return 0;
1099         }
1100 }
1101 
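A worked illustration of the two alignment flags (an editor's addition; the
values are hypothetical):

static void example_alignment(void)
{
        struct resource r = {
                .start  = 0x1000,
                .end    = 0x1fff,
                .flags  = IORESOURCE_SIZEALIGN,
        };

        /* size-aligned: alignment equals resource_size(&r) == 0x1000 */
        WARN_ON(resource_alignment(&r) != 0x1000);

        r.flags = IORESOURCE_STARTALIGN;
        /* start-aligned: the start address itself is the alignment */
        WARN_ON(resource_alignment(&r) != 0x1000);
}
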
1102 /*
1103  * This is compatibility stuff for IO resources.
1104  *
1105  * Note how this, unlike the above, knows about
1106  * the IO flag meanings (busy etc).
1107  *
1108  * request_region creates a new busy region.
1109  *
1110  * release_region releases a matching busy region.
1111  */
1112 
1113 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
1114 
1115 /**
1116  * __request_region - create a new busy resource region
1117  * @parent: parent resource descriptor
1118  * @start: resource start address
1119  * @n: resource region size
1120  * @name: reserving caller's ID string
1121  * @flags: IO resource flags
1122  */
1123 struct resource * __request_region(struct resource *parent,
1124                                    resource_size_t start, resource_size_t n,
1125                                    const char *name, int flags)
1126 {
1127         DECLARE_WAITQUEUE(wait, current);
1128         struct resource *res = alloc_resource(GFP_KERNEL);
1129 
1130         if (!res)
1131                 return NULL;
1132 
1133         res->name = name;
1134         res->start = start;
1135         res->end = start + n - 1;
1136 
1137         write_lock(&resource_lock);
1138 
1139         for (;;) {
1140                 struct resource *conflict;
1141 
1142                 res->flags = resource_type(parent) | resource_ext_type(parent);
1143                 res->flags |= IORESOURCE_BUSY | flags;
1144                 res->desc = parent->desc;
1145 
1146                 conflict = __request_resource(parent, res);
1147                 if (!conflict)
1148                         break;
1149                 /*
1150                  * mm/hmm.c reserves physical addresses which then
1151                  * become unavailable to other users.  Conflicts are
1152                  * not expected.  Warn to aid debugging if encountered.
1153                  */
1154                 if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
1155                         pr_warn("Unaddressable device %s %pR conflicts with %pR",
1156                                 conflict->name, conflict, res);
1157                 }
1158                 if (conflict != parent) {
1159                         if (!(conflict->flags & IORESOURCE_BUSY)) {
1160                                 parent = conflict;
1161                                 continue;
1162                         }
1163                 }
1164                 if (conflict->flags & flags & IORESOURCE_MUXED) {
1165                         add_wait_queue(&muxed_resource_wait, &wait);
1166                         write_unlock(&resource_lock);
1167                         set_current_state(TASK_UNINTERRUPTIBLE);
1168                         schedule();
1169                         remove_wait_queue(&muxed_resource_wait, &wait);
1170                         write_lock(&resource_lock);
1171                         continue;
1172                 }
1173                 /* Uhhuh, that didn't work out.. */
1174                 free_resource(res);
1175                 res = NULL;
1176                 break;
1177         }
1178         write_unlock(&resource_lock);
1179         return res;
1180 }
1181 EXPORT_SYMBOL(__request_region);
1182 
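Drivers normally reach __request_region() through the request_region() and
request_mem_region() wrappers from <linux/ioport.h>. A minimal sketch (an
editor's addition; the driver name is hypothetical):

static void __iomem *example_map(resource_size_t base, resource_size_t len)
{
        /* returns NULL if any part of [base, base + len) is already busy */
        if (!request_mem_region(base, len, "example-driver"))
                return NULL;

        /* pair with iounmap() and release_mem_region() on teardown */
        return ioremap(base, len);
}
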
1183 /**
1184  * __release_region - release a previously reserved resource region
1185  * @parent: parent resource descriptor
1186  * @start: resource start address
1187  * @n: resource region size
1188  *
1189  * The described resource region must match a currently busy region.
1190  */
1191 void __release_region(struct resource *parent, resource_size_t start,
1192                       resource_size_t n)
1193 {
1194         struct resource **p;
1195         resource_size_t end;
1196 
1197         p = &parent->child;
1198         end = start + n - 1;
1199 
1200         write_lock(&resource_lock);
1201 
1202         for (;;) {
1203                 struct resource *res = *p;
1204 
1205                 if (!res)
1206                         break;
1207                 if (res->start <= start && res->end >= end) {
1208                         if (!(res->flags & IORESOURCE_BUSY)) {
1209                                 p = &res->child;
1210                                 continue;
1211                         }
1212                         if (res->start != start || res->end != end)
1213                                 break;
1214                         *p = res->sibling;
1215                         write_unlock(&resource_lock);
1216                         if (res->flags & IORESOURCE_MUXED)
1217                                 wake_up(&muxed_resource_wait);
1218                         free_resource(res);
1219                         return;
1220                 }
1221                 p = &res->sibling;
1222         }
1223 
1224         write_unlock(&resource_lock);
1225 
1226         printk(KERN_WARNING "Trying to free nonexistent resource "
1227                 "<%016llx-%016llx>\n", (unsigned long long)start,
1228                 (unsigned long long)end);
1229 }
1230 EXPORT_SYMBOL(__release_region);
1231 
1232 #ifdef CONFIG_MEMORY_HOTREMOVE
1233 /**
1234  * release_mem_region_adjustable - release a previously reserved memory region
1235  * @parent: parent resource descriptor
1236  * @start: resource start address
1237  * @size: resource region size
1238  *
1239  * This interface is intended for memory hot-delete.  The requested region
1240  * is released from a currently busy memory resource.  The requested region
1241  * must either match exactly or fit into a single busy resource entry.  In
1242  * the latter case, the remaining resource is adjusted accordingly.
1243  * Existing children of the busy memory resource must be immutable in the
1244  * request.
1245  *
1246  * Note:
1247  * - Additional release conditions, such as overlapping region, can be
1248  *   supported after they are confirmed as valid cases.
1249  * - When a busy memory resource gets split into two entries, the code
1250  *   assumes that all children remain in the lower address entry for
1251  *   simplicity.  Enhance this logic when necessary.
1252  */
1253 int release_mem_region_adjustable(struct resource *parent,
1254                                   resource_size_t start, resource_size_t size)
1255 {
1256         struct resource **p;
1257         struct resource *res;
1258         struct resource *new_res;
1259         resource_size_t end;
1260         int ret = -EINVAL;
1261 
1262         end = start + size - 1;
1263         if ((start < parent->start) || (end > parent->end))
1264                 return ret;
1265 
1266         /* The alloc_resource() result gets checked later */
1267         new_res = alloc_resource(GFP_KERNEL);
1268 
1269         p = &parent->child;
1270         write_lock(&resource_lock);
1271 
1272         while ((res = *p)) {
1273                 if (res->start >= end)
1274                         break;
1275 
1276                 /* look for the next resource if the region does not fit into this one */
1277                 if (res->start > start || res->end < end) {
1278                         p = &res->sibling;
1279                         continue;
1280                 }
1281 
1282                 /*
1283                  * All memory regions added from the memory-hotplug path have the
1284                  * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
1285                  * this flag, we know that we are dealing with a resource coming
1286                  * from HMM/devm. HMM/devm use another mechanism to add/release
1287                  * a resource. This goes via devm_request_mem_region and
1288                  * devm_release_mem_region.
1289                  * HMM/devm take care to release their resources when they want,
1290                  * so if we are dealing with them, let us just back off here.
1291                  */
1292                 if (!(res->flags & IORESOURCE_SYSRAM)) {
1293                         ret = 0;
1294                         break;
1295                 }
1296 
1297                 if (!(res->flags & IORESOURCE_MEM))
1298                         break;
1299 
1300                 if (!(res->flags & IORESOURCE_BUSY)) {
1301                         p = &res->child;
1302                         continue;
1303                 }
1304 
1305                 /* found the target resource; let's adjust accordingly */
1306                 if (res->start == start && res->end == end) {
1307                         /* free the whole entry */
1308                         *p = res->sibling;
1309                         free_resource(res);
1310                         ret = 0;
1311                 } else if (res->start == start && res->end != end) {
1312                         /* adjust the start */
1313                         ret = __adjust_resource(res, end + 1,
1314                                                 res->end - end);
1315                 } else if (res->start != start && res->end == end) {
1316                         /* adjust the end */
1317                         ret = __adjust_resource(res, res->start,
1318                                                 start - res->start);
1319                 } else {
1320                         /* split into two entries */
1321                         if (!new_res) {
1322                                 ret = -ENOMEM;
1323                                 break;
1324                         }
1325                         new_res->name = res->name;
1326                         new_res->start = end + 1;
1327                         new_res->end = res->end;
1328                         new_res->flags = res->flags;
1329                         new_res->desc = res->desc;
1330                         new_res->parent = res->parent;
1331                         new_res->sibling = res->sibling;
1332                         new_res->child = NULL;
1333 
1334                         ret = __adjust_resource(res, res->start,
1335                                                 start - res->start);
1336                         if (ret)
1337                                 break;
1338                         res->sibling = new_res;
1339                         new_res = NULL;
1340                 }
1341 
1342                 break;
1343         }
1344 
1345         write_unlock(&resource_lock);
1346         free_resource(new_res);
1347         return ret;
1348 }
1349 #endif  /* CONFIG_MEMORY_HOTREMOVE */
1350 
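A hot-remove sketch (an editor's addition; assumes CONFIG_MEMORY_HOTREMOVE,
and the offset is hypothetical): punching a hole in the middle of a busy
System RAM entry makes the code above split it into two entries.

static int example_punch_hole(resource_size_t start)
{
        /* release 128 MiB; splits the containing resource if needed */
        return release_mem_region_adjustable(&iomem_resource, start,
                                             128 << 20);
}
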
1351 /*
1352  * Managed region resource
1353  */
1354 static void devm_resource_release(struct device *dev, void *ptr)
1355 {
1356         struct resource **r = ptr;
1357 
1358         release_resource(*r);
1359 }
1360 
1361 /**
1362  * devm_request_resource() - request and reserve an I/O or memory resource
1363  * @dev: device for which to request the resource
1364  * @root: root of the resource tree from which to request the resource
1365  * @new: descriptor of the resource to request
1366  *
1367  * This is a device-managed version of request_resource(). There is usually
1368  * no need to release resources requested by this function explicitly since
1369  * that will be taken care of when the device is unbound from its driver.
1370  * If for some reason the resource needs to be released explicitly, because
1371  * of ordering issues for example, drivers must call devm_release_resource()
1372  * rather than the regular release_resource().
1373  *
1374  * When a conflict is detected between any existing resources and the newly
1375  * requested resource, an error message will be printed.
1376  *
1377  * Returns 0 on success or a negative error code on failure.
1378  */
1379 int devm_request_resource(struct device *dev, struct resource *root,
1380                           struct resource *new)
1381 {
1382         struct resource *conflict, **ptr;
1383 
1384         ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
1385         if (!ptr)
1386                 return -ENOMEM;
1387 
1388         *ptr = new;
1389 
1390         conflict = request_resource_conflict(root, new);
1391         if (conflict) {
1392                 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
1393                         new, conflict->name, conflict);
1394                 devres_free(ptr);
1395                 return -EBUSY;
1396         }
1397 
1398         devres_add(dev, ptr);
1399         return 0;
1400 }
1401 EXPORT_SYMBOL(devm_request_resource);
1402 
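The managed pattern in practice (editor's sketch; the name and range are
hypothetical): a probe routine requests the resource and relies on devres to
release it when the device is unbound.

static struct resource example_devm_res = {
        .name   = "example-devm",       /* hypothetical name */
        .start  = 0xfed50000,           /* hypothetical range */
        .end    = 0xfed50fff,
        .flags  = IORESOURCE_MEM,
};

static int example_probe(struct device *dev)
{
        /* released automatically when @dev is unbound from its driver */
        return devm_request_resource(dev, &iomem_resource,
                                     &example_devm_res);
}
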
1403 static int devm_resource_match(struct device *dev, void *res, void *data)
1404 {
1405         struct resource **ptr = res;
1406 
1407         return *ptr == data;
1408 }
1409 
1410 /**
1411  * devm_release_resource() - release a previously requested resource
1412  * @dev: device for which to release the resource
1413  * @new: descriptor of the resource to release
1414  *
1415  * Releases a resource previously requested using devm_request_resource().
1416  */
1417 void devm_release_resource(struct device *dev, struct resource *new)
1418 {
1419         WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1420                                new));
1421 }
1422 EXPORT_SYMBOL(devm_release_resource);
1423 
1424 struct region_devres {
1425         struct resource *parent;
1426         resource_size_t start;
1427         resource_size_t n;
1428 };
1429 
1430 static void devm_region_release(struct device *dev, void *res)
1431 {
1432         struct region_devres *this = res;
1433 
1434         __release_region(this->parent, this->start, this->n);
1435 }
1436 
1437 static int devm_region_match(struct device *dev, void *res, void *match_data)
1438 {
1439         struct region_devres *this = res, *match = match_data;
1440 
1441         return this->parent == match->parent &&
1442                 this->start == match->start && this->n == match->n;
1443 }
1444 
1445 struct resource *
1446 __devm_request_region(struct device *dev, struct resource *parent,
1447                       resource_size_t start, resource_size_t n, const char *name)
1448 {
1449         struct region_devres *dr = NULL;
1450         struct resource *res;
1451 
1452         dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1453                           GFP_KERNEL);
1454         if (!dr)
1455                 return NULL;
1456 
1457         dr->parent = parent;
1458         dr->start = start;
1459         dr->n = n;
1460 
1461         res = __request_region(parent, start, n, name, 0);
1462         if (res)
1463                 devres_add(dev, dr);
1464         else
1465                 devres_free(dr);
1466 
1467         return res;
1468 }
1469 EXPORT_SYMBOL(__devm_request_region);
1470 
1471 void __devm_release_region(struct device *dev, struct resource *parent,
1472                            resource_size_t start, resource_size_t n)
1473 {
1474         struct region_devres match_data = { parent, start, n };
1475 
1476         __release_region(parent, start, n);
1477         WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
1478                                &match_data));
1479 }
1480 EXPORT_SYMBOL(__devm_release_region);
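
Drivers rarely call __devm_request_region() directly; the devm_request_region()
and devm_request_mem_region() macros in <linux/ioport.h> wrap it, supplying
&ioport_resource or &iomem_resource as the parent. A hedged sketch with a
hypothetical port range:

    #include <linux/ioport.h>

    static int foo_claim_ports(struct device *dev)
    {
            /*
             * Expands to __devm_request_region(dev, &ioport_resource,
             * 0x3f8, 8, "foo-uart"); released automatically on unbind.
             */
            if (!devm_request_region(dev, 0x3f8, 8, "foo-uart"))
                    return -EBUSY;
            return 0;
    }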
1481 
1482 /*
1483  * Reserve I/O ports or memory based on the "reserve=" kernel parameter.
1484  */
1485 #define MAXRESERVE 4
1486 static int __init reserve_setup(char *str)
1487 {
1488         static int reserved;
1489         static struct resource reserve[MAXRESERVE];
1490 
1491         for (;;) {
1492                 unsigned int io_start, io_num;
1493                 int x = reserved;
1494                 struct resource *parent;
1495 
1496                 if (get_option(&str, &io_start) != 2)
1497                         break;
1498                 if (get_option(&str, &io_num) == 0)
1499                         break;
1500                 if (x < MAXRESERVE) {
1501                         struct resource *res = reserve + x;
1502 
1503                         /*
1504                          * If the region starts below 0x10000, we assume it's
1505                          * I/O port space; otherwise assume it's memory.
1506                          */
1507                         if (io_start < 0x10000) {
1508                                 res->flags = IORESOURCE_IO;
1509                                 parent = &ioport_resource;
1510                         } else {
1511                                 res->flags = IORESOURCE_MEM;
1512                                 parent = &iomem_resource;
1513                         }
1514                         res->name = "reserved";
1515                         res->start = io_start;
1516                         res->end = io_start + io_num - 1;
1517                         res->flags |= IORESOURCE_BUSY;
1518                         res->desc = IORES_DESC_NONE;
1519                         res->child = NULL;
1520                         if (request_resource(parent, res) == 0)
1521                                 reserved = x+1;
1522                 }
1523         }
1524         return 1;
1525 }
1526 __setup("reserve=", reserve_setup);
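
As parsed above, the parameter takes comma-separated start,size pairs (at most
MAXRESERVE of them), and a start below 0x10000 selects port I/O space. For
example (addresses hypothetical):

    reserve=0x300,8                     reserve I/O ports 0x300-0x307
    reserve=0x300,8,0xe0000000,0x1000   additionally reserve 4 KiB of memory
                                        at 0xe0000000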
1527 
1528 /*
1529  * Check whether the requested addr and size span more than any slot in
1530  * the iomem resource tree.
1531  */
1532 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
1533 {
1534         struct resource *p = &iomem_resource;
1535         int err = 0;
1536         loff_t l;
1537 
1538         read_lock(&resource_lock);
1539         for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1540                 /*
1541                  * We could probably skip resources that lack the
1542                  * IORESOURCE_MEM attribute (this walks the iomem tree)?
1543                  */
1544                 if (p->start >= addr + size)
1545                         continue;
1546                 if (p->end < addr)
1547                         continue;
1548                 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
1549                     PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
1550                         continue;
1551                 /*
1552                  * If a resource is "BUSY", it's not a hardware resource
1553                  * but a driver mapping of such a resource; we don't want
1554                  * to warn for those, since some drivers legitimately map
1555                  * only partial hardware resources (vesafb, for example).
1556                  */
1557                 if (p->flags & IORESOURCE_BUSY)
1558                         continue;
1559 
1560                 printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
1561                        (unsigned long long)addr,
1562                        (unsigned long long)(addr + size - 1),
1563                        p->name, p);
1564                 err = -1;
1565                 break;
1566         }
1567         read_unlock(&resource_lock);
1568 
1569         return err;
1570 }
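
A sketch of how a mapping helper might consult this check; foo_ioremap_checked
is hypothetical, and refusing the mapping outright (rather than merely
warning) is a policy choice of the example, not of the function above:

    #include <linux/io.h>
    #include <linux/ioport.h>

    static void __iomem *foo_ioremap_checked(resource_size_t phys,
                                             unsigned long size)
    {
            /* iomem_map_sanity_check() logs its own warning on failure. */
            if (iomem_map_sanity_check(phys, size))
                    return NULL;
            return ioremap(phys, size);
    }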
1571 
1572 #ifdef CONFIG_STRICT_DEVMEM
1573 static int strict_iomem_checks = 1;
1574 #else
1575 static int strict_iomem_checks;
1576 #endif
1577 
1578 /*
1579  * Check whether an address is reserved in the iomem resource tree;
1580  * returns true if reserved, false if not.
1581  */
1582 bool iomem_is_exclusive(u64 addr)
1583 {
1584         struct resource *p = &iomem_resource;
1585         bool err = false;
1586         loff_t l;
1587         int size = PAGE_SIZE;
1588 
1589         if (!strict_iomem_checks)
1590                 return false;
1591 
1592         addr = addr & PAGE_MASK;
1593 
1594         read_lock(&resource_lock);
1595         for (p = p->child; p ; p = r_next(NULL, p, &l)) {
1596                 /*
1597                  * We could probably skip resources that lack the
1598                  * IORESOURCE_MEM attribute (this walks the iomem tree)?
1599                  */
1600                 if (p->start >= addr + size)
1601                         break;
1602                 if (p->end < addr)
1603                         continue;
1604                 /*
1605                  * A busy resource is exclusive if IORESOURCE_EXCLUSIVE
1606                  * is set on it, or if CONFIG_IO_STRICT_DEVMEM is
1607                  * enabled; non-busy resources are never exclusive here.
1608                  */
1609                 if ((p->flags & IORESOURCE_BUSY) == 0)
1610                         continue;
1611                 if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
1612                                 || p->flags & IORESOURCE_EXCLUSIVE) {
1613                         err = true;
1614                         break;
1615                 }
1616         }
1617         read_unlock(&resource_lock);
1618 
1619         return err;
1620 }
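
A sketch of the kind of gatekeeping this supports (in the spirit of /dev/mem's
access checks); foo_may_map_phys is hypothetical:

    #include <linux/ioport.h>

    static bool foo_may_map_phys(u64 phys)
    {
            /* One page at a time; iomem_is_exclusive() masks to a page. */
            return !iomem_is_exclusive(phys);
    }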
1621 
1622 struct resource_entry *resource_list_create_entry(struct resource *res,
1623                                                   size_t extra_size)
1624 {
1625         struct resource_entry *entry;
1626 
1627         entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
1628         if (entry) {
1629                 INIT_LIST_HEAD(&entry->node);
1630                 entry->res = res ? res : &entry->__res;
1631         }
1632 
1633         return entry;
1634 }
1635 EXPORT_SYMBOL(resource_list_create_entry);
1636 
1637 void resource_list_free(struct list_head *head)
1638 {
1639         struct resource_entry *entry, *tmp;
1640 
1641         list_for_each_entry_safe(entry, tmp, head, node)
1642                 resource_list_destroy_entry(entry);
1643 }
1644 EXPORT_SYMBOL(resource_list_free);
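
A sketch of building and tearing down a resource list with these helpers; the
foo_add_window() helper is hypothetical, while resource_list_add_tail() and
the embedded __res storage come from <linux/resource_ext.h>:

    #include <linux/resource_ext.h>

    static int foo_add_window(struct list_head *resources,
                              resource_size_t start, resource_size_t end)
    {
            struct resource_entry *entry;

            /* Passing NULL uses the entry's embedded __res as storage. */
            entry = resource_list_create_entry(NULL, 0);
            if (!entry)
                    return -ENOMEM;

            entry->res->start = start;
            entry->res->end   = end;
            entry->res->flags = IORESOURCE_MEM;
            resource_list_add_tail(entry, resources);
            return 0;
    }

A single resource_list_free(&resources) call later frees every entry,
including the embedded resources.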
1645 
1646 #ifdef CONFIG_DEVICE_PRIVATE
1647 static struct resource *__request_free_mem_region(struct device *dev,
1648                 struct resource *base, unsigned long size, const char *name)
1649 {
1650         resource_size_t end, addr;
1651         struct resource *res;
1652 
1653         size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
1654         end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
1655         addr = end - size + 1UL;
1656 
1657         for (; addr > size && addr >= base->start; addr -= size) {
1658                 if (region_intersects(addr, size, 0, IORES_DESC_NONE) !=
1659                                 REGION_DISJOINT)
1660                         continue;
1661 
1662                 if (dev)
1663                         res = devm_request_mem_region(dev, addr, size, name);
1664                 else
1665                         res = request_mem_region(addr, size, name);
1666                 if (!res)
1667                         return ERR_PTR(-ENOMEM);
1668                 res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
1669                 return res;
1670         }
1671 
1672         return ERR_PTR(-ERANGE);
1673 }
1674 
1675 /**
1676  * devm_request_free_mem_region - find free region for device private memory
1677  *
1678  * @dev: device struct to bind the resource to
1679  * @base: resource tree to look in
1680  * @size: size in bytes of the device memory to add
1681  *
1682  * This function tries to find an empty range of physical addresses big enough to
1683  * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
1684  * memory, which in turn allocates struct pages.
1685  */
1686 struct resource *devm_request_free_mem_region(struct device *dev,
1687                 struct resource *base, unsigned long size)
1688 {
1689         return __request_free_mem_region(dev, base, size, dev_name(dev));
1690 }
1691 EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
1692 
1693 struct resource *request_free_mem_region(struct resource *base,
1694                 unsigned long size, const char *name)
1695 {
1696         return __request_free_mem_region(NULL, base, size, name);
1697 }
1698 EXPORT_SYMBOL_GPL(request_free_mem_region);
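
A sketch of the intended use: carve a free physical range out of the iomem
tree so a later step can hotplug it as ZONE_DEVICE memory (e.g. via
devm_memremap_pages()). foo_alloc_private_range is hypothetical:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/ioport.h>

    static struct resource *foo_alloc_private_range(struct device *dev,
                                                    unsigned long size)
    {
            struct resource *res;

            res = devm_request_free_mem_region(dev, &iomem_resource, size);
            if (IS_ERR(res))
                    return res;     /* -ENOMEM or -ERANGE from the search */

            dev_info(dev, "device-private range %pR\n", res);
            return res;
    }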
1699 
1700 #endif /* CONFIG_DEVICE_PRIVATE */
1701 
1702 static int __init strict_iomem(char *str)
1703 {
1704         if (strstr(str, "relaxed"))
1705                 strict_iomem_checks = 0;
1706         if (strstr(str, "strict"))
1707                 strict_iomem_checks = 1;
1708         return 1;
1709 }
1710 
1711 __setup("iomem=", strict_iomem);
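
The accepted values mirror the string checks above, overriding the
CONFIG_STRICT_DEVMEM default:

    iomem=relaxed       disable strict iomem checks
    iomem=strict        enable strict iomem checks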
