lib/genalloc.c


DEFINITIONS

This source file includes the following definitions:
  1. chunk_size
  2. set_bits_ll
  3. clear_bits_ll
  4. bitmap_set_ll
  5. bitmap_clear_ll
  6. gen_pool_create
  7. gen_pool_add_owner
  8. gen_pool_virt_to_phys
  9. gen_pool_destroy
  10. gen_pool_alloc_algo_owner
  11. gen_pool_dma_alloc
  12. gen_pool_dma_alloc_algo
  13. gen_pool_dma_alloc_align
  14. gen_pool_dma_zalloc
  15. gen_pool_dma_zalloc_algo
  16. gen_pool_dma_zalloc_align
  17. gen_pool_free_owner
  18. gen_pool_for_each_chunk
  19. addr_in_gen_pool
  20. gen_pool_avail
  21. gen_pool_size
  22. gen_pool_set_algo
  23. gen_pool_first_fit
  24. gen_pool_first_fit_align
  25. gen_pool_fixed_alloc
  26. gen_pool_first_fit_order_align
  27. gen_pool_best_fit
  28. devm_gen_pool_release
  29. devm_gen_pool_match
  30. gen_pool_get
  31. devm_gen_pool_create
  32. of_gen_pool_get

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has to
 * be taken.  So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */
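
/*
 * Typical usage (an illustrative sketch, not part of this file): a
 * driver creates a pool, publishes a chunk of its special memory and
 * allocates from it.  "sram_base" and the sizes are made up; the
 * wrappers used below (gen_pool_add(), gen_pool_alloc(),
 * gen_pool_free()) are declared in <linux/genalloc.h>.
 *
 *      struct gen_pool *pool;
 *      unsigned long vaddr;
 *
 *      pool = gen_pool_create(ilog2(32), NUMA_NO_NODE); // 32-byte granules
 *      if (!pool)
 *              return -ENOMEM;
 *      if (gen_pool_add(pool, (unsigned long)sram_base, SZ_64K,
 *                       NUMA_NO_NODE)) {
 *              gen_pool_destroy(pool);
 *              return -ENOMEM;
 *      }
 *      vaddr = gen_pool_alloc(pool, 256);      // returns 0 when exhausted
 *      ...
 *      gen_pool_free(pool, vaddr, 256);
 */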

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, the one that loses the race returns the
 * number of bits still to be set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, the one that loses the race returns the
 * number of bits still to be cleared; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
                pool->name = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid, void *owner)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = vzalloc_node(nbytes, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        chunk->owner = owner;
        atomic_long_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);
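
/*
 * Example (illustrative sketch, not part of this file): publishing a
 * chunk whose physical address is known, so gen_pool_virt_to_phys()
 * and the gen_pool_dma_alloc() helpers can be used.  "sram_virt" and
 * "sram_phys" are made-up names for an already-mapped region;
 * gen_pool_add_virt() is the wrapper from <linux/genalloc.h>.
 *
 *      ret = gen_pool_add_virt(pool, (unsigned long)sram_virt,
 *                              sram_phys, SZ_64K, NUMA_NO_NODE);
 *      if (ret)
 *              return ret;
 */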

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                vfree(chunk);
        }
        kfree_const(pool->name);
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: starting address of the allocated memory, or 0 on failure.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
                genpool_algo_t algo, void *data, void **owner)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_long_read(&chunk->avail))
                        continue;

                start_bit = 0;
                end_bit = chunk_size(chunk) >> order;
retry:
                start_bit = algo(chunk->bits, end_bit, start_bit,
                                 nbits, data, pool, chunk->start_addr);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_long_sub(size, &chunk->avail);
                if (owner)
                        *owner = chunk->owner;
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
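
/*
 * Example (illustrative sketch, not part of this file): allocating with
 * an explicit algorithm while retrieving the owner pointer stashed by
 * gen_pool_add_owner().  "struct my_ctx" is a made-up owner type.
 *
 *      void *owner;
 *      unsigned long addr;
 *
 *      addr = gen_pool_alloc_algo_owner(pool, 512, gen_pool_best_fit,
 *                                       NULL, &owner);
 *      if (!addr)
 *              return -ENOMEM;
 *      struct my_ctx *ctx = owner;     // whatever was stashed at add time
 */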

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.  Uses the
 * given pool allocation function.  Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        unsigned long vaddr;

        if (!pool)
                return NULL;

        vaddr = gen_pool_alloc_algo(pool, size, algo, data);
        if (!vaddr)
                return NULL;

        if (dma)
                *dma = gen_pool_virt_to_phys(pool, vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction.  Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_alloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);
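
/*
 * Example (illustrative sketch, not part of this file): a 256-byte
 * buffer aligned to 64 bytes, with the bus address returned in "dma"
 * for programming into a device register.  "base" and MYDEV_BUF_ADDR
 * are hypothetical.
 *
 *      dma_addr_t dma;
 *      void *buf;
 *
 *      buf = gen_pool_dma_alloc_align(pool, 256, &dma, 64);
 *      if (!buf)
 *              return -ENOMEM;
 *      writel(lower_32_bits(dma), base + MYDEV_BUF_ADDR);
 */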

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
        return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool.  Uses
 * the given pool allocation function.  Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, genpool_algo_t algo, void *data)
{
        void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

        if (vaddr)
                memset(vaddr, 0, size);

        return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction.  Cannot be used in an NMI handler
 * on architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
                dma_addr_t *dma, int align)
{
        struct genpool_data_align data = { .align = align };

        return gen_pool_dma_zalloc_algo(pool, size, dma,
                        gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
                void **owner)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (owner)
                *owner = NULL;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_long_add(size, &chunk->avail);
                        if (owner)
                                *owner = chunk->owner;
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool.  @func is
 * called with rcu_read_lock() held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
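
/*
 * Example (illustrative sketch, not part of this file): a callback that
 * logs every chunk; the caller threads its device through @data.
 *
 *      static void show_chunk(struct gen_pool *pool,
 *                             struct gen_pool_chunk *chunk, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              dev_info(dev, "chunk %#lx-%#lx\n",
 *                       chunk->start_addr, chunk->end_addr);
 *      }
 *
 *      gen_pool_for_each_chunk(pool, show_chunk, dev);
 */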

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool:       the generic memory pool
 * @start:      start address
 * @size:       size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
                        size_t size)
{
        bool found = false;
        unsigned long end = start + size - 1;
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
                if (start >= chunk->start_addr && start <= chunk->end_addr) {
                        if (end <= chunk->end_addr) {
                                found = true;
                                break;
                        }
                }
        }
        rcu_read_unlock();
        return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_long_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
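
/*
 * Example (illustrative sketch, not part of this file): switching the
 * pool to best-fit allocation, so later gen_pool_alloc() calls use it:
 *
 *      gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 *
 * When an algorithm takes data (e.g. gen_pool_first_fit_align), the
 * pointer stored here must stay valid for as long as the pool uses it:
 *
 *      static struct genpool_data_align align_data = { .align = 64 };
 *
 *      gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
 */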

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the memory chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_align *alignment;
        unsigned long align_mask, align_off;
        int order;

        alignment = data;
        order = pool->min_alloc_order;
        align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
        align_off = (start_addr & (alignment->align - 1)) >> order;

        return bitmap_find_next_zero_area_off(map, size, start, nr,
                                              align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data specifying the fixed offset (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        struct genpool_data_fixed *fixed_data;
        int order;
        unsigned long offset_bit;
        unsigned long start_bit;

        fixed_data = data;
        order = pool->min_alloc_order;
        offset_bit = fixed_data->offset >> order;
        if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
                return size;

        start_bit = bitmap_find_next_zero_area(map, size,
                        start + offset_bit, nr, 0);
        if (start_bit != offset_bit)
                start_bit = size;
        return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
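
/*
 * Example (illustrative sketch, not part of this file): reserving 1 KiB
 * at a fixed offset of 4 KiB from the start of the chunk, via the
 * gen_pool_alloc_algo() wrapper from <linux/genalloc.h>.
 *
 *      struct genpool_data_fixed fixed = { .offset = SZ_4K };
 *      unsigned long addr;
 *
 *      addr = gen_pool_alloc_algo(pool, SZ_1K, gen_pool_fixed_alloc,
 *                                 &fixed);
 *      if (!addr)
 *              return -EBUSY;
 */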

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data, struct gen_pool *pool,
                unsigned long start_addr)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region from which
 * the requested memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                int next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
        struct gen_pool **p = res;

        /* NULL data matches only a pool without an assigned name */
        if (!data && !(*p)->name)
                return 1;

        if (!data || !(*p)->name)
                return 0;

        return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
        struct gen_pool **p;

        p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
                        (void *)name);
        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
 *
 * Returns the new pool on success, or an ERR_PTR()-encoded errno on failure.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                                      int nid, const char *name)
{
        struct gen_pool **ptr, *pool;
        const char *pool_name = NULL;

        /* Check that genpool to be created is uniquely addressed on device */
        if (gen_pool_get(dev, name))
                return ERR_PTR(-EINVAL);

        if (name) {
                pool_name = kstrdup_const(name, GFP_KERNEL);
                if (!pool_name)
                        return ERR_PTR(-ENOMEM);
        }

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                goto free_pool_name;

        pool = gen_pool_create(min_alloc_order, nid);
        if (!pool)
                goto free_devres;

        *ptr = pool;
        pool->name = pool_name;
        devres_add(dev, ptr);

        return pool;

free_devres:
        devres_free(ptr);
free_pool_name:
        kfree_const(pool_name);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
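
/*
 * Example (illustrative sketch, not part of this file): typical use
 * from a driver's probe routine; the pool is destroyed automatically
 * when the device is unbound.  "mydrv_probe" is hypothetical.
 *
 *      static int mydrv_probe(struct platform_device *pdev)
 *      {
 *              struct gen_pool *pool;
 *
 *              pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *                                          NUMA_NO_NODE, "sram");
 *              if (IS_ERR(pool))
 *                      return PTR_ERR(pool);
 *              ...
 *      }
 */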

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool, *parent;
        const char *name = NULL;
        struct gen_pool *pool = NULL;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;

        pdev = of_find_device_by_node(np_pool);
        if (!pdev) {
                /* Check if named gen_pool is created by parent node device */
                parent = of_get_parent(np_pool);
                pdev = of_find_device_by_node(parent);
                of_node_put(parent);

                of_property_read_string(np_pool, "label", &name);
                if (!name)
                        name = np_pool->name;
        }
        if (pdev)
                pool = gen_pool_get(&pdev->dev, name);
        of_node_put(np_pool);

        return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
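
/*
 * Example (illustrative sketch, not part of this file): a consumer node
 * referencing a pool-providing node by phandle, and the matching lookup.
 * Node, property and label names are made up.
 *
 *      sram: sram@10000000 {
 *              compatible = "mmio-sram";
 *      };
 *
 *      mydev@20000000 {
 *              sram = <&sram>;
 *      };
 *
 * In the consumer driver:
 *
 *      pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *      if (!pool)
 *              return -EPROBE_DEFER;
 */
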
#endif /* CONFIG_OF */
