/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#ifndef __GENALLOC_H__
#define __GENALLOC_H__

#include <linux/spinlock_types.h>

struct device;
struct device_node;

/**
 * Allocation callback function type definition
 * @map: Pointer to bitmap
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: optional additional data used by the allocation function
 */
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
			unsigned long size,
			unsigned long start,
			unsigned int nr,
			void *data);

/*
 * General purpose special memory pool descriptor.
 */
struct gen_pool {
	spinlock_t lock;
	struct list_head chunks;	/* list of chunks in this pool */
	int min_alloc_order;		/* minimum allocation order */

	genpool_algo_t algo;		/* allocation function */
	void *data;

	const char *name;
};

/*
 * General purpose special memory pool chunk descriptor.
 */
struct gen_pool_chunk {
	struct list_head next_chunk;	/* next chunk in pool */
	atomic_t avail;
	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
	unsigned long start_addr;	/* start address of memory chunk */
	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
	unsigned long bits[0];		/* bitmap for allocating memory chunk */
};

extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
			     size_t, int);
/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
			       size_t size, int nid)
{
	return gen_pool_add_virt(pool, addr, -1, size, nid);
}
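
/*
 * Illustrative sketch, not part of this header: a minimal
 * create/add/alloc/free sequence using the genalloc interfaces declared
 * in this file. The sram_virt/sram_size chunk coordinates and the
 * requested size are hypothetical placeholders; real callers would use
 * their own reserved or on-device memory. The order 5 passed to
 * gen_pool_create() requests a 32-byte minimum allocation granule, and
 * -1 means "any NUMA node".
 *
 *	struct gen_pool *pool;
 *	unsigned long vaddr;
 *	int ret;
 *
 *	pool = gen_pool_create(5, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	ret = gen_pool_add(pool, sram_virt, sram_size, -1);
 *	if (ret) {
 *		gen_pool_destroy(pool);
 *		return ret;
 *	}
 *
 *	vaddr = gen_pool_alloc(pool, 128);
 *	if (vaddr)
 *		gen_pool_free(pool, vaddr, 128);
 */
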
extern void gen_pool_destroy(struct gen_pool *);
extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
		dma_addr_t *dma);
extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
extern void gen_pool_for_each_chunk(struct gen_pool *,
	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
extern size_t gen_pool_size(struct gen_pool *);

extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
		void *data);

extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data);

extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start, unsigned int nr,
		void *data);

extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data);

extern struct gen_pool *devm_gen_pool_create(struct device *dev,
		int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);

bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size);

#ifdef CONFIG_OF
extern struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index);
#else
static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	return NULL;
}
#endif
#endif /* __GENALLOC_H__ */
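
/*
 * Illustrative sketch, not part of this header: selecting a different
 * allocation algorithm with gen_pool_set_algo() and translating an
 * allocation back to a physical address. The names pool, vaddr and
 * paddr are hypothetical and assume a pool set up as in the sketch
 * following gen_pool_add() above.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 *
 *	vaddr = gen_pool_alloc(pool, 256);
 *	if (vaddr)
 *		paddr = gen_pool_virt_to_phys(pool, vaddr);
 */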