Searched refs:pool (Results 1 - 200 of 648) sorted by relevance


/linux-4.1.27/net/ceph/
msgpool.c 12 struct ceph_msgpool *pool = arg; msgpool_alloc() local
15 msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true); msgpool_alloc()
17 dout("msgpool_alloc %s failed\n", pool->name); msgpool_alloc()
19 dout("msgpool_alloc %s %p\n", pool->name, msg); msgpool_alloc()
20 msg->pool = pool; msgpool_alloc()
27 struct ceph_msgpool *pool = arg; msgpool_free() local
30 dout("msgpool_release %s %p\n", pool->name, msg); msgpool_free()
31 msg->pool = NULL; msgpool_free()
35 int ceph_msgpool_init(struct ceph_msgpool *pool, int type, ceph_msgpool_init() argument
39 pool->type = type; ceph_msgpool_init()
40 pool->front_len = front_len; ceph_msgpool_init()
41 pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); ceph_msgpool_init()
42 if (!pool->pool) ceph_msgpool_init()
44 pool->name = name; ceph_msgpool_init()
48 void ceph_msgpool_destroy(struct ceph_msgpool *pool) ceph_msgpool_destroy() argument
50 dout("msgpool %s destroy\n", pool->name); ceph_msgpool_destroy()
51 mempool_destroy(pool->pool); ceph_msgpool_destroy()
54 struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, ceph_msgpool_get() argument
59 if (front_len > pool->front_len) { ceph_msgpool_get()
60 dout("msgpool_get %s need front %d, pool size is %d\n", ceph_msgpool_get()
61 pool->name, front_len, pool->front_len); ceph_msgpool_get()
65 return ceph_msg_new(pool->type, front_len, GFP_NOFS, false); ceph_msgpool_get()
68 msg = mempool_alloc(pool->pool, GFP_NOFS); ceph_msgpool_get()
69 dout("msgpool_get %s %p\n", pool->name, msg); ceph_msgpool_get()
73 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) ceph_msgpool_put() argument
75 dout("msgpool_put %s %p\n", pool->name, msg); ceph_msgpool_put()
78 msg->front.iov_len = pool->front_len; ceph_msgpool_put()
79 msg->hdr.front_len = cpu_to_le32(pool->front_len); ceph_msgpool_put()
82 mempool_free(msg, pool->pool); ceph_msgpool_put()
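For context, ceph_msgpool is a thin wrapper over the generic mempool API (include/linux/mempool.h and mm/mempool.c, further down in these results): mempool_create() is handed custom alloc/free callbacks that build and tear down ceph_msg objects. A minimal sketch of that same custom-callback pattern follows; the my_obj type, the callback names, and my_pool_setup/teardown are illustrative, only the mempool_* calls are the kernel API shown above.

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_obj { int payload; };

/* mempool_alloc_t: called when the pool needs a fresh element */
static void *my_obj_alloc(gfp_t gfp_mask, void *pool_data)
{
        return kzalloc(sizeof(struct my_obj), gfp_mask);
}

/* mempool_free_t: called when a reserved element is released */
static void my_obj_free(void *element, void *pool_data)
{
        kfree(element);
}

static mempool_t *my_pool;

static int my_pool_setup(void)
{
        /* keep at least 4 pre-built objects in reserve */
        my_pool = mempool_create(4, my_obj_alloc, my_obj_free, NULL);
        return my_pool ? 0 : -ENOMEM;
}

static void my_pool_teardown(void)
{
        mempool_destroy(my_pool);
}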
osdmap.c 380 if (l.pool < r.pool) pgid_cmp()
382 if (l.pool > r.pool) pgid_cmp()
433 pgid.pool, pgid.seed, pg); __lookup_pg_mapping()
445 dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed, __remove_pg_mapping()
451 dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed); __remove_pg_mapping()
456 * rbtree of pg pool info
624 u64 pool; decode_pool_names() local
627 dout(" %d pool names\n", num); decode_pool_names()
629 ceph_decode_64_safe(p, end, pool, bad); decode_pool_names()
631 dout(" pool %llu len %d\n", pool, len); decode_pool_names()
633 pi = __lookup_pg_pool(&map->pg_pools, pool); decode_pool_names()
803 u64 pool; __decode_pools() local
806 ceph_decode_64_safe(p, end, pool, e_inval); __decode_pools()
808 pi = __lookup_pg_pool(&map->pg_pools, pool); __decode_pools()
814 pi->id = pool; __decode_pools()
1206 u64 pool; osdmap_apply_incremental() local
1287 ceph_decode_64_safe(p, end, pool, e_inval); osdmap_apply_incremental()
1288 pi = __lookup_pg_pool(&map->pg_pools, pool); osdmap_apply_incremental()
1460 pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool); ceph_oloc_oid_to_pg()
1464 pg_out->pool = oloc->pool; ceph_oloc_oid_to_pg()
1469 pg_out->pool, pg_out->seed); ceph_oloc_oid_to_pg()
1496 struct ceph_pg_pool_info *pool, pg_to_raw_osds()
1503 ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset, pg_to_raw_osds()
1504 pool->type, pool->size); pg_to_raw_osds()
1506 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n", pg_to_raw_osds()
1507 pgid.pool, pool->crush_ruleset, pool->type, pg_to_raw_osds()
1508 pool->size); pg_to_raw_osds()
1513 min_t(int, pool->size, CEPH_PG_MAX_SIZE), pg_to_raw_osds()
1516 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n", pg_to_raw_osds()
1517 len, ruleno, pgid.pool, pool->crush_ruleset, pg_to_raw_osds()
1518 pool->type, pool->size); pg_to_raw_osds()
1532 struct ceph_pg_pool_info *pool, raw_to_up_osds()
1538 if (ceph_can_shift_osds(pool)) { raw_to_up_osds()
1567 struct ceph_pg_pool_info *pool, apply_primary_affinity()
1625 if (ceph_can_shift_osds(pool) && pos > 0) { apply_primary_affinity()
1640 struct ceph_pg_pool_info *pool, struct ceph_pg pgid, apply_temps()
1649 pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num, apply_temps()
1650 pool->pg_num_mask); apply_temps()
1660 if (ceph_can_shift_osds(pool)) apply_temps()
1699 struct ceph_pg_pool_info *pool; ceph_calc_pg_acting() local
1703 pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool); ceph_calc_pg_acting()
1704 if (!pool) { ceph_calc_pg_acting()
1709 if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) { ceph_calc_pg_acting()
1710 /* hash pool id and seed so that pool PGs do not overlap */ ceph_calc_pg_acting()
1712 ceph_stable_mod(pgid.seed, pool->pgp_num, ceph_calc_pg_acting()
1713 pool->pgp_num_mask), ceph_calc_pg_acting()
1714 pgid.pool); ceph_calc_pg_acting()
1717 * legacy behavior: add ps and pool together. this is ceph_calc_pg_acting()
1718 * not a great approach because the PGs from each pool ceph_calc_pg_acting()
1722 pps = ceph_stable_mod(pgid.seed, pool->pgp_num, ceph_calc_pg_acting()
1723 pool->pgp_num_mask) + ceph_calc_pg_acting()
1724 (unsigned)pgid.pool; ceph_calc_pg_acting()
1727 len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds); ceph_calc_pg_acting()
1733 len = raw_to_up_osds(osdmap, pool, osds, len, primary); ceph_calc_pg_acting()
1735 apply_primary_affinity(osdmap, pps, pool, osds, len, primary); ceph_calc_pg_acting()
1737 len = apply_temps(osdmap, pool, pgid, osds, len, primary); ceph_calc_pg_acting()
1495 pg_to_raw_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pool, struct ceph_pg pgid, u32 pps, int *osds) pg_to_raw_osds() argument
1531 raw_to_up_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pool, int *osds, int len, int *primary) raw_to_up_osds() argument
1566 apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps, struct ceph_pg_pool_info *pool, int *osds, int len, int *primary) apply_primary_affinity() argument
1639 apply_temps(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pool, struct ceph_pg pgid, int *osds, int len, int *primary) apply_temps() argument
debugfs.c 69 struct ceph_pg_pool_info *pool = osdmap_show() local
72 seq_printf(s, "pool %lld pg_num %u (%d) read_tier %lld write_tier %lld\n", osdmap_show()
73 pool->id, pool->pg_num, pool->pg_num_mask, osdmap_show()
74 pool->read_tier, pool->write_tier); osdmap_show()
91 seq_printf(s, "pg_temp %llu.%x [", pg->pgid.pool, osdmap_show()
102 seq_printf(s, "primary_temp %llu.%x %d\n", pg->pgid.pool, osdmap_show()
157 req->r_pgid.pool, req->r_pgid.seed); osdc_show()
/linux-4.1.27/drivers/staging/android/ion/
ion_page_pool.c 27 static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) ion_page_pool_alloc_pages() argument
29 struct page *page = alloc_pages(pool->gfp_mask, pool->order); ion_page_pool_alloc_pages()
33 ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, ion_page_pool_alloc_pages()
38 static void ion_page_pool_free_pages(struct ion_page_pool *pool, ion_page_pool_free_pages() argument
41 __free_pages(page, pool->order); ion_page_pool_free_pages()
44 static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) ion_page_pool_add() argument
46 mutex_lock(&pool->mutex); ion_page_pool_add()
48 list_add_tail(&page->lru, &pool->high_items); ion_page_pool_add()
49 pool->high_count++; ion_page_pool_add()
51 list_add_tail(&page->lru, &pool->low_items); ion_page_pool_add()
52 pool->low_count++; ion_page_pool_add()
54 mutex_unlock(&pool->mutex); ion_page_pool_add()
58 static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) ion_page_pool_remove() argument
63 BUG_ON(!pool->high_count); ion_page_pool_remove()
64 page = list_first_entry(&pool->high_items, struct page, lru); ion_page_pool_remove()
65 pool->high_count--; ion_page_pool_remove()
67 BUG_ON(!pool->low_count); ion_page_pool_remove()
68 page = list_first_entry(&pool->low_items, struct page, lru); ion_page_pool_remove()
69 pool->low_count--; ion_page_pool_remove()
76 struct page *ion_page_pool_alloc(struct ion_page_pool *pool) ion_page_pool_alloc() argument
80 BUG_ON(!pool); ion_page_pool_alloc()
82 mutex_lock(&pool->mutex); ion_page_pool_alloc()
83 if (pool->high_count) ion_page_pool_alloc()
84 page = ion_page_pool_remove(pool, true); ion_page_pool_alloc()
85 else if (pool->low_count) ion_page_pool_alloc()
86 page = ion_page_pool_remove(pool, false); ion_page_pool_alloc()
87 mutex_unlock(&pool->mutex); ion_page_pool_alloc()
90 page = ion_page_pool_alloc_pages(pool); ion_page_pool_alloc()
95 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) ion_page_pool_free() argument
99 BUG_ON(pool->order != compound_order(page)); ion_page_pool_free()
101 ret = ion_page_pool_add(pool, page); ion_page_pool_free()
103 ion_page_pool_free_pages(pool, page); ion_page_pool_free()
106 static int ion_page_pool_total(struct ion_page_pool *pool, bool high) ion_page_pool_total() argument
108 int count = pool->low_count; ion_page_pool_total()
111 count += pool->high_count; ion_page_pool_total()
113 return count << pool->order; ion_page_pool_total()
116 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, ion_page_pool_shrink() argument
128 return ion_page_pool_total(pool, high); ion_page_pool_shrink()
133 mutex_lock(&pool->mutex); ion_page_pool_shrink()
134 if (pool->low_count) { ion_page_pool_shrink()
135 page = ion_page_pool_remove(pool, false); ion_page_pool_shrink()
136 } else if (high && pool->high_count) { ion_page_pool_shrink()
137 page = ion_page_pool_remove(pool, true); ion_page_pool_shrink()
139 mutex_unlock(&pool->mutex); ion_page_pool_shrink()
142 mutex_unlock(&pool->mutex); ion_page_pool_shrink()
143 ion_page_pool_free_pages(pool, page); ion_page_pool_shrink()
151 struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), ion_page_pool_create() local
153 if (!pool) ion_page_pool_create()
155 pool->high_count = 0; ion_page_pool_create()
156 pool->low_count = 0; ion_page_pool_create()
157 INIT_LIST_HEAD(&pool->low_items); ion_page_pool_create()
158 INIT_LIST_HEAD(&pool->high_items); ion_page_pool_create()
159 pool->gfp_mask = gfp_mask | __GFP_COMP; ion_page_pool_create()
160 pool->order = order; ion_page_pool_create()
161 mutex_init(&pool->mutex); ion_page_pool_create()
162 plist_node_init(&pool->list, order); ion_page_pool_create()
164 return pool; ion_page_pool_create()
167 void ion_page_pool_destroy(struct ion_page_pool *pool) ion_page_pool_destroy() argument
169 kfree(pool); ion_page_pool_destroy()
ion_system_heap.c 60 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; alloc_buffer_page() local
64 page = ion_page_pool_alloc(pool); alloc_buffer_page()
87 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; free_buffer_page() local
89 ion_page_pool_free(pool, page); free_buffer_page()
220 struct ion_page_pool *pool = sys_heap->pools[i]; ion_system_heap_shrink() local
222 nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); ion_system_heap_shrink()
249 struct ion_page_pool *pool = sys_heap->pools[i]; ion_system_heap_debug_show() local
251 seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", ion_system_heap_debug_show()
252 pool->high_count, pool->order, ion_system_heap_debug_show()
253 (PAGE_SIZE << pool->order) * pool->high_count); ion_system_heap_debug_show()
254 seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", ion_system_heap_debug_show()
255 pool->low_count, pool->order, ion_system_heap_debug_show()
256 (PAGE_SIZE << pool->order) * pool->low_count); ion_system_heap_debug_show()
276 struct ion_page_pool *pool; ion_system_heap_create() local
281 pool = ion_page_pool_create(gfp_flags, orders[i]); ion_system_heap_create()
282 if (!pool) ion_system_heap_create()
284 heap->pools[i] = pool; ion_system_heap_create()
ion_chunk_heap.c 29 struct gen_pool *pool; member in struct:ion_chunk_heap
69 unsigned long paddr = gen_pool_alloc(chunk_heap->pool, ion_chunk_heap_allocate()
84 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), ion_chunk_heap_allocate()
112 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), ion_chunk_heap_free()
162 chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + ion_chunk_heap_create()
164 if (!chunk_heap->pool) { ion_chunk_heap_create()
172 gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); ion_chunk_heap_create()
191 gen_pool_destroy(chunk_heap->pool); ion_chunk_heap_destroy()
ion_carveout_heap.c 30 struct gen_pool *pool; member in struct:ion_carveout_heap
40 unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); ion_carveout_allocate()
56 gen_pool_free(carveout_heap->pool, addr, size); ion_carveout_free()
170 carveout_heap->pool = gen_pool_create(12, -1); ion_carveout_heap_create()
171 if (!carveout_heap->pool) { ion_carveout_heap_create()
176 gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, ion_carveout_heap_create()
190 gen_pool_destroy(carveout_heap->pool); ion_carveout_heap_destroy()
ion_priv.h 107 * system, not put in a page pool or otherwise cached.
162 * Represents a pool of memory from which buffers can be made. In some
345 * functions for creating and destroying a heap pool -- allows you
346 * to keep a pool of pre allocated memory to use from your heap. Keeping
347 * a pool of memory that is ready for dma, ie any cached mapping have been
353 * @high_count: number of highmem items in the pool
354 * @low_count: number of lowmem items in the pool
360 * @order: order of pages in the pool
363 * Allows you to keep a pool of pre allocated pages to use from your heap.
364 * Keeping a pool of pages that is ready for dma, ie any cached mapping have
384 /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
385 * @pool: the pool
391 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
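For context, the ion_page_pool_* calls matched above form a small caching layer used by the ION system heap: one pool per page order, pages allocated and freed through it, and a shrinker that drains the cache. A minimal sketch against the declarations in ion_priv.h; this API is ION-internal, so the include is driver-local and example_cycle_one_page is purely illustrative.

/* Sketch only: ion_page_pool_* lives in drivers/staging/android/ion and is
 * declared in ion_priv.h, so it is usable only from code inside that driver. */
#include <linux/gfp.h>
#include "ion_priv.h"

static int example_cycle_one_page(void)
{
        struct ion_page_pool *pool;
        struct page *page;

        /* pool of order-0 pages, matching the create() call in ion_system_heap.c above */
        pool = ion_page_pool_create(GFP_KERNEL, 0);
        if (!pool)
                return -ENOMEM;

        page = ion_page_pool_alloc(pool);       /* pops a cached page or allocates one */
        if (page)
                ion_page_pool_free(pool, page); /* puts it back on the pool's free list */

        ion_page_pool_shrink(pool, GFP_KERNEL, 16); /* release cached pages (up to 16) */
        ion_page_pool_destroy(pool);                /* frees only the pool struct itself */
        return 0;
}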
/linux-4.1.27/arch/metag/kernel/
tcm.c 21 struct gen_pool *pool; member in struct:tcm_pool
29 struct tcm_pool *pool; find_pool() local
32 pool = list_entry(lh, struct tcm_pool, list); find_pool()
33 if (pool->tag == tag) find_pool()
34 return pool; find_pool()
41 * tcm_alloc - allocate memory from a TCM pool
42 * @tag: tag of the pool to allocate memory from
45 * Allocate the requested number of bytes from the pool matching
52 struct tcm_pool *pool; tcm_alloc() local
54 pool = find_pool(tag); tcm_alloc()
55 if (!pool) tcm_alloc()
58 vaddr = gen_pool_alloc(pool->pool, len); tcm_alloc()
66 * tcm_free - free a block of memory to a TCM pool
67 * @tag: tag of the pool to free memory to
72 * pool matching the specified tag.
76 struct tcm_pool *pool; tcm_free() local
78 pool = find_pool(tag); tcm_free()
79 if (!pool) tcm_free()
81 gen_pool_free(pool->pool, addr, len); tcm_free()
95 struct tcm_pool *pool; tcm_lookup_tag() local
99 pool = list_entry(lh, struct tcm_pool, list); tcm_lookup_tag()
100 if (addr >= pool->start && addr < pool->end) tcm_lookup_tag()
101 return pool->tag; tcm_lookup_tag()
108 * tcm_add_region - add a memory region to TCM pool list
111 * Add a region of memory to the TCM pool list. Returns 0 on success.
115 struct tcm_pool *pool; tcm_add_region() local
117 pool = kmalloc(sizeof(*pool), GFP_KERNEL); tcm_add_region()
118 if (!pool) { tcm_add_region()
119 pr_err("Failed to alloc memory for TCM pool!\n"); tcm_add_region()
123 pool->tag = reg->tag; tcm_add_region()
124 pool->start = reg->res.start; tcm_add_region()
125 pool->end = reg->res.end; tcm_add_region()
131 pool->pool = gen_pool_create(3, -1); tcm_add_region()
133 if (!pool->pool) { tcm_add_region()
134 pr_err("Failed to create TCM pool!\n"); tcm_add_region()
135 kfree(pool); tcm_add_region()
139 if (gen_pool_add(pool->pool, reg->res.start, tcm_add_region()
141 pr_err("Failed to add memory to TCM pool!\n"); tcm_add_region()
144 pr_info("Added %s TCM pool (%08x bytes @ %08x)\n", tcm_add_region()
148 list_add_tail(&pool->list, &pool_list); tcm_add_region()
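A minimal tcm_alloc()/tcm_free() sketch, assuming the tag-and-length signatures implied by the kernel-doc above (unsigned long tcm_alloc(unsigned int tag, size_t len) and void tcm_free(unsigned int tag, unsigned long addr, size_t len)). This is metag-specific; the include path, the tag value, and tcm_example are assumptions for illustration.

/* Sketch, arch/metag only: signatures and header inferred from the kernel-doc above. */
#include <asm/tcm.h>

static void tcm_example(unsigned int tag)
{
        unsigned long vaddr;

        vaddr = tcm_alloc(tag, 64);     /* carve 64 bytes out of the tagged TCM pool */
        if (!vaddr)
                return;                 /* no pool with this tag, or pool exhausted */

        /* ... use the tightly-coupled memory at vaddr ... */

        tcm_free(tag, vaddr, 64);       /* return the block to the same pool */
}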
/linux-4.1.27/include/linux/
zbud.h 9 int (*evict)(struct zbud_pool *pool, unsigned long handle);
13 void zbud_destroy_pool(struct zbud_pool *pool);
14 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
16 void zbud_free(struct zbud_pool *pool, unsigned long handle);
17 int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
18 void *zbud_map(struct zbud_pool *pool, unsigned long handle);
19 void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
20 u64 zbud_get_pool_size(struct zbud_pool *pool);
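The zbud.h declarations above pair a handle-based alloc/free with map/unmap for access. A minimal caller sketch, using the zbud_create_pool(gfp, ops) form documented in mm/zbud.c further down; zbud_example is illustrative, and passing NULL ops is an assumption (alloc/free still work, but zbud_reclaim_page() then has no evict hook; see the reclaim sketch after mm/zbud.c below).

#include <linux/kernel.h>
#include <linux/zbud.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int zbud_example(void)
{
        struct zbud_pool *pool;
        unsigned long handle;
        void *addr;
        int ret;

        /* NULL ops: fine for alloc/free, but reclaim has nothing to call back into */
        pool = zbud_create_pool(GFP_KERNEL, NULL);
        if (!pool)
                return -ENOMEM;

        ret = zbud_alloc(pool, 100, GFP_KERNEL, &handle);
        if (ret)
                goto out;

        addr = zbud_map(pool, handle);          /* handle -> kernel virtual address */
        memset(addr, 0, 100);
        zbud_unmap(pool, handle);

        zbud_free(pool, handle);
        pr_info("zbud pool holds %llu pages\n",
                (unsigned long long)zbud_get_pool_size(pool));
out:
        zbud_destroy_pool(pool);
        return ret;
}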
zpool.h 7 * storage pool implementations. Typically, this is used to
17 int (*evict)(struct zpool *pool, unsigned long handle);
42 char *zpool_get_type(struct zpool *pool);
44 void zpool_destroy_pool(struct zpool *pool);
46 int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
49 void zpool_free(struct zpool *pool, unsigned long handle);
51 int zpool_shrink(struct zpool *pool, unsigned int pages,
54 void *zpool_map_handle(struct zpool *pool, unsigned long handle,
57 void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
59 u64 zpool_get_total_size(struct zpool *pool);
66 * @create: create a new pool.
67 * @destroy: destroy a pool.
68 * @malloc: allocate mem from a pool.
69 * @free: free mem from a pool.
70 * @shrink: shrink the pool.
73 * @total_size: get total size of a pool.
85 void (*destroy)(void *pool);
87 int (*malloc)(void *pool, size_t size, gfp_t gfp,
89 void (*free)(void *pool, unsigned long handle);
91 int (*shrink)(void *pool, unsigned int pages,
94 void *(*map)(void *pool, unsigned long handle,
96 void (*unmap)(void *pool, unsigned long handle);
98 u64 (*total_size)(void *pool);
105 int zpool_evict(void *pool, unsigned long handle);
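zpool is a thin indirection layer: each call above just forwards to the backing allocator (zbud or zsmalloc) through struct zpool_driver. A caller sketch, assuming the 4.1-era creation call zpool_create_pool(type, name, gfp, ops) that zswap uses; zpool_example is illustrative, the chosen backend must be built in, and NULL ops just means zpool_shrink() has no evict callback to drive.

#include <linux/kernel.h>
#include <linux/zpool.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int zpool_example(void)
{
        struct zpool *pool;
        unsigned long handle;
        char *buf;
        int ret;

        /* backend "zbud" or "zsmalloc"; requires that driver to be registered */
        pool = zpool_create_pool("zbud", "example", GFP_KERNEL, NULL);
        if (!pool)
                return -ENOMEM;

        ret = zpool_malloc(pool, 128, GFP_KERNEL, &handle);
        if (ret)
                goto out;

        buf = zpool_map_handle(pool, handle, ZPOOL_MM_RW);  /* pin + map for writing */
        memset(buf, 0xaa, 128);
        zpool_unmap_handle(pool, handle);

        pr_info("%s pool uses %llu bytes\n", zpool_get_type(pool),
                (unsigned long long)zpool_get_total_size(pool));
        zpool_free(pool, handle);
out:
        zpool_destroy_pool(pool);
        return ret;
}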
dmapool.h 22 void dma_pool_destroy(struct dma_pool *pool);
24 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
27 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
30 * Managed DMA pool
34 void dmam_pool_destroy(struct dma_pool *pool);
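A minimal dma_pool sketch for a driver that needs many small coherent blocks; dev, the block size, the alignment, and dma_pool_example are illustrative, the calls themselves match the declarations above and the kernel-doc in mm/dmapool.c below.

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/gfp.h>

/* Sketch: dev would be the probing driver's struct device. */
static int dma_pool_example(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;

        /* 64-byte blocks, 16-byte aligned, no boundary-crossing restriction */
        pool = dma_pool_create("example-pool", dev, 64, 16, 0);
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma); /* CPU address + bus address */
        if (vaddr) {
                /* ... hand 'dma' to the hardware, fill 'vaddr' from the CPU ... */
                dma_pool_free(pool, vaddr, dma);
        }

        dma_pool_destroy(pool);
        return 0;
}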
zsmalloc.h 40 void zs_destroy_pool(struct zs_pool *pool);
42 unsigned long zs_malloc(struct zs_pool *pool, size_t size);
43 void zs_free(struct zs_pool *pool, unsigned long obj);
45 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
47 void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
49 unsigned long zs_get_total_pages(struct zs_pool *pool);
50 unsigned long zs_compact(struct zs_pool *pool);
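zsmalloc hands back opaque handles rather than pointers, so every access goes through zs_map_object()/zs_unmap_object(). A minimal sketch, assuming the 4.1-era zs_create_pool(name, flags) form visible in mm/zsmalloc.c below and the ZS_MM_RW map mode from zsmalloc.h; zsmalloc_example is illustrative.

#include <linux/kernel.h>
#include <linux/zsmalloc.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int zsmalloc_example(void)
{
        struct zs_pool *pool;
        unsigned long handle;
        void *dst;

        /* name feeds the debugfs stats code shown below; flags are used when growing */
        pool = zs_create_pool("example", GFP_KERNEL);
        if (!pool)
                return -ENOMEM;

        handle = zs_malloc(pool, 300);          /* opaque handle, not a pointer */
        if (handle) {
                dst = zs_map_object(pool, handle, ZS_MM_RW); /* atomic: no sleeping until unmap */
                memset(dst, 0, 300);
                zs_unmap_object(pool, handle);
                zs_free(pool, handle);
        }

        pr_info("pool spans %lu pages\n", zs_get_total_pages(pool));
        zs_destroy_pool(pool);
        return 0;
}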
genalloc.h 15 * available. If new memory is added to the pool a lock has to be
53 * General purpose special memory pool descriptor.
57 struct list_head chunks; /* list of chunks in this pool */
65 * General purpose special memory pool chunk descriptor.
68 struct list_head next_chunk; /* next chunk in pool */
77 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
81 * gen_pool_add - add a new chunk of special memory to the pool
82 * @pool: pool to add new memory chunk to
83 * @addr: starting address of memory chunk to add to pool
84 * @size: size in bytes of the memory chunk to add to pool
88 * Add a new chunk of special memory to the specified pool.
92 static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr, gen_pool_add() argument
95 return gen_pool_add_virt(pool, addr, -1, size, nid); gen_pool_add()
99 extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
107 extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
124 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
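gen_pool is the general-purpose allocator that the ion carveout/chunk heaps and the metag TCM code above are built on: create a pool with a minimum allocation order, add one or more regions to it, then allocate and free from it. A minimal sketch; base, size, and the example_pool_* names stand in for whatever special memory region the caller owns.

#include <linux/genalloc.h>
#include <linux/gfp.h>

/* Sketch: 'base' and 'size' describe a region the caller owns, e.g. a carveout
 * or on-chip SRAM, as in the ion and TCM users above. */
static struct gen_pool *example_pool_init(unsigned long base, size_t size)
{
        struct gen_pool *pool;

        pool = gen_pool_create(5, -1);          /* min alloc order 5: 32-byte granules */
        if (!pool)
                return NULL;

        if (gen_pool_add(pool, base, size, -1)) {  /* hand the whole region to the pool */
                gen_pool_destroy(pool);
                return NULL;
        }
        return pool;
}

static void example_pool_use(struct gen_pool *pool)
{
        unsigned long addr = gen_pool_alloc(pool, 256);

        if (addr)
                gen_pool_free(pool, addr, 256);
}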
percpu_ida.h 65 int percpu_ida_alloc(struct percpu_ida *pool, int state);
66 void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
68 void percpu_ida_destroy(struct percpu_ida *pool);
69 int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
71 static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) percpu_ida_init() argument
73 return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE, percpu_ida_init()
78 int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
81 unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
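percpu_ida hands out small integer tags from a fixed-size pool with per-cpu caching. A minimal sketch; the tag count and the tag_pool/tag_pool_example names are illustrative, and TASK_RUNNING is assumed here to mean "do not sleep, return a negative value if no tag is free".

#include <linux/percpu_ida.h>
#include <linux/sched.h>

static struct percpu_ida tag_pool;

static int tag_pool_example(void)
{
        int tag, ret;

        ret = percpu_ida_init(&tag_pool, 128);  /* tags 0..127 */
        if (ret)
                return ret;

        /* non-blocking allocation; a negative return means no tag was available */
        tag = percpu_ida_alloc(&tag_pool, TASK_RUNNING);
        if (tag >= 0)
                percpu_ida_free(&tag_pool, tag);

        percpu_ida_destroy(&tag_pool);
        return 0;
}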
mempool.h 2 * memory buffer pool support
32 extern int mempool_resize(mempool_t *pool, int new_min_nr);
33 extern void mempool_destroy(mempool_t *pool);
34 extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
35 extern void mempool_free(void *element, mempool_t *pool);
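Besides the raw mempool_create() variant sketched after msgpool.c above, mempool.h also carries convenience constructors backed by a kmem_cache or kmalloc (the mempool_alloc_slab/mempool_free_slab helpers show up in the mm/mempool.c poisoning code below). A minimal slab-backed sketch; the request_ctx type and the ctx_* names are illustrative.

#include <linux/mempool.h>
#include <linux/slab.h>

struct request_ctx { int id; };

static struct kmem_cache *ctx_cache;
static mempool_t *ctx_pool;

static int ctx_pool_init(void)
{
        ctx_cache = kmem_cache_create("ctx", sizeof(struct request_ctx), 0, 0, NULL);
        if (!ctx_cache)
                return -ENOMEM;

        /* keep at least 8 objects in reserve so allocation can ride out memory pressure */
        ctx_pool = mempool_create_slab_pool(8, ctx_cache);
        if (!ctx_pool) {
                kmem_cache_destroy(ctx_cache);
                return -ENOMEM;
        }
        return 0;
}

static void ctx_pool_use(void)
{
        struct request_ctx *ctx = mempool_alloc(ctx_pool, GFP_NOIO);

        if (ctx)
                mempool_free(ctx, ctx_pool);
}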
hw_random.h 61 /** Feed random bits into the pool. */
agpgart.h 104 struct agp_memory *pool; member in struct:agp_controller
/linux-4.1.27/mm/
mempool.c 4 * memory buffer pool support. Such pools are mostly used
24 static void poison_error(mempool_t *pool, void *element, size_t size, poison_error() argument
27 const int nr = pool->curr_nr; poison_error()
33 pr_err("Mempool %p size %zu\n", pool, size); poison_error()
41 static void __check_element(mempool_t *pool, void *element, size_t size) __check_element() argument
50 poison_error(pool, element, size, i); __check_element()
57 static void check_element(mempool_t *pool, void *element) check_element() argument
60 if (pool->free == mempool_free_slab || pool->free == mempool_kfree) check_element()
61 __check_element(pool, element, ksize(element)); check_element()
64 if (pool->free == mempool_free_pages) { check_element()
65 int order = (int)(long)pool->pool_data; check_element()
68 __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); check_element()
81 static void poison_element(mempool_t *pool, void *element) poison_element() argument
84 if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) poison_element()
88 if (pool->alloc == mempool_alloc_pages) { poison_element()
89 int order = (int)(long)pool->pool_data; poison_element()
97 static inline void check_element(mempool_t *pool, void *element) check_element() argument
100 static inline void poison_element(mempool_t *pool, void *element) poison_element() argument
105 static void kasan_poison_element(mempool_t *pool, void *element) kasan_poison_element() argument
107 if (pool->alloc == mempool_alloc_slab) kasan_poison_element()
108 kasan_slab_free(pool->pool_data, element); kasan_poison_element()
109 if (pool->alloc == mempool_kmalloc) kasan_poison_element()
111 if (pool->alloc == mempool_alloc_pages) kasan_poison_element()
112 kasan_free_pages(element, (unsigned long)pool->pool_data); kasan_poison_element()
115 static void kasan_unpoison_element(mempool_t *pool, void *element) kasan_unpoison_element() argument
117 if (pool->alloc == mempool_alloc_slab) kasan_unpoison_element()
118 kasan_slab_alloc(pool->pool_data, element); kasan_unpoison_element()
119 if (pool->alloc == mempool_kmalloc) kasan_unpoison_element()
120 kasan_krealloc(element, (size_t)pool->pool_data); kasan_unpoison_element()
121 if (pool->alloc == mempool_alloc_pages) kasan_unpoison_element()
122 kasan_alloc_pages(element, (unsigned long)pool->pool_data); kasan_unpoison_element()
125 static void add_element(mempool_t *pool, void *element) add_element() argument
127 BUG_ON(pool->curr_nr >= pool->min_nr); add_element()
128 poison_element(pool, element); add_element()
129 kasan_poison_element(pool, element); add_element()
130 pool->elements[pool->curr_nr++] = element; add_element()
133 static void *remove_element(mempool_t *pool) remove_element() argument
135 void *element = pool->elements[--pool->curr_nr]; remove_element()
137 BUG_ON(pool->curr_nr < 0); remove_element()
138 check_element(pool, element); remove_element()
139 kasan_unpoison_element(pool, element); remove_element()
144 * mempool_destroy - deallocate a memory pool
145 * @pool: pointer to the memory pool which was allocated via
148 * Free all reserved elements in @pool and @pool itself. This function
151 void mempool_destroy(mempool_t *pool) mempool_destroy() argument
153 while (pool->curr_nr) { mempool_destroy()
154 void *element = remove_element(pool); mempool_destroy()
155 pool->free(element, pool->pool_data); mempool_destroy()
157 kfree(pool->elements); mempool_destroy()
158 kfree(pool); mempool_destroy()
163 * mempool_create - create a memory pool
165 * allocated for this pool.
171 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
188 mempool_t *pool; mempool_create_node() local
189 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); mempool_create_node()
190 if (!pool) mempool_create_node()
192 pool->elements = kmalloc_node(min_nr * sizeof(void *), mempool_create_node()
194 if (!pool->elements) { mempool_create_node()
195 kfree(pool); mempool_create_node()
198 spin_lock_init(&pool->lock); mempool_create_node()
199 pool->min_nr = min_nr; mempool_create_node()
200 pool->pool_data = pool_data; mempool_create_node()
201 init_waitqueue_head(&pool->wait); mempool_create_node()
202 pool->alloc = alloc_fn; mempool_create_node()
203 pool->free = free_fn; mempool_create_node()
208 while (pool->curr_nr < pool->min_nr) { mempool_create_node()
211 element = pool->alloc(gfp_mask, pool->pool_data); mempool_create_node()
213 mempool_destroy(pool); mempool_create_node()
216 add_element(pool, element); mempool_create_node()
218 return pool; mempool_create_node()
223 * mempool_resize - resize an existing memory pool
224 * @pool: pointer to the memory pool which was allocated via
227 * allocated for this pool.
229 * This function shrinks/grows the pool. In the case of growing,
230 * it cannot be guaranteed that the pool will be grown to the new
238 int mempool_resize(mempool_t *pool, int new_min_nr) mempool_resize() argument
247 spin_lock_irqsave(&pool->lock, flags); mempool_resize()
248 if (new_min_nr <= pool->min_nr) { mempool_resize()
249 while (new_min_nr < pool->curr_nr) { mempool_resize()
250 element = remove_element(pool); mempool_resize()
251 spin_unlock_irqrestore(&pool->lock, flags); mempool_resize()
252 pool->free(element, pool->pool_data); mempool_resize()
253 spin_lock_irqsave(&pool->lock, flags); mempool_resize()
255 pool->min_nr = new_min_nr; mempool_resize()
258 spin_unlock_irqrestore(&pool->lock, flags); mempool_resize()
260 /* Grow the pool */ mempool_resize()
266 spin_lock_irqsave(&pool->lock, flags); mempool_resize()
267 if (unlikely(new_min_nr <= pool->min_nr)) { mempool_resize()
269 spin_unlock_irqrestore(&pool->lock, flags); mempool_resize()
273 memcpy(new_elements, pool->elements, mempool_resize()
274 pool->curr_nr * sizeof(*new_elements)); mempool_resize()
275 kfree(pool->elements); mempool_resize()
276 pool->elements = new_elements; mempool_resize()
277 pool->min_nr = new_min_nr; mempool_resize()
279 while (pool->curr_nr < pool->min_nr) { mempool_resize()
280 spin_unlock_irqrestore(&pool->lock, flags); mempool_resize()
281 element = pool->alloc(GFP_KERNEL, pool->pool_data); mempool_resize()
284 spin_lock_irqsave(&pool->lock, flags); mempool_resize()
285 if (pool->curr_nr < pool->min_nr) { mempool_resize()
286 add_element(pool, element); mempool_resize()
288 spin_unlock_irqrestore(&pool->lock, flags); mempool_resize()
289 pool->free(element, pool->pool_data); /* Raced */ mempool_resize()
294 spin_unlock_irqrestore(&pool->lock, flags); mempool_resize()
301 * mempool_alloc - allocate an element from a specific memory pool
302 * @pool: pointer to the memory pool which was allocated via
312 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) mempool_alloc() argument
330 element = pool->alloc(gfp_temp, pool->pool_data); mempool_alloc()
334 spin_lock_irqsave(&pool->lock, flags); mempool_alloc()
335 if (likely(pool->curr_nr)) { mempool_alloc()
336 element = remove_element(pool); mempool_alloc()
337 spin_unlock_irqrestore(&pool->lock, flags); mempool_alloc()
350 * alloc failed with that and @pool was empty, retry immediately. mempool_alloc()
353 spin_unlock_irqrestore(&pool->lock, flags); mempool_alloc()
360 spin_unlock_irqrestore(&pool->lock, flags); mempool_alloc()
364 /* Let's wait for someone else to return an element to @pool */ mempool_alloc()
366 prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); mempool_alloc()
368 spin_unlock_irqrestore(&pool->lock, flags); mempool_alloc()
376 finish_wait(&pool->wait, &wait); mempool_alloc()
382 * mempool_free - return an element to the pool.
383 * @element: pool element pointer.
384 * @pool: pointer to the memory pool which was allocated via
389 void mempool_free(void *element, mempool_t *pool) mempool_free() argument
398 * for @element and the following @pool->curr_nr. This ensures mempool_free()
399 * that the visible value of @pool->curr_nr is from after the mempool_free()
427 * pool waking up the waiters. mempool_free()
429 if (unlikely(pool->curr_nr < pool->min_nr)) { mempool_free()
430 spin_lock_irqsave(&pool->lock, flags); mempool_free()
431 if (likely(pool->curr_nr < pool->min_nr)) { mempool_free()
432 add_element(pool, element); mempool_free()
433 spin_unlock_irqrestore(&pool->lock, flags); mempool_free()
434 wake_up(&pool->wait); mempool_free()
437 spin_unlock_irqrestore(&pool->lock, flags); mempool_free()
439 pool->free(element, pool->pool_data); mempool_free()
dmapool.c 17 * The current design of this allocator is fairly simple. The pool is
45 struct dma_pool { /* the pool */
74 struct dma_pool *pool; show_pools() local
84 list_for_each_entry(pool, &dev->dma_pools, pools) { show_pools()
88 spin_lock_irq(&pool->lock); show_pools()
89 list_for_each_entry(page, &pool->page_list, page_list) { show_pools()
93 spin_unlock_irq(&pool->lock); show_pools()
95 /* per-pool info, no real statistics yet */ show_pools()
97 pool->name, blocks, show_pools()
98 pages * (pool->allocation / pool->size), show_pools()
99 pool->size, pages); show_pools()
111 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
112 * @name: name of pool, for diagnostics
114 * @size: size of the blocks in this pool.
119 * Returns a dma allocation pool with the requested characteristics, or
206 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) pool_initialise_page() argument
209 unsigned int next_boundary = pool->boundary; pool_initialise_page()
212 unsigned int next = offset + pool->size; pool_initialise_page()
213 if (unlikely((next + pool->size) >= next_boundary)) { pool_initialise_page()
215 next_boundary += pool->boundary; pool_initialise_page()
219 } while (offset < pool->allocation); pool_initialise_page()
222 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) pool_alloc_page() argument
229 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, pool_alloc_page()
233 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); pool_alloc_page()
235 pool_initialise_page(pool, page); pool_alloc_page()
250 static void pool_free_page(struct dma_pool *pool, struct dma_page *page) pool_free_page() argument
255 memset(page->vaddr, POOL_POISON_FREED, pool->allocation); pool_free_page()
257 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); pool_free_page()
263 * dma_pool_destroy - destroys a pool of dma memory blocks.
264 * @pool: dma pool that will be destroyed
267 * Caller guarantees that no more memory from the pool is in use,
268 * and that nothing will try to use the pool after this call.
270 void dma_pool_destroy(struct dma_pool *pool) dma_pool_destroy() argument
276 list_del(&pool->pools); dma_pool_destroy()
277 if (pool->dev && list_empty(&pool->dev->dma_pools)) dma_pool_destroy()
281 device_remove_file(pool->dev, &dev_attr_pools); dma_pool_destroy()
284 while (!list_empty(&pool->page_list)) { dma_pool_destroy()
286 page = list_entry(pool->page_list.next, dma_pool_destroy()
289 if (pool->dev) dma_pool_destroy()
290 dev_err(pool->dev, dma_pool_destroy()
292 pool->name, page->vaddr); dma_pool_destroy()
296 pool->name, page->vaddr); dma_pool_destroy()
301 pool_free_page(pool, page); dma_pool_destroy()
304 kfree(pool); dma_pool_destroy()
310 * @pool: dma pool that will produce the block
318 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_pool_alloc() argument
328 spin_lock_irqsave(&pool->lock, flags); dma_pool_alloc()
329 list_for_each_entry(page, &pool->page_list, page_list) { dma_pool_alloc()
330 if (page->offset < pool->allocation) dma_pool_alloc()
334 /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ dma_pool_alloc()
335 spin_unlock_irqrestore(&pool->lock, flags); dma_pool_alloc()
337 page = pool_alloc_page(pool, mem_flags); dma_pool_alloc()
341 spin_lock_irqsave(&pool->lock, flags); dma_pool_alloc()
343 list_add(&page->page_list, &pool->page_list); dma_pool_alloc()
355 for (i = sizeof(page->offset); i < pool->size; i++) { dma_pool_alloc()
358 if (pool->dev) dma_pool_alloc()
359 dev_err(pool->dev, dma_pool_alloc()
361 pool->name, retval); dma_pool_alloc()
364 pool->name, retval); dma_pool_alloc()
371 data, pool->size, 1); dma_pool_alloc()
375 memset(retval, POOL_POISON_ALLOCATED, pool->size); dma_pool_alloc()
377 spin_unlock_irqrestore(&pool->lock, flags); dma_pool_alloc()
382 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) pool_find_page() argument
386 list_for_each_entry(page, &pool->page_list, page_list) { pool_find_page()
389 if (dma < (page->dma + pool->allocation)) pool_find_page()
396 * dma_pool_free - put block back into dma pool
397 * @pool: the dma pool holding the block
404 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) dma_pool_free() argument
410 spin_lock_irqsave(&pool->lock, flags); dma_pool_free()
411 page = pool_find_page(pool, dma); dma_pool_free()
413 spin_unlock_irqrestore(&pool->lock, flags); dma_pool_free()
414 if (pool->dev) dma_pool_free()
415 dev_err(pool->dev, dma_pool_free()
417 pool->name, vaddr, (unsigned long)dma); dma_pool_free()
420 pool->name, vaddr, (unsigned long)dma); dma_pool_free()
427 spin_unlock_irqrestore(&pool->lock, flags); dma_pool_free()
428 if (pool->dev) dma_pool_free()
429 dev_err(pool->dev, dma_pool_free()
431 pool->name, vaddr, (unsigned long long)dma); dma_pool_free()
435 pool->name, vaddr, (unsigned long long)dma); dma_pool_free()
440 while (chain < pool->allocation) { dma_pool_free()
445 spin_unlock_irqrestore(&pool->lock, flags); dma_pool_free()
446 if (pool->dev) dma_pool_free()
447 dev_err(pool->dev, "dma_pool_free %s, dma %Lx " dma_pool_free()
448 "already free\n", pool->name, dma_pool_free()
452 "already free\n", pool->name, dma_pool_free()
457 memset(vaddr, POOL_POISON_FREED, pool->size); dma_pool_free()
465 * if (!is_page_busy(page)) pool_free_page(pool, page); dma_pool_free()
468 spin_unlock_irqrestore(&pool->lock, flags); dma_pool_free()
473 * Managed DMA pool
477 struct dma_pool *pool = *(struct dma_pool **)res; dmam_pool_release() local
479 dma_pool_destroy(pool); dmam_pool_release()
489 * @name: name of pool, for diagnostics
491 * @size: size of the blocks in this pool.
495 * Managed dma_pool_create(). DMA pool created with this function is
501 struct dma_pool **ptr, *pool; dmam_pool_create() local
507 pool = *ptr = dma_pool_create(name, dev, size, align, allocation); dmam_pool_create()
508 if (pool) dmam_pool_create()
513 return pool; dmam_pool_create()
519 * @pool: dma pool that will be destroyed
523 void dmam_pool_destroy(struct dma_pool *pool) dmam_pool_destroy() argument
525 struct device *dev = pool->dev; dmam_pool_destroy()
527 WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool)); dmam_pool_destroy()
zbud.c 62 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
66 * 63 freelists per pool.
76 * struct zbud_pool - stores metadata for each zbud pool
77 * @lock: protects all pool fields and first|last_chunk fields of any
78 * zbud page in the pool
86 * @pages_nr: number of zbud pages in the pool.
88 * pool creation time.
90 * This structure is allocated at pool creation time and maintains metadata
91 * pertaining to a particular zbud pool.
105 * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
106 * @lru: links the zbud page into the lru list in the pool
124 static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle) zbud_zpool_evict() argument
126 return zpool_evict(pool, handle); zbud_zpool_evict()
139 static void zbud_zpool_destroy(void *pool) zbud_zpool_destroy() argument
141 zbud_destroy_pool(pool); zbud_zpool_destroy()
144 static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, zbud_zpool_malloc() argument
147 return zbud_alloc(pool, size, gfp, handle); zbud_zpool_malloc()
149 static void zbud_zpool_free(void *pool, unsigned long handle) zbud_zpool_free() argument
151 zbud_free(pool, handle); zbud_zpool_free()
154 static int zbud_zpool_shrink(void *pool, unsigned int pages, zbud_zpool_shrink() argument
161 ret = zbud_reclaim_page(pool, 8); zbud_zpool_shrink()
173 static void *zbud_zpool_map(void *pool, unsigned long handle, zbud_zpool_map() argument
176 return zbud_map(pool, handle); zbud_zpool_map()
178 static void zbud_zpool_unmap(void *pool, unsigned long handle) zbud_zpool_unmap() argument
180 zbud_unmap(pool, handle); zbud_zpool_unmap()
183 static u64 zbud_zpool_total_size(void *pool) zbud_zpool_total_size() argument
185 return zbud_get_pool_size(pool) * PAGE_SIZE; zbud_zpool_total_size()
283 * zbud_create_pool() - create a new zbud pool
284 * @gfp: gfp flags when allocating the zbud pool structure
285 * @ops: user-defined operations for the zbud pool
287 * Return: pointer to the new zbud pool or NULL if the metadata allocation
292 struct zbud_pool *pool; zbud_create_pool() local
295 pool = kmalloc(sizeof(struct zbud_pool), gfp); zbud_create_pool()
296 if (!pool) zbud_create_pool()
298 spin_lock_init(&pool->lock); zbud_create_pool()
300 INIT_LIST_HEAD(&pool->unbuddied[i]); zbud_create_pool()
301 INIT_LIST_HEAD(&pool->buddied); zbud_create_pool()
302 INIT_LIST_HEAD(&pool->lru); zbud_create_pool()
303 pool->pages_nr = 0; zbud_create_pool()
304 pool->ops = ops; zbud_create_pool()
305 return pool; zbud_create_pool()
309 * zbud_destroy_pool() - destroys an existing zbud pool
310 * @pool: the zbud pool to be destroyed
312 * The pool should be emptied before this function is called.
314 void zbud_destroy_pool(struct zbud_pool *pool) zbud_destroy_pool() argument
316 kfree(pool); zbud_destroy_pool()
321 * @pool: zbud pool from which to allocate
323 * @gfp: gfp flags used if the pool needs to grow
326 * This function will attempt to find a free region in the pool large enough to
329 * allocated and added to the pool to satisfy the request.
332 * as zbud pool pages.
335 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
338 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, zbud_alloc() argument
351 spin_lock(&pool->lock); zbud_alloc()
356 if (!list_empty(&pool->unbuddied[i])) { for_each_unbuddied_list()
357 zhdr = list_first_entry(&pool->unbuddied[i], for_each_unbuddied_list()
369 spin_unlock(&pool->lock);
373 spin_lock(&pool->lock);
374 pool->pages_nr++;
387 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
390 list_add(&zhdr->buddy, &pool->buddied);
396 list_add(&zhdr->lru, &pool->lru);
399 spin_unlock(&pool->lock);
406 * @pool: pool in which the allocation resided
414 void zbud_free(struct zbud_pool *pool, unsigned long handle) zbud_free() argument
419 spin_lock(&pool->lock); zbud_free()
430 spin_unlock(&pool->lock); zbud_free()
441 pool->pages_nr--; zbud_free()
445 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); zbud_free()
448 spin_unlock(&pool->lock); zbud_free()
455 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
456 * @pool: pool from which a page will attempt to be evicted
469 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
470 * the user-defined eviction handler with the pool and handle as arguments.
489 int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries) zbud_reclaim_page() argument
495 spin_lock(&pool->lock); zbud_reclaim_page()
496 if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || zbud_reclaim_page()
498 spin_unlock(&pool->lock); zbud_reclaim_page()
502 zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru); zbud_reclaim_page()
517 spin_unlock(&pool->lock); zbud_reclaim_page()
521 ret = pool->ops->evict(pool, first_handle); zbud_reclaim_page()
526 ret = pool->ops->evict(pool, last_handle); zbud_reclaim_page()
531 spin_lock(&pool->lock); zbud_reclaim_page()
539 pool->pages_nr--; zbud_reclaim_page()
540 spin_unlock(&pool->lock); zbud_reclaim_page()
546 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); zbud_reclaim_page()
549 list_add(&zhdr->buddy, &pool->buddied); zbud_reclaim_page()
553 list_add(&zhdr->lru, &pool->lru); zbud_reclaim_page()
555 spin_unlock(&pool->lock); zbud_reclaim_page()
561 * @pool: pool in which the allocation resides
571 void *zbud_map(struct zbud_pool *pool, unsigned long handle) zbud_map() argument
578 * @pool: pool in which the allocation resides
581 void zbud_unmap(struct zbud_pool *pool, unsigned long handle) zbud_unmap() argument
586 * zbud_get_pool_size() - gets the zbud pool size in pages
587 * @pool: pool whose size is being queried
589 * Returns: size in pages of the given pool. The pool lock need not be
592 u64 zbud_get_pool_size(struct zbud_pool *pool) zbud_get_pool_size() argument
594 return pool->pages_nr; zbud_get_pool_size()
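The reclaim path above depends entirely on the pool's evict callback. A sketch of a trivial handler follows; the contract assumed here (the callback drops its own use of the allocation, calls zbud_free() on the handle, and returns 0 so zbud_reclaim_page() can retire the page) mirrors what zswap's writeback callback ultimately does, and the example_* names are illustrative.

#include <linux/kernel.h>
#include <linux/zbud.h>

/* Assumed contract, mirroring zswap's writeback path: release the data,
 * free the handle, report success. */
static int example_evict(struct zbud_pool *pool, unsigned long handle)
{
        /* ... write the compressed data back to its backing store here ... */
        zbud_free(pool, handle);
        return 0;
}

static struct zbud_ops example_zbud_ops = {
        .evict = example_evict,
};

static void example_reclaim(struct zbud_pool *pool)
{
        /* try up to 8 LRU pages, as the zbud_zpool_shrink() shim above does */
        if (zbud_reclaim_page(pool, 8))
                pr_debug("zbud: nothing reclaimable\n");
}

The ops struct would be passed to zbud_create_pool() in place of the NULL used in the earlier zbud sketch.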
zpool.c 6 * This is a common frontend for memory storage pool implementations.
24 void *pool; member in struct:zpool
78 * @pool: pool to evict from.
84 int zpool_evict(void *pool, unsigned long handle) zpool_evict() argument
90 if (zpool->pool == pool) { zpool_evict()
133 * @gfp The GFP flags to use when allocating the pool.
150 pr_info("creating pool type %s\n", type); zpool_create_pool()
173 zpool->pool = driver->create(name, gfp, ops); zpool_create_pool()
176 if (!zpool->pool) { zpool_create_pool()
177 pr_err("couldn't create %s pool\n", type); zpool_create_pool()
183 pr_info("created %s pool\n", type); zpool_create_pool()
194 * @pool The zpool to destroy.
198 * pool should only be destroyed once, and should not be used
205 pr_info("destroying pool type %s\n", zpool->type); zpool_destroy_pool()
210 zpool->driver->destroy(zpool->pool); zpool_destroy_pool()
217 * @pool The zpool to check
219 * This returns the type of the pool.
232 * @pool The zpool to allocate from.
237 * This allocates the requested amount of memory from the pool.
249 return zpool->driver->malloc(zpool->pool, size, gfp, handle); zpool_malloc()
254 * @pool The zpool that allocated the memory.
258 * that the pool will actually free memory, only that the memory
259 * in the pool will become available for use by the pool.
268 zpool->driver->free(zpool->pool, handle); zpool_free()
272 * zpool_shrink() - Shrink the pool size
273 * @pool The zpool to shrink.
274 * @pages The number of pages to shrink the pool.
277 * This attempts to shrink the actual memory size of the pool
278 * by evicting currently used handle(s). If the pool was
291 return zpool->driver->shrink(zpool->pool, pages, reclaimed); zpool_shrink()
296 * @pool The zpool that the handle was allocated from
319 return zpool->driver->map(zpool->pool, handle, mapmode); zpool_map_handle()
324 * @pool The zpool that the handle was allocated from
334 zpool->driver->unmap(zpool->pool, handle); zpool_unmap_handle()
338 * zpool_get_total_size() - The total size of the pool
339 * @pool The zpool to check
341 * This returns the total size in bytes of the pool.
347 return zpool->driver->total_size(zpool->pool); zpool_get_total_size()
zsmalloc.c 255 gfp_t flags; /* allocation flags used when growing pool */
283 static int create_handle_cache(struct zs_pool *pool) create_handle_cache() argument
285 pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, create_handle_cache()
287 return pool->handle_cachep ? 0 : 1; create_handle_cache()
290 static void destroy_handle_cache(struct zs_pool *pool) destroy_handle_cache() argument
292 if (pool->handle_cachep) destroy_handle_cache()
293 kmem_cache_destroy(pool->handle_cachep); destroy_handle_cache()
296 static unsigned long alloc_handle(struct zs_pool *pool) alloc_handle() argument
298 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, alloc_handle()
299 pool->flags & ~__GFP_HIGHMEM); alloc_handle()
302 static void free_handle(struct zs_pool *pool, unsigned long handle) free_handle() argument
304 kmem_cache_free(pool->handle_cachep, (void *)handle); free_handle()
326 static void zs_zpool_destroy(void *pool) zs_zpool_destroy() argument
328 zs_destroy_pool(pool); zs_zpool_destroy()
331 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, zs_zpool_malloc() argument
334 *handle = zs_malloc(pool, size); zs_zpool_malloc()
337 static void zs_zpool_free(void *pool, unsigned long handle) zs_zpool_free() argument
339 zs_free(pool, handle); zs_zpool_free()
342 static int zs_zpool_shrink(void *pool, unsigned int pages, zs_zpool_shrink() argument
348 static void *zs_zpool_map(void *pool, unsigned long handle, zs_zpool_map() argument
366 return zs_map_object(pool, handle, zs_mm); zs_zpool_map()
368 static void zs_zpool_unmap(void *pool, unsigned long handle) zs_zpool_unmap() argument
370 zs_unmap_object(pool, handle); zs_zpool_unmap()
373 static u64 zs_zpool_total_size(void *pool) zs_zpool_total_size() argument
375 return zs_get_total_pages(pool) << PAGE_SHIFT; zs_zpool_total_size()
435 * zsmalloc divides the pool into various size classes where each
492 struct zs_pool *pool = s->private; zs_stats_size_show() local
506 class = pool->size_class[i]; zs_stats_size_show()
556 static int zs_pool_stat_create(char *name, struct zs_pool *pool) zs_pool_stat_create() argument
568 pool->stat_dentry = entry; zs_pool_stat_create()
571 pool->stat_dentry, pool, &zs_stat_size_ops); zs_pool_stat_create()
581 static void zs_pool_stat_destroy(struct zs_pool *pool) zs_pool_stat_destroy() argument
583 debugfs_remove_recursive(pool->stat_dentry); zs_pool_stat_destroy()
613 static inline int zs_pool_stat_create(char *name, struct zs_pool *pool) zs_pool_stat_create() argument
618 static inline void zs_pool_stat_destroy(struct zs_pool *pool) zs_pool_stat_destroy() argument
629 * the pool (not yet implemented). This function returns fullness
1241 unsigned long zs_get_total_pages(struct zs_pool *pool) zs_get_total_pages() argument
1243 return atomic_long_read(&pool->pages_allocated); zs_get_total_pages()
1249 * @pool: pool from which the object was allocated
1261 void *zs_map_object(struct zs_pool *pool, unsigned long handle, zs_map_object() argument
1289 class = pool->size_class[class_idx]; zs_map_object()
1315 void zs_unmap_object(struct zs_pool *pool, unsigned long handle) zs_unmap_object() argument
1330 class = pool->size_class[class_idx]; zs_unmap_object()
1383 * zs_malloc - Allocate block of given size from pool.
1384 * @pool: pool to allocate from
1391 unsigned long zs_malloc(struct zs_pool *pool, size_t size) zs_malloc() argument
1400 handle = alloc_handle(pool); zs_malloc()
1406 class = pool->size_class[get_size_class_index(size)]; zs_malloc()
1413 first_page = alloc_zspage(class, pool->flags); zs_malloc()
1415 free_handle(pool, handle); zs_malloc()
1421 &pool->pages_allocated); zs_malloc()
1438 static void obj_free(struct zs_pool *pool, struct size_class *class, obj_free() argument
1470 void zs_free(struct zs_pool *pool, unsigned long handle) zs_free() argument
1487 class = pool->size_class[class_idx]; zs_free()
1490 obj_free(pool, class, obj); zs_free()
1496 &pool->pages_allocated); zs_free()
1502 free_handle(pool, handle); zs_free()
1617 static int migrate_zspage(struct zs_pool *pool, struct size_class *class, migrate_zspage() argument
1658 obj_free(pool, class, used_obj); migrate_zspage()
1686 static void putback_zspage(struct zs_pool *pool, struct size_class *class, putback_zspage() argument
1701 &pool->pages_allocated); putback_zspage()
1718 static unsigned long __zs_compact(struct zs_pool *pool, __zs_compact() argument
1743 if (!migrate_zspage(pool, class, &cc)) __zs_compact()
1746 putback_zspage(pool, class, dst_page); __zs_compact()
1755 putback_zspage(pool, class, dst_page); __zs_compact()
1756 putback_zspage(pool, class, src_page); __zs_compact()
1764 putback_zspage(pool, class, src_page); __zs_compact()
1771 unsigned long zs_compact(struct zs_pool *pool) zs_compact() argument
1778 class = pool->size_class[i]; zs_compact()
1783 nr_migrated += __zs_compact(pool, class); zs_compact()
1791 * zs_create_pool - Creates an allocation pool to work from.
1792 * @flags: allocation flags used to allocate pool metadata
1797 * On success, a pointer to the newly created pool is returned,
1803 struct zs_pool *pool; zs_create_pool() local
1806 pool = kzalloc(sizeof(*pool), GFP_KERNEL); zs_create_pool()
1807 if (!pool) zs_create_pool()
1810 pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *), zs_create_pool()
1812 if (!pool->size_class) { zs_create_pool()
1813 kfree(pool); zs_create_pool()
1817 pool->name = kstrdup(name, GFP_KERNEL); zs_create_pool()
1818 if (!pool->name) zs_create_pool()
1821 if (create_handle_cache(pool)) zs_create_pool()
1849 pool->size_class[i] = prev_class; zs_create_pool()
1865 pool->size_class[i] = class; zs_create_pool()
1870 pool->flags = flags; zs_create_pool()
1872 if (zs_pool_stat_create(name, pool)) zs_create_pool()
1875 return pool; zs_create_pool()
1878 zs_destroy_pool(pool); zs_create_pool()
1883 void zs_destroy_pool(struct zs_pool *pool) zs_destroy_pool() argument
1887 zs_pool_stat_destroy(pool); zs_destroy_pool()
1891 struct size_class *class = pool->size_class[i]; zs_destroy_pool()
1908 destroy_handle_cache(pool); zs_destroy_pool()
1909 kfree(pool->size_class); zs_destroy_pool()
1910 kfree(pool->name); zs_destroy_pool()
1911 kfree(pool); zs_destroy_pool()
zswap.c 6 * RAM-based memory pool. This can result in a significant I/O reduction on
62 /* Pages written back when pool limit was reached */
64 /* Store failed due to a reclaim failure after pool limit was reached */
87 /* The maximum percentage of memory that the compressed pool can occupy */
532 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) zswap_writeback_entry() argument
548 zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO); zswap_writeback_entry()
550 zpool_unmap_handle(pool, handle); zswap_writeback_entry()
922 pr_info("using %s pool\n", zswap_zpool_type); init_zswap()
/linux-4.1.27/drivers/infiniband/core/
fmr_pool.c 57 * its pool's free_list (if the FMR can be mapped again; that is,
58 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
95 void (*flush_function)(struct ib_fmr_pool *pool,
114 static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, ib_fmr_cache_lookup() argument
122 if (!pool->cache_bucket) ib_fmr_cache_lookup()
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); ib_fmr_cache_lookup()
137 static void ib_fmr_batch_release(struct ib_fmr_pool *pool) ib_fmr_batch_release() argument
144 spin_lock_irq(&pool->pool_lock); ib_fmr_batch_release()
146 list_for_each_entry(fmr, &pool->dirty_list, list) { ib_fmr_batch_release()
159 list_splice_init(&pool->dirty_list, &unmap_list); ib_fmr_batch_release()
160 pool->dirty_len = 0; ib_fmr_batch_release()
162 spin_unlock_irq(&pool->pool_lock); ib_fmr_batch_release()
172 spin_lock_irq(&pool->pool_lock); ib_fmr_batch_release()
173 list_splice(&unmap_list, &pool->free_list); ib_fmr_batch_release()
174 spin_unlock_irq(&pool->pool_lock); ib_fmr_batch_release()
179 struct ib_fmr_pool *pool = pool_ptr; ib_fmr_cleanup_thread() local
182 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { ib_fmr_cleanup_thread()
183 ib_fmr_batch_release(pool); ib_fmr_cleanup_thread()
185 atomic_inc(&pool->flush_ser); ib_fmr_cleanup_thread()
186 wake_up_interruptible(&pool->force_wait); ib_fmr_cleanup_thread()
188 if (pool->flush_function) ib_fmr_cleanup_thread()
189 pool->flush_function(pool, pool->flush_arg); ib_fmr_cleanup_thread()
193 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && ib_fmr_cleanup_thread()
203 * ib_create_fmr_pool - Create an FMR pool
205 * @params:FMR pool parameters
207 * Create a pool of FMRs. Return value is pointer to new pool or
214 struct ib_fmr_pool *pool; ib_create_fmr_pool() local
251 pool = kmalloc(sizeof *pool, GFP_KERNEL); ib_create_fmr_pool()
252 if (!pool) { ib_create_fmr_pool()
253 printk(KERN_WARNING PFX "couldn't allocate pool struct\n"); ib_create_fmr_pool()
257 pool->cache_bucket = NULL; ib_create_fmr_pool()
259 pool->flush_function = params->flush_function; ib_create_fmr_pool()
260 pool->flush_arg = params->flush_arg; ib_create_fmr_pool()
262 INIT_LIST_HEAD(&pool->free_list); ib_create_fmr_pool()
263 INIT_LIST_HEAD(&pool->dirty_list); ib_create_fmr_pool()
266 pool->cache_bucket = ib_create_fmr_pool()
267 kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket, ib_create_fmr_pool()
269 if (!pool->cache_bucket) { ib_create_fmr_pool()
270 printk(KERN_WARNING PFX "Failed to allocate cache in pool\n"); ib_create_fmr_pool()
276 INIT_HLIST_HEAD(pool->cache_bucket + i); ib_create_fmr_pool()
279 pool->pool_size = 0; ib_create_fmr_pool()
280 pool->max_pages = params->max_pages_per_fmr; ib_create_fmr_pool()
281 pool->max_remaps = max_remaps; ib_create_fmr_pool()
282 pool->dirty_watermark = params->dirty_watermark; ib_create_fmr_pool()
283 pool->dirty_len = 0; ib_create_fmr_pool()
284 spin_lock_init(&pool->pool_lock); ib_create_fmr_pool()
285 atomic_set(&pool->req_ser, 0); ib_create_fmr_pool()
286 atomic_set(&pool->flush_ser, 0); ib_create_fmr_pool()
287 init_waitqueue_head(&pool->force_wait); ib_create_fmr_pool()
289 pool->thread = kthread_run(ib_fmr_cleanup_thread, ib_create_fmr_pool()
290 pool, ib_create_fmr_pool()
293 if (IS_ERR(pool->thread)) { ib_create_fmr_pool()
295 ret = PTR_ERR(pool->thread); ib_create_fmr_pool()
303 .max_maps = pool->max_remaps, ib_create_fmr_pool()
308 if (pool->cache_bucket) ib_create_fmr_pool()
319 fmr->pool = pool; ib_create_fmr_pool()
332 list_add_tail(&fmr->list, &pool->free_list); ib_create_fmr_pool()
333 ++pool->pool_size; ib_create_fmr_pool()
337 return pool; ib_create_fmr_pool()
340 kfree(pool->cache_bucket); ib_create_fmr_pool()
341 kfree(pool); ib_create_fmr_pool()
346 ib_destroy_fmr_pool(pool); ib_create_fmr_pool()
353 * ib_destroy_fmr_pool - Free FMR pool
354 * @pool:FMR pool to free
356 * Destroy an FMR pool and free all associated resources.
358 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) ib_destroy_fmr_pool() argument
365 kthread_stop(pool->thread); ib_destroy_fmr_pool()
366 ib_fmr_batch_release(pool); ib_destroy_fmr_pool()
369 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { ib_destroy_fmr_pool()
381 if (i < pool->pool_size) ib_destroy_fmr_pool()
382 printk(KERN_WARNING PFX "pool still has %d regions registered\n", ib_destroy_fmr_pool()
383 pool->pool_size - i); ib_destroy_fmr_pool()
385 kfree(pool->cache_bucket); ib_destroy_fmr_pool()
386 kfree(pool); ib_destroy_fmr_pool()
392 * @pool:FMR pool to flush
396 int ib_flush_fmr_pool(struct ib_fmr_pool *pool) ib_flush_fmr_pool() argument
407 spin_lock_irq(&pool->pool_lock); ib_flush_fmr_pool()
408 list_for_each_entry_safe(fmr, next, &pool->free_list, list) { ib_flush_fmr_pool()
410 list_move(&fmr->list, &pool->dirty_list); ib_flush_fmr_pool()
412 spin_unlock_irq(&pool->pool_lock); ib_flush_fmr_pool()
414 serial = atomic_inc_return(&pool->req_ser); ib_flush_fmr_pool()
415 wake_up_process(pool->thread); ib_flush_fmr_pool()
417 if (wait_event_interruptible(pool->force_wait, ib_flush_fmr_pool()
418 atomic_read(&pool->flush_ser) - serial >= 0)) ib_flush_fmr_pool()
427 * @pool:FMR pool to allocate FMR from
432 * Map an FMR from an FMR pool.
439 struct ib_fmr_pool *pool = pool_handle; ib_fmr_pool_map_phys() local
444 if (list_len < 1 || list_len > pool->max_pages) ib_fmr_pool_map_phys()
447 spin_lock_irqsave(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
448 fmr = ib_fmr_cache_lookup(pool, ib_fmr_pool_map_phys()
459 spin_unlock_irqrestore(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
464 if (list_empty(&pool->free_list)) { ib_fmr_pool_map_phys()
465 spin_unlock_irqrestore(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
469 fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list); ib_fmr_pool_map_phys()
472 spin_unlock_irqrestore(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
478 spin_lock_irqsave(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
479 list_add(&fmr->list, &pool->free_list); ib_fmr_pool_map_phys()
480 spin_unlock_irqrestore(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
490 if (pool->cache_bucket) { ib_fmr_pool_map_phys()
495 spin_lock_irqsave(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
497 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); ib_fmr_pool_map_phys()
498 spin_unlock_irqrestore(&pool->pool_lock, flags); ib_fmr_pool_map_phys()
514 struct ib_fmr_pool *pool; ib_fmr_pool_unmap() local
517 pool = fmr->pool; ib_fmr_pool_unmap()
519 spin_lock_irqsave(&pool->pool_lock, flags); ib_fmr_pool_unmap()
523 if (fmr->remap_count < pool->max_remaps) { ib_fmr_pool_unmap()
524 list_add_tail(&fmr->list, &pool->free_list); ib_fmr_pool_unmap()
526 list_add_tail(&fmr->list, &pool->dirty_list); ib_fmr_pool_unmap()
527 if (++pool->dirty_len >= pool->dirty_watermark) { ib_fmr_pool_unmap()
528 atomic_inc(&pool->req_ser); ib_fmr_pool_unmap()
529 wake_up_process(pool->thread); ib_fmr_pool_unmap()
540 spin_unlock_irqrestore(&pool->pool_lock, flags); ib_fmr_pool_unmap()
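The fmr_pool.c fragments above cover the whole FMR pool lifecycle: ib_create_fmr_pool() spawns the cleanup thread and pre-allocates pool_size FMRs, ib_fmr_pool_map_phys()/ib_fmr_pool_unmap() hand mappings out and back (recycling an FMR until max_remaps is reached), and ib_flush_fmr_pool()/ib_destroy_fmr_pool() force and finish cleanup. A minimal usage sketch follows; the pool parameters, access flags and the page list are illustrative assumptions, not values from the file above, and error handling is abbreviated.

#include <rdma/ib_fmr_pool.h>

static struct ib_fmr_pool *example_create_pool(struct ib_pd *pd)
{
	struct ib_fmr_pool_param params = {
		.max_pages_per_fmr = 64,
		.page_shift        = PAGE_SHIFT,
		.access            = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
		.pool_size         = 32,   /* FMRs kept on the free_list */
		.dirty_watermark   = 8,    /* batch size for the cleanup thread */
		.cache             = 1,    /* enable the cache_bucket lookup */
	};

	return ib_create_fmr_pool(pd, &params);   /* ERR_PTR() on failure */
}

static int example_map_unmap(struct ib_fmr_pool *pool, u64 *pages,
			     int npages, u64 iova)
{
	struct ib_pool_fmr *fmr;

	fmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* ... post work requests using fmr->fmr->rkey ... */

	ib_fmr_pool_unmap(fmr);          /* back to free_list or dirty_list */
	return ib_flush_fmr_pool(pool);  /* wait for the cleanup thread */
}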
/linux-4.1.27/drivers/staging/lustre/lustre/lov/
H A Dlov_pool.c38 * OST pool methods
55 static void lov_pool_getref(struct pool_desc *pool) lov_pool_getref() argument
57 CDEBUG(D_INFO, "pool %p\n", pool); lov_pool_getref()
58 atomic_inc(&pool->pool_refcount); lov_pool_getref()
61 void lov_pool_putref(struct pool_desc *pool) lov_pool_putref() argument
63 CDEBUG(D_INFO, "pool %p\n", pool); lov_pool_putref()
64 if (atomic_dec_and_test(&pool->pool_refcount)) { lov_pool_putref()
65 LASSERT(hlist_unhashed(&pool->pool_hash)); lov_pool_putref()
66 LASSERT(list_empty(&pool->pool_list)); lov_pool_putref()
67 LASSERT(pool->pool_proc_entry == NULL); lov_pool_putref()
68 lov_ost_pool_free(&(pool->pool_rr.lqr_pool)); lov_pool_putref()
69 lov_ost_pool_free(&(pool->pool_obds)); lov_pool_putref()
70 OBD_FREE_PTR(pool); lov_pool_putref()
74 static void lov_pool_putref_locked(struct pool_desc *pool) lov_pool_putref_locked() argument
76 CDEBUG(D_INFO, "pool %p\n", pool); lov_pool_putref_locked()
77 LASSERT(atomic_read(&pool->pool_refcount) > 1); lov_pool_putref_locked()
79 atomic_dec(&pool->pool_refcount); lov_pool_putref_locked()
107 struct pool_desc *pool; pool_key() local
109 pool = hlist_entry(hnode, struct pool_desc, pool_hash); pool_key()
110 return pool->pool_name; pool_key()
116 struct pool_desc *pool; pool_hashkey_keycmp() local
119 pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash); pool_hashkey_keycmp()
120 return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME); pool_hashkey_keycmp()
130 struct pool_desc *pool; pool_hashrefcount_get() local
132 pool = hlist_entry(hnode, struct pool_desc, pool_hash); pool_hashrefcount_get()
133 lov_pool_getref(pool); pool_hashrefcount_get()
139 struct pool_desc *pool; pool_hashrefcount_put_locked() local
141 pool = hlist_entry(hnode, struct pool_desc, pool_hash); pool_hashrefcount_put_locked()
142 lov_pool_putref_locked(pool); pool_hashrefcount_put_locked()
158 * pool /proc seq_file methods
161 * iterator is used to go through the target pool entries
164 * pos is from 0 to (pool->pool_obds.op_count - 1)
169 struct pool_desc *pool; member in struct:pool_iterator
181 if (*pos >= pool_tgt_count(iter->pool)) pool_proc_next()
186 down_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_next()
188 if (iter->idx == pool_tgt_count(iter->pool)) { pool_proc_next()
190 up_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_next()
193 up_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_next()
201 struct pool_desc *pool = (struct pool_desc *)s->private; pool_proc_start() local
204 lov_pool_getref(pool); pool_proc_start()
205 if ((pool_tgt_count(pool) == 0) || pool_proc_start()
206 (*pos >= pool_tgt_count(pool))) { pool_proc_start()
208 * find pool to dec ref */ pool_proc_start()
209 lov_pool_putref(pool); pool_proc_start()
217 iter->pool = pool; pool_proc_start()
222 /* /!\ do not forget to restore it to pool before freeing it */ pool_proc_start()
247 s->private = iter->pool; pool_proc_stop()
248 lov_pool_putref(iter->pool); pool_proc_stop()
260 LASSERT(iter->pool != NULL); pool_proc_show()
261 LASSERT(iter->idx <= pool_tgt_count(iter->pool)); pool_proc_show()
263 down_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_show()
264 tgt = pool_tgt(iter->pool, iter->idx); pool_proc_show()
265 up_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_show()
299 void lov_dump_pool(int level, struct pool_desc *pool) lov_dump_pool() argument
303 lov_pool_getref(pool); lov_dump_pool()
305 CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n", lov_dump_pool()
306 pool->pool_name, pool->pool_obds.op_count); lov_dump_pool()
307 down_read(&pool_tgt_rw_sem(pool)); lov_dump_pool()
309 for (i = 0; i < pool_tgt_count(pool) ; i++) { lov_dump_pool()
310 if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp) lov_dump_pool()
312 CDEBUG(level, "pool "LOV_POOLNAMEF"[%d] = %s\n", lov_dump_pool()
313 pool->pool_name, i, lov_dump_pool()
314 obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid))); lov_dump_pool()
317 up_read(&pool_tgt_rw_sem(pool)); lov_dump_pool()
318 lov_pool_putref(pool); lov_dump_pool()
372 /* search ost in pool array */ lov_ost_pool_add()
442 /* ref count init to 1 because when created a pool is always used lov_pool_new()
465 CWARN("Cannot add proc pool entry "LOV_POOLNAMEF"\n", poolname); lov_pool_new()
469 CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry); lov_pool_new()
485 CDEBUG(D_CONFIG, LOV_POOLNAMEF" is pool #%d\n", lov_pool_new()
508 struct pool_desc *pool; lov_pool_del() local
513 pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname); lov_pool_del()
514 if (pool == NULL) lov_pool_del()
517 if (pool->pool_proc_entry != NULL) { lov_pool_del()
518 CDEBUG(D_INFO, "proc entry %p\n", pool->pool_proc_entry); lov_pool_del()
519 lprocfs_remove(&pool->pool_proc_entry); lov_pool_del()
520 lov_pool_putref(pool); lov_pool_del()
524 list_del_init(&pool->pool_list); lov_pool_del()
529 lov_pool_putref(pool); lov_pool_del()
539 struct pool_desc *pool; lov_pool_add() local
545 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); lov_pool_add()
546 if (pool == NULL) lov_pool_add()
567 rc = lov_ost_pool_add(&pool->pool_obds, lov_idx, lov->lov_tgt_size); lov_pool_add()
571 pool->pool_rr.lqr_dirty = 1; lov_pool_add()
574 ostname, poolname, pool_tgt_count(pool)); lov_pool_add()
578 lov_pool_putref(pool); lov_pool_add()
586 struct pool_desc *pool; lov_pool_remove() local
592 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); lov_pool_remove()
593 if (pool == NULL) lov_pool_remove()
615 lov_ost_pool_remove(&pool->pool_obds, lov_idx); lov_pool_remove()
617 pool->pool_rr.lqr_dirty = 1; lov_pool_remove()
624 lov_pool_putref(pool); lov_pool_remove()
628 int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool) lov_check_index_in_pool() argument
632 /* caller may not have a ref on pool if it got the pool lov_check_index_in_pool()
633 * without calling lov_find_pool() (e.g. go through the lov pool lov_check_index_in_pool()
636 lov_pool_getref(pool); lov_check_index_in_pool()
638 down_read(&pool_tgt_rw_sem(pool)); lov_check_index_in_pool()
640 for (i = 0; i < pool_tgt_count(pool); i++) { lov_check_index_in_pool()
641 if (pool_tgt_array(pool)[i] == idx) { lov_check_index_in_pool()
648 up_read(&pool_tgt_rw_sem(pool)); lov_check_index_in_pool()
650 lov_pool_putref(pool); lov_check_index_in_pool()
656 struct pool_desc *pool; lov_find_pool() local
658 pool = NULL; lov_find_pool()
660 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); lov_find_pool()
661 if (pool == NULL) lov_find_pool()
662 CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n", lov_find_pool()
664 if ((pool != NULL) && (pool_tgt_count(pool) == 0)) { lov_find_pool()
665 CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n", lov_find_pool()
667 /* pool is ignored, so we remove ref on it */ lov_find_pool()
668 lov_pool_putref(pool); lov_find_pool()
669 pool = NULL; lov_find_pool()
672 return pool; lov_find_pool()
H A Dlov_internal.h290 /* high level pool methods */
295 void lov_dump_pool(int level, struct pool_desc *pool);
297 int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
298 void lov_pool_putref(struct pool_desc *pool);
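The lov_pool.c fragments above show the reference-counting discipline for OST pools: any lookup that hands out a struct pool_desc takes a reference the caller must drop with lov_pool_putref(), while helpers such as lov_check_index_in_pool() and lov_dump_pool() take and drop their own reference internally. A minimal sketch of a caller, assuming it already holds a referenced pool (e.g. from lov_find_pool()); this is not code from lov_pool.c.

/* Sketch only; `pool` and `ost_idx` are assumed to come from the caller.
 * Needs the internal lov_internal.h header. */
static int example_pool_user(struct pool_desc *pool, __u32 ost_idx)
{
	int rc;

	rc = lov_check_index_in_pool(ost_idx, pool); /* getref/putref inside */
	lov_dump_pool(D_INFO, pool);                 /* likewise */

	lov_pool_putref(pool);                       /* drop the caller's ref */
	return rc;
}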
/linux-4.1.27/sound/core/seq/
H A Dseq_memory.c35 static inline int snd_seq_pool_available(struct snd_seq_pool *pool) snd_seq_pool_available() argument
37 return pool->total_elements - atomic_read(&pool->counter); snd_seq_pool_available()
40 static inline int snd_seq_output_ok(struct snd_seq_pool *pool) snd_seq_output_ok() argument
42 return snd_seq_pool_available(pool) >= pool->room; snd_seq_output_ok()
178 static inline void free_cell(struct snd_seq_pool *pool, free_cell() argument
181 cell->next = pool->free; free_cell()
182 pool->free = cell; free_cell()
183 atomic_dec(&pool->counter); free_cell()
189 struct snd_seq_pool *pool; snd_seq_cell_free() local
193 pool = cell->pool; snd_seq_cell_free()
194 if (snd_BUG_ON(!pool)) snd_seq_cell_free()
197 spin_lock_irqsave(&pool->lock, flags); snd_seq_cell_free()
198 free_cell(pool, cell); snd_seq_cell_free()
205 curp->next = pool->free; snd_seq_cell_free()
206 free_cell(pool, curp); snd_seq_cell_free()
210 if (waitqueue_active(&pool->output_sleep)) { snd_seq_cell_free()
212 if (snd_seq_output_ok(pool)) snd_seq_cell_free()
213 wake_up(&pool->output_sleep); snd_seq_cell_free()
215 spin_unlock_irqrestore(&pool->lock, flags); snd_seq_cell_free()
222 static int snd_seq_cell_alloc(struct snd_seq_pool *pool, snd_seq_cell_alloc() argument
231 if (pool == NULL) snd_seq_cell_alloc()
237 spin_lock_irqsave(&pool->lock, flags); snd_seq_cell_alloc()
238 if (pool->ptr == NULL) { /* not initialized */ snd_seq_cell_alloc()
239 pr_debug("ALSA: seq: pool is not initialized\n"); snd_seq_cell_alloc()
243 while (pool->free == NULL && ! nonblock && ! pool->closing) { snd_seq_cell_alloc()
246 add_wait_queue(&pool->output_sleep, &wait); snd_seq_cell_alloc()
247 spin_unlock_irq(&pool->lock); snd_seq_cell_alloc()
249 spin_lock_irq(&pool->lock); snd_seq_cell_alloc()
250 remove_wait_queue(&pool->output_sleep, &wait); snd_seq_cell_alloc()
257 if (pool->closing) { /* closing.. */ snd_seq_cell_alloc()
262 cell = pool->free; snd_seq_cell_alloc()
265 pool->free = cell->next; snd_seq_cell_alloc()
266 atomic_inc(&pool->counter); snd_seq_cell_alloc()
267 used = atomic_read(&pool->counter); snd_seq_cell_alloc()
268 if (pool->max_used < used) snd_seq_cell_alloc()
269 pool->max_used = used; snd_seq_cell_alloc()
270 pool->event_alloc_success++; snd_seq_cell_alloc()
275 pool->event_alloc_failures++; snd_seq_cell_alloc()
279 spin_unlock_irqrestore(&pool->lock, flags); snd_seq_cell_alloc()
289 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, snd_seq_event_dup() argument
305 if (ncells >= pool->total_elements) snd_seq_event_dup()
308 err = snd_seq_cell_alloc(pool, &cell, nonblock, file); snd_seq_event_dup()
334 err = snd_seq_cell_alloc(pool, &tmp, nonblock, file); snd_seq_event_dup()
369 int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, snd_seq_pool_poll_wait() argument
372 poll_wait(file, &pool->output_sleep, wait); snd_seq_pool_poll_wait()
373 return snd_seq_output_ok(pool); snd_seq_pool_poll_wait()
378 int snd_seq_pool_init(struct snd_seq_pool *pool) snd_seq_pool_init() argument
384 if (snd_BUG_ON(!pool)) snd_seq_pool_init()
387 cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size); snd_seq_pool_init()
392 spin_lock_irqsave(&pool->lock, flags); snd_seq_pool_init()
393 if (pool->ptr) { snd_seq_pool_init()
394 spin_unlock_irqrestore(&pool->lock, flags); snd_seq_pool_init()
399 pool->ptr = cellptr; snd_seq_pool_init()
400 pool->free = NULL; snd_seq_pool_init()
402 for (cell = 0; cell < pool->size; cell++) { snd_seq_pool_init()
403 cellptr = pool->ptr + cell; snd_seq_pool_init()
404 cellptr->pool = pool; snd_seq_pool_init()
405 cellptr->next = pool->free; snd_seq_pool_init()
406 pool->free = cellptr; snd_seq_pool_init()
408 pool->room = (pool->size + 1) / 2; snd_seq_pool_init()
411 pool->max_used = 0; snd_seq_pool_init()
412 pool->total_elements = pool->size; snd_seq_pool_init()
413 spin_unlock_irqrestore(&pool->lock, flags); snd_seq_pool_init()
418 int snd_seq_pool_done(struct snd_seq_pool *pool) snd_seq_pool_done() argument
424 if (snd_BUG_ON(!pool)) snd_seq_pool_done()
428 spin_lock_irqsave(&pool->lock, flags); snd_seq_pool_done()
429 pool->closing = 1; snd_seq_pool_done()
430 spin_unlock_irqrestore(&pool->lock, flags); snd_seq_pool_done()
432 if (waitqueue_active(&pool->output_sleep)) snd_seq_pool_done()
433 wake_up(&pool->output_sleep); snd_seq_pool_done()
435 while (atomic_read(&pool->counter) > 0) { snd_seq_pool_done()
437 pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter)); snd_seq_pool_done()
445 spin_lock_irqsave(&pool->lock, flags); snd_seq_pool_done()
446 ptr = pool->ptr; snd_seq_pool_done()
447 pool->ptr = NULL; snd_seq_pool_done()
448 pool->free = NULL; snd_seq_pool_done()
449 pool->total_elements = 0; snd_seq_pool_done()
450 spin_unlock_irqrestore(&pool->lock, flags); snd_seq_pool_done()
454 spin_lock_irqsave(&pool->lock, flags); snd_seq_pool_done()
455 pool->closing = 0; snd_seq_pool_done()
456 spin_unlock_irqrestore(&pool->lock, flags); snd_seq_pool_done()
462 /* init new memory pool */ snd_seq_pool_new()
465 struct snd_seq_pool *pool; snd_seq_pool_new() local
467 /* create pool block */ snd_seq_pool_new()
468 pool = kzalloc(sizeof(*pool), GFP_KERNEL); snd_seq_pool_new()
469 if (!pool) snd_seq_pool_new()
471 spin_lock_init(&pool->lock); snd_seq_pool_new()
472 pool->ptr = NULL; snd_seq_pool_new()
473 pool->free = NULL; snd_seq_pool_new()
474 pool->total_elements = 0; snd_seq_pool_new()
475 atomic_set(&pool->counter, 0); snd_seq_pool_new()
476 pool->closing = 0; snd_seq_pool_new()
477 init_waitqueue_head(&pool->output_sleep); snd_seq_pool_new()
479 pool->size = poolsize; snd_seq_pool_new()
482 pool->max_used = 0; snd_seq_pool_new()
483 return pool; snd_seq_pool_new()
486 /* remove memory pool */ snd_seq_pool_delete()
489 struct snd_seq_pool *pool = *ppool; snd_seq_pool_delete() local
492 if (pool == NULL) snd_seq_pool_delete()
494 snd_seq_pool_done(pool); snd_seq_pool_delete()
495 kfree(pool); snd_seq_pool_delete()
513 struct snd_seq_pool *pool, char *space) snd_seq_info_pool()
515 if (pool == NULL) snd_seq_info_pool()
517 snd_iprintf(buffer, "%sPool size : %d\n", space, pool->total_elements); snd_seq_info_pool()
518 snd_iprintf(buffer, "%sCells in use : %d\n", space, atomic_read(&pool->counter)); snd_seq_info_pool()
519 snd_iprintf(buffer, "%sPeak cells in use : %d\n", space, pool->max_used); snd_seq_info_pool()
520 snd_iprintf(buffer, "%sAlloc success : %d\n", space, pool->event_alloc_success); snd_seq_info_pool()
521 snd_iprintf(buffer, "%sAlloc failures : %d\n", space, pool->event_alloc_failures); snd_seq_info_pool()
512 snd_seq_info_pool(struct snd_info_buffer *buffer, struct snd_seq_pool *pool, char *space) snd_seq_info_pool() argument
H A Dseq_memory.h32 struct snd_seq_pool *pool; /* used pool */ member in struct:snd_seq_event_cell
36 /* design note: the pool is a contiguous block of memory, if we dynamically
37 want to add additional cells to the pool we had better store them in another
38 pool as we need to know the base address of the pool when releasing
45 int total_elements; /* pool size actually allocated */
48 int size; /* pool size to be allocated */
68 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
72 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) snd_seq_unused_cells() argument
74 return pool ? pool->total_elements - atomic_read(&pool->counter) : 0; snd_seq_unused_cells()
78 static inline int snd_seq_total_cells(struct snd_seq_pool *pool) snd_seq_total_cells() argument
80 return pool ? pool->total_elements : 0; snd_seq_total_cells()
83 /* init pool - allocate events */
84 int snd_seq_pool_init(struct snd_seq_pool *pool);
86 /* done pool - free events */
87 int snd_seq_pool_done(struct snd_seq_pool *pool);
89 /* create pool */
92 /* remove pool */
93 int snd_seq_pool_delete(struct snd_seq_pool **pool);
102 int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait);
105 struct snd_seq_pool *pool, char *space);
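seq_memory.c/seq_memory.h above implement the ALSA sequencer event-cell pool: snd_seq_pool_new() allocates the control block, snd_seq_pool_init() vmalloc()s the cell array and threads the free list, snd_seq_event_dup() takes cells out (sleeping on output_sleep when the pool is exhausted and blocking is allowed), and snd_seq_pool_done()/snd_seq_pool_delete() tear everything down. A minimal lifecycle sketch; the pool size is an arbitrary example value.

#include "seq_memory.h"	/* internal ALSA sequencer header */

static struct snd_seq_pool *example_pool_create(void)
{
	struct snd_seq_pool *pool = snd_seq_pool_new(500);   /* 500 cells */

	if (!pool)
		return NULL;
	if (snd_seq_pool_init(pool) < 0) {    /* allocate the cell array */
		snd_seq_pool_delete(&pool);
		return NULL;
	}
	return pool;
}

static void example_pool_destroy(struct snd_seq_pool **pool)
{
	snd_seq_pool_done(*pool);      /* mark closing, wait for used cells */
	snd_seq_pool_delete(pool);     /* runs snd_seq_pool_done() again, then kfree */
}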
H A Dseq_fifo.c39 f->pool = snd_seq_pool_new(poolsize); snd_seq_fifo_new()
40 if (f->pool == NULL) { snd_seq_fifo_new()
44 if (snd_seq_pool_init(f->pool) < 0) { snd_seq_fifo_new()
45 snd_seq_pool_delete(&f->pool); snd_seq_fifo_new()
82 if (f->pool) { snd_seq_fifo_delete()
83 snd_seq_pool_done(f->pool); snd_seq_fifo_delete()
84 snd_seq_pool_delete(&f->pool); snd_seq_fifo_delete()
123 err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ snd_seq_fifo_event_in()
231 /* change the size of pool; all old events are removed */ snd_seq_fifo_resize()
238 if (snd_BUG_ON(!f || !f->pool)) snd_seq_fifo_resize()
241 /* allocate new pool */ snd_seq_fifo_resize()
251 /* remember old pool */ snd_seq_fifo_resize()
252 oldpool = f->pool; snd_seq_fifo_resize()
255 f->pool = newpool; snd_seq_fifo_resize()
262 /* release cells in old pool */ snd_seq_fifo_resize()
H A Dseq_clientmgr.h64 /* output pool */
65 struct snd_seq_pool *pool; /* memory pool for this client */ member in struct:snd_seq_client
H A Dseq_fifo.h31 struct snd_seq_pool *pool; /* FIFO pool */ member in struct:snd_seq_fifo
68 /* resize pool in fifo */
H A Dseq_clientmgr.c119 return snd_seq_total_cells(client->pool) > 0; snd_seq_write_pool_allocated()
229 client->pool = snd_seq_pool_new(poolsize); seq_create_client1()
230 if (client->pool == NULL) { seq_create_client1()
260 snd_seq_pool_delete(&client->pool); seq_create_client1()
280 if (client->pool) seq_free_client1()
281 snd_seq_pool_delete(&client->pool); seq_free_client1()
914 /* Allocate a cell from client pool and enqueue it to queue:
915 * if pool is empty and blocking is TRUE, sleep until a new cell is
959 err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file); snd_seq_client_enqueue_event()
1004 * -EAGAIN no space in output pool
1024 if (!client->accept_output || client->pool == NULL) snd_seq_write()
1027 /* allocate the pool now if the pool is not allocated yet */ snd_seq_write()
1028 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { snd_seq_write()
1029 if (snd_seq_pool_init(client->pool) < 0) snd_seq_write()
1117 /* check if data is available in the pool */ snd_seq_poll()
1119 snd_seq_pool_poll_wait(client->pool, file, wait)) snd_seq_poll()
1886 info.output_pool = cptr->pool->size; snd_seq_ioctl_get_client_pool()
1887 info.output_room = cptr->pool->room; snd_seq_ioctl_get_client_pool()
1889 info.output_free = snd_seq_unused_cells(cptr->pool); snd_seq_ioctl_get_client_pool()
1894 info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool); snd_seq_ioctl_get_client_pool()
1921 info.output_pool != client->pool->size)) { snd_seq_ioctl_set_client_pool()
1925 snd_seq_pool_done(client->pool); snd_seq_ioctl_set_client_pool()
1927 client->pool->size = info.output_pool; snd_seq_ioctl_set_client_pool()
1928 rc = snd_seq_pool_init(client->pool); snd_seq_ioctl_set_client_pool()
1936 /* change pool size */ snd_seq_ioctl_set_client_pool()
1943 info.output_room <= client->pool->size) { snd_seq_ioctl_set_client_pool()
1944 client->pool->room = info.output_room; snd_seq_ioctl_set_client_pool()
2444 if (snd_seq_pool_poll_wait(client->pool, file, wait)) snd_seq_kernel_client_write_poll()
2544 snd_iprintf(buffer, " Output pool :\n"); snd_seq_info_clients_read()
2545 snd_seq_info_pool(buffer, client->pool, " "); snd_seq_info_clients_read()
2548 client->data.user.fifo->pool) { snd_seq_info_clients_read()
2549 snd_iprintf(buffer, " Input pool :\n"); snd_seq_info_clients_read()
2550 snd_seq_info_pool(buffer, client->data.user.fifo->pool, " "); snd_seq_info_clients_read()
/linux-4.1.27/lib/
H A Dpercpu_ida.c60 static inline void steal_tags(struct percpu_ida *pool, steal_tags() argument
63 unsigned cpus_have_tags, cpu = pool->cpu_last_stolen; steal_tags()
66 for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); steal_tags()
68 cpu = cpumask_next(cpu, &pool->cpus_have_tags); steal_tags()
71 cpu = cpumask_first(&pool->cpus_have_tags); steal_tags()
76 pool->cpu_last_stolen = cpu; steal_tags()
77 remote = per_cpu_ptr(pool->tag_cpu, cpu); steal_tags()
79 cpumask_clear_cpu(cpu, &pool->cpus_have_tags); steal_tags()
106 static inline void alloc_global_tags(struct percpu_ida *pool, alloc_global_tags() argument
110 pool->freelist, &pool->nr_free, alloc_global_tags()
111 min(pool->nr_free, pool->percpu_batch_size)); alloc_global_tags()
128 * @pool: pool to allocate from
144 int percpu_ida_alloc(struct percpu_ida *pool, int state) percpu_ida_alloc() argument
152 tags = this_cpu_ptr(pool->tag_cpu); percpu_ida_alloc()
162 spin_lock(&pool->lock); percpu_ida_alloc()
172 prepare_to_wait(&pool->wait, &wait, state); percpu_ida_alloc()
175 alloc_global_tags(pool, tags); percpu_ida_alloc()
177 steal_tags(pool, tags); percpu_ida_alloc()
183 &pool->cpus_have_tags); percpu_ida_alloc()
186 spin_unlock(&pool->lock); percpu_ida_alloc()
200 tags = this_cpu_ptr(pool->tag_cpu); percpu_ida_alloc()
203 finish_wait(&pool->wait, &wait); percpu_ida_alloc()
211 * @pool: pool @tag was allocated from
216 void percpu_ida_free(struct percpu_ida *pool, unsigned tag) percpu_ida_free() argument
222 BUG_ON(tag >= pool->nr_tags); percpu_ida_free()
225 tags = this_cpu_ptr(pool->tag_cpu); percpu_ida_free()
235 &pool->cpus_have_tags); percpu_ida_free()
236 wake_up(&pool->wait); percpu_ida_free()
239 if (nr_free == pool->percpu_max_size) { percpu_ida_free()
240 spin_lock(&pool->lock); percpu_ida_free()
246 if (tags->nr_free == pool->percpu_max_size) { percpu_ida_free()
247 move_tags(pool->freelist, &pool->nr_free, percpu_ida_free()
249 pool->percpu_batch_size); percpu_ida_free()
251 wake_up(&pool->wait); percpu_ida_free()
253 spin_unlock(&pool->lock); percpu_ida_free()
261 * percpu_ida_destroy - release a tag pool's resources
262 * @pool: pool to free
266 void percpu_ida_destroy(struct percpu_ida *pool) percpu_ida_destroy() argument
268 free_percpu(pool->tag_cpu); percpu_ida_destroy()
269 free_pages((unsigned long) pool->freelist, percpu_ida_destroy()
270 get_order(pool->nr_tags * sizeof(unsigned))); percpu_ida_destroy()
275 * percpu_ida_init - initialize a percpu tag pool
276 * @pool: pool to initialize
279 * Initializes @pool so that it can be used to allocate tags - integers in the
286 int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags, __percpu_ida_init() argument
291 memset(pool, 0, sizeof(*pool)); __percpu_ida_init()
293 init_waitqueue_head(&pool->wait); __percpu_ida_init()
294 spin_lock_init(&pool->lock); __percpu_ida_init()
295 pool->nr_tags = nr_tags; __percpu_ida_init()
296 pool->percpu_max_size = max_size; __percpu_ida_init()
297 pool->percpu_batch_size = batch_size; __percpu_ida_init()
306 pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order); __percpu_ida_init()
307 if (!pool->freelist) __percpu_ida_init()
311 pool->freelist[i] = i; __percpu_ida_init()
313 pool->nr_free = nr_tags; __percpu_ida_init()
315 pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) + __percpu_ida_init()
316 pool->percpu_max_size * sizeof(unsigned), __percpu_ida_init()
318 if (!pool->tag_cpu) __percpu_ida_init()
322 spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock); __percpu_ida_init()
326 percpu_ida_destroy(pool); __percpu_ida_init()
332 * percpu_ida_for_each_free - iterate free ids of a pool
333 * @pool: pool to iterate
341 int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, percpu_ida_for_each_free() argument
350 remote = per_cpu_ptr(pool->tag_cpu, cpu); for_each_possible_cpu()
362 spin_lock(&pool->lock);
363 for (i = 0; i < pool->nr_free; i++) {
364 err = fn(pool->freelist[i], data);
368 spin_unlock(&pool->lock);
376 * percpu_ida_free_tags - return the number of free tags on a specific cpu or in the global pool
377 * @pool: pool related
378 * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
382 unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu) percpu_ida_free_tags() argument
386 return pool->nr_free; percpu_ida_free_tags()
387 remote = per_cpu_ptr(pool->tag_cpu, cpu); percpu_ida_free_tags()
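The percpu_ida fragments above describe a tag allocator with per-cpu freelists backed by a global pool: __percpu_ida_init() sizes the freelists, percpu_ida_alloc() pulls a tag (refilling from the global pool, stealing from other cpus, or sleeping depending on the state argument), and percpu_ida_free() returns it, batching tags back to the global freelist once a cpu reaches percpu_max_size. A usage sketch with illustrative sizes:

#include <linux/percpu_ida.h>
#include <linux/sched.h>

/* Sketch only; the tag count and per-cpu sizes are example values. */
static int example_tag_cycle(struct percpu_ida *pool)
{
	int tag, err;

	err = __percpu_ida_init(pool, 1024 /* nr_tags */,
				64 /* percpu max_size */,
				32 /* percpu batch_size */);
	if (err)
		return err;

	tag = percpu_ida_alloc(pool, TASK_RUNNING);  /* never sleeps */
	if (tag >= 0)
		percpu_ida_free(pool, tag);

	percpu_ida_destroy(pool);
	return 0;
}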
H A Dgenalloc.c15 * available. If new memory is added to the pool a lock has to be
145 * gen_pool_create - create a new special memory pool
147 * @nid: node id of the node the pool structure should be allocated on, or -1
149 * Create a new special memory pool that can be used to manage special purpose
154 struct gen_pool *pool; gen_pool_create() local
156 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); gen_pool_create()
157 if (pool != NULL) { gen_pool_create()
158 spin_lock_init(&pool->lock); gen_pool_create()
159 INIT_LIST_HEAD(&pool->chunks); gen_pool_create()
160 pool->min_alloc_order = min_alloc_order; gen_pool_create()
161 pool->algo = gen_pool_first_fit; gen_pool_create()
162 pool->data = NULL; gen_pool_create()
164 return pool; gen_pool_create()
169 * gen_pool_add_virt - add a new chunk of special memory to the pool
170 * @pool: pool to add new memory chunk to
171 * @virt: virtual starting address of memory chunk to add to pool
172 * @phys: physical starting address of memory chunk to add to pool
173 * @size: size in bytes of the memory chunk to add to pool
177 * Add a new chunk of special memory to the specified pool.
181 int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, gen_pool_add_virt() argument
185 int nbits = size >> pool->min_alloc_order; gen_pool_add_virt()
198 spin_lock(&pool->lock); gen_pool_add_virt()
199 list_add_rcu(&chunk->next_chunk, &pool->chunks); gen_pool_add_virt()
200 spin_unlock(&pool->lock); gen_pool_add_virt()
208 * @pool: pool to allocate from
213 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr) gen_pool_virt_to_phys() argument
219 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { gen_pool_virt_to_phys()
232 * gen_pool_destroy - destroy a special memory pool
233 * @pool: pool to destroy
235 * Destroy the specified special memory pool. Verifies that there are no
238 void gen_pool_destroy(struct gen_pool *pool) gen_pool_destroy() argument
242 int order = pool->min_alloc_order; gen_pool_destroy()
245 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { gen_pool_destroy()
255 kfree(pool); gen_pool_destroy()
261 * gen_pool_alloc - allocate special memory from the pool
262 * @pool: pool to allocate from
263 * @size: number of bytes to allocate from the pool
265 * Allocate the requested number of bytes from the specified pool.
266 * Uses the pool allocation function (with first-fit algorithm by default).
270 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) gen_pool_alloc() argument
274 int order = pool->min_alloc_order; gen_pool_alloc()
286 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { gen_pool_alloc()
292 start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, gen_pool_alloc()
293 pool->data); gen_pool_alloc()
315 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
316 * @pool: pool to allocate from
317 * @size: number of bytes to allocate from the pool
320 * Allocate the requested number of bytes from the specified pool.
321 * Uses the pool allocation function (with first-fit algorithm by default).
325 void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) gen_pool_dma_alloc() argument
329 if (!pool) gen_pool_dma_alloc()
332 vaddr = gen_pool_alloc(pool, size); gen_pool_dma_alloc()
337 *dma = gen_pool_virt_to_phys(pool, vaddr); gen_pool_dma_alloc()
344 * gen_pool_free - free allocated special memory back to the pool
345 * @pool: pool to free to
346 * @addr: starting address of memory to free back to pool
350 * pool. Can not be used in NMI handler on architectures without
353 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) gen_pool_free() argument
356 int order = pool->min_alloc_order; gen_pool_free()
365 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { gen_pool_free()
383 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
384 * @pool: the generic memory pool
388 * Call @func for every chunk of generic memory pool. The @func is
391 void gen_pool_for_each_chunk(struct gen_pool *pool, gen_pool_for_each_chunk() argument
392 void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), gen_pool_for_each_chunk()
398 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) gen_pool_for_each_chunk()
399 func(pool, chunk, data); gen_pool_for_each_chunk()
405 * addr_in_gen_pool - checks if an address falls within the range of a pool
406 * @pool: the generic memory pool
410 * Check if the range of addresses falls within the specified pool. Returns
411 * true if the entire range is contained in the pool and false otherwise.
413 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, addr_in_gen_pool() argument
421 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { addr_in_gen_pool()
434 * gen_pool_avail - get available free space of the pool
435 * @pool: pool to get available free space
437 * Return available free space of the specified pool.
439 size_t gen_pool_avail(struct gen_pool *pool) gen_pool_avail() argument
445 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) gen_pool_avail()
453 * gen_pool_size - get size in bytes of memory managed by the pool
454 * @pool: pool to get size
456 * Return size in bytes of memory managed by the pool.
458 size_t gen_pool_size(struct gen_pool *pool) gen_pool_size() argument
464 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) gen_pool_size()
473 * @pool: pool to change allocation algorithm
477 * Call @algo for each memory allocation in the pool.
481 void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data) gen_pool_set_algo() argument
485 pool->algo = algo; gen_pool_set_algo()
486 if (!pool->algo) gen_pool_set_algo()
487 pool->algo = gen_pool_first_fit; gen_pool_set_algo()
489 pool->data = data; gen_pool_set_algo()
577 * @nid: node id of the node the pool structure should be allocated on, or -1
579 * Create a new special memory pool that can be used to manage special purpose
580 * memory not managed by the regular kmalloc/kfree interface. The pool will be
586 struct gen_pool **ptr, *pool; devm_gen_pool_create() local
592 pool = gen_pool_create(min_alloc_order, nid); devm_gen_pool_create()
593 if (pool) { devm_gen_pool_create()
594 *ptr = pool; devm_gen_pool_create()
600 return pool; devm_gen_pool_create()
623 * of_get_named_gen_pool - find a pool by phandle property
628 * Returns the pool that contains the chunk starting at the physical
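genalloc.c above is the generic special-purpose memory allocator: a gen_pool is created with a minimum allocation order, chunks of memory are added with gen_pool_add_virt(), and allocations come out of the per-chunk bitmaps via the pool's algorithm (first-fit by default). A usage sketch; the SRAM address and size parameters are placeholders supplied by the caller.

#include <linux/genalloc.h>

/* Sketch only; sram_virt/sram_phys/sram_size are caller-provided placeholders. */
static int example_genpool(unsigned long sram_virt, phys_addr_t sram_phys,
			   size_t sram_size)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(5, -1);   /* min alloc 2^5 = 32 bytes, any node */
	if (!pool)
		return -ENOMEM;

	if (gen_pool_add_virt(pool, sram_virt, sram_phys, sram_size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc(pool, 256);        /* first-fit by default */
	if (addr) {
		phys_addr_t phys = gen_pool_virt_to_phys(pool, addr);

		pr_info("256 bytes at %#lx (phys %pa)\n", addr, &phys);
		gen_pool_free(pool, addr, 256);
	}

	gen_pool_destroy(pool);   /* all allocations must have been freed */
	return 0;
}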
H A Diommu-common.c52 * the top 1/4 of the table will be set aside for pool allocations
88 start += iommu->poolsize; /* start for next pool */ iommu_tbl_pool_init()
110 struct iommu_pool *pool; iommu_tbl_range_alloc() local
130 pool = &(iommu->large_pool); iommu_tbl_range_alloc()
135 pool = &(iommu->pools[pool_nr]); iommu_tbl_range_alloc()
137 spin_lock_irqsave(&pool->lock, flags); iommu_tbl_range_alloc()
141 (*handle >= pool->start) && (*handle < pool->end)) iommu_tbl_range_alloc()
144 start = pool->hint; iommu_tbl_range_alloc()
146 limit = pool->end; iommu_tbl_range_alloc()
155 start = pool->start; iommu_tbl_range_alloc()
161 * but on second pass, start at 0 in pool 0. iommu_tbl_range_alloc()
164 spin_unlock(&(pool->lock)); iommu_tbl_range_alloc()
165 pool = &(iommu->pools[0]); iommu_tbl_range_alloc()
166 spin_lock(&(pool->lock)); iommu_tbl_range_alloc()
167 start = pool->start; iommu_tbl_range_alloc()
194 pool->hint = pool->start; iommu_tbl_range_alloc()
199 spin_unlock(&(pool->lock)); iommu_tbl_range_alloc()
201 pool = &(iommu->pools[pool_nr]); iommu_tbl_range_alloc()
202 spin_lock(&(pool->lock)); iommu_tbl_range_alloc()
203 pool->hint = pool->start; iommu_tbl_range_alloc()
214 (n < pool->hint || need_flush(iommu))) { iommu_tbl_range_alloc()
220 pool->hint = end; iommu_tbl_range_alloc()
226 spin_unlock_irqrestore(&(pool->lock), flags); iommu_tbl_range_alloc()
239 /* The large pool is the last pool at the top of the table */ get_pool()
258 struct iommu_pool *pool; iommu_tbl_range_free() local
264 pool = get_pool(iommu, entry); iommu_tbl_range_free()
266 spin_lock_irqsave(&(pool->lock), flags); iommu_tbl_range_free()
268 spin_unlock_irqrestore(&(pool->lock), flags); iommu_tbl_range_free()
H A Drandom32.c175 * Add some additional seeding to the prandom pool.
253 * moved into the nonblocking pool and thus marking it __prandom_reseed()
257 * already waiting for bytes when the nonblocking pool __prandom_reseed()
H A Ddebugobjects.c130 * Allocate a new object. If the pool is empty, switch off the debugger.
188 * Put the object back into the pool and schedule work to free objects
198 * schedule work when the pool is filled and the cache is free_object()
1004 * the static object pool objects into the pool list. After this call debug_objects_selftest()
1043 /* Remove the statically allocated objects from the pool */ debug_objects_replace_static_objects()
1046 /* Move the allocated objects to the pool */ debug_objects_replace_static_objects()
1077 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
/linux-4.1.27/drivers/staging/octeon/
H A Dethernet-mem.c39 * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
40 * @pool: Pool to allocate an skbuff for
41 * @size: Size of the buffer needed for the pool
46 static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) cvm_oct_fill_hw_skbuff() argument
57 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128)); cvm_oct_fill_hw_skbuff()
64 * cvm_oct_free_hw_skbuff- free hardware pool skbuffs
65 * @pool: Pool to allocate an skbuff for
66 * @size: Size of the buffer needed for the pool
69 static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) cvm_oct_free_hw_skbuff() argument
74 memory = cvmx_fpa_alloc(pool); cvm_oct_free_hw_skbuff()
84 pr_warn("Freeing of pool %u had too many skbuffs (%d)\n", cvm_oct_free_hw_skbuff()
85 pool, elements); cvm_oct_free_hw_skbuff()
87 pr_warn("Freeing of pool %u is missing %d skbuffs\n", cvm_oct_free_hw_skbuff()
88 pool, elements); cvm_oct_free_hw_skbuff()
92 * cvm_oct_fill_hw_memory - fill a hardware pool with memory.
93 * @pool: Pool to populate
94 * @size: Size of each buffer in the pool
99 static int cvm_oct_fill_hw_memory(int pool, int size, int elements) cvm_oct_fill_hw_memory() argument
118 pr_warn("Unable to allocate %u bytes for FPA pool %d\n", cvm_oct_fill_hw_memory()
119 elements * size, pool); cvm_oct_fill_hw_memory()
124 cvmx_fpa_free(fpa, pool, 0); cvm_oct_fill_hw_memory()
132 * @pool: FPA pool to free
133 * @size: Size of each buffer in the pool
134 * @elements: Number of buffers that should be in the pool
136 static void cvm_oct_free_hw_memory(int pool, int size, int elements) cvm_oct_free_hw_memory() argument
142 fpa = cvmx_fpa_alloc(pool); cvm_oct_free_hw_memory()
152 pr_warn("Freeing of pool %u had too many buffers (%d)\n", cvm_oct_free_hw_memory()
153 pool, elements); cvm_oct_free_hw_memory()
155 pr_warn("Warning: Freeing of pool %u is missing %d buffers\n", cvm_oct_free_hw_memory()
156 pool, elements); cvm_oct_free_hw_memory()
159 int cvm_oct_mem_fill_fpa(int pool, int size, int elements) cvm_oct_mem_fill_fpa() argument
163 if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL) cvm_oct_mem_fill_fpa()
164 freed = cvm_oct_fill_hw_skbuff(pool, size, elements); cvm_oct_mem_fill_fpa()
166 freed = cvm_oct_fill_hw_memory(pool, size, elements); cvm_oct_mem_fill_fpa()
170 void cvm_oct_mem_empty_fpa(int pool, int size, int elements) cvm_oct_mem_empty_fpa() argument
172 if (USE_SKBUFFS_IN_HW && pool == CVMX_FPA_PACKET_POOL) cvm_oct_mem_empty_fpa()
173 cvm_oct_free_hw_skbuff(pool, size, elements); cvm_oct_mem_empty_fpa()
175 cvm_oct_free_hw_memory(pool, size, elements); cvm_oct_mem_empty_fpa()
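ethernet-mem.c above fills and drains Octeon FPA hardware pools, either with skbuffs (for the packet pool when USE_SKBUFFS_IN_HW) or with raw allocated buffers. A pairing sketch; the element count is an example and CVMX_FPA_PACKET_POOL_SIZE is assumed from the surrounding driver, not shown in the excerpt.

/* Sketch only; assumes the Octeon driver headers (ethernet-mem.h and the
 * cvmx FPA definitions).  The fill return value is not interpreted here. */
static void example_fpa_fill_empty(void)
{
	int num_packet_buffers = 1024;    /* example value */

	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
			     CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);

	/* ... interface runs, hardware consumes and returns buffers ... */

	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL,
			      CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
}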
H A Dethernet-rx.h37 /* Refill the packet buffer pool */ cvm_oct_rx_refill_pool()
/linux-4.1.27/drivers/md/
H A Ddm-thin.c41 * The block size of the device holding pool data must be
183 * A pool device ties together a metadata device and a data device. It
190 * The pool runs in 4 modes. Ordered in degraded order for comparisons.
215 struct pool { struct
217 struct dm_target *ti; /* Only set if a pool target is bound */
267 static enum pool_mode get_pool_mode(struct pool *pool);
268 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
271 * Target context for a pool.
275 struct pool *pool; member in struct:pool_c
295 struct pool *pool; member in struct:thin_c
320 static void wake_worker(struct pool *pool) wake_worker() argument
322 queue_work(pool->wq, &pool->worker); wake_worker()
327 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, bio_detain() argument
337 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO); bio_detain()
339 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); bio_detain()
345 dm_bio_prison_free_cell(pool->prison, cell_prealloc); bio_detain()
350 static void cell_release(struct pool *pool, cell_release() argument
354 dm_cell_release(pool->prison, cell, bios); cell_release()
355 dm_bio_prison_free_cell(pool->prison, cell); cell_release()
358 static void cell_visit_release(struct pool *pool, cell_visit_release() argument
363 dm_cell_visit_release(pool->prison, fn, context, cell); cell_visit_release()
364 dm_bio_prison_free_cell(pool->prison, cell); cell_visit_release()
367 static void cell_release_no_holder(struct pool *pool, cell_release_no_holder() argument
371 dm_cell_release_no_holder(pool->prison, cell, bios); cell_release_no_holder()
372 dm_bio_prison_free_cell(pool->prison, cell); cell_release_no_holder()
375 static void cell_error_with_code(struct pool *pool, cell_error_with_code() argument
378 dm_cell_error(pool->prison, cell, error_code); cell_error_with_code()
379 dm_bio_prison_free_cell(pool->prison, cell); cell_error_with_code()
382 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) cell_error() argument
384 cell_error_with_code(pool, cell, -EIO); cell_error()
387 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) cell_success() argument
389 cell_error_with_code(pool, cell, 0); cell_success()
392 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) cell_requeue() argument
394 cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE); cell_requeue()
413 static void __pool_table_insert(struct pool *pool) __pool_table_insert() argument
416 list_add(&pool->list, &dm_thin_pool_table.pools); __pool_table_insert()
419 static void __pool_table_remove(struct pool *pool) __pool_table_remove() argument
422 list_del(&pool->list); __pool_table_remove()
425 static struct pool *__pool_table_lookup(struct mapped_device *md) __pool_table_lookup()
427 struct pool *pool = NULL, *tmp; __pool_table_lookup() local
433 pool = tmp; __pool_table_lookup()
438 return pool; __pool_table_lookup()
441 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev) __pool_table_lookup_metadata_dev()
443 struct pool *pool = NULL, *tmp; __pool_table_lookup_metadata_dev() local
449 pool = tmp; __pool_table_lookup_metadata_dev()
454 return pool; __pool_table_lookup_metadata_dev()
497 struct pool *pool = tc->pool; requeue_deferred_cells() local
509 cell_requeue(pool, cell); requeue_deferred_cells()
528 static void error_retry_list(struct pool *pool) error_retry_list() argument
533 list_for_each_entry_rcu(tc, &pool->active_thins, list) error_retry_list()
540 * Much of the code depends on pool object resources (lists, workqueues, etc)
541 * but most is exclusively called from the thin target rather than the thin-pool
545 static bool block_size_is_power_of_two(struct pool *pool) block_size_is_power_of_two() argument
547 return pool->sectors_per_block_shift >= 0; block_size_is_power_of_two()
552 struct pool *pool = tc->pool; get_bio_block() local
555 if (block_size_is_power_of_two(pool)) get_bio_block()
556 block_nr >>= pool->sectors_per_block_shift; get_bio_block()
558 (void) sector_div(block_nr, pool->sectors_per_block); get_bio_block()
565 struct pool *pool = tc->pool; remap() local
569 if (block_size_is_power_of_two(pool)) remap()
571 (block << pool->sectors_per_block_shift) | remap()
572 (bi_sector & (pool->sectors_per_block - 1)); remap()
574 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + remap()
575 sector_div(bi_sector, pool->sectors_per_block); remap()
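The get_bio_block() and remap() fragments above compute the thin-pool virtual block for a bio and map a block back to a data-device sector, using a shift/mask fast path when the pool block size is a power of two and sector_div() otherwise. A standalone sketch of that arithmetic (identifiers are illustrative; this is not dm-thin code):

#include <linux/kernel.h>	/* sector_div() */
#include <linux/types.h>	/* sector_t, u64 */

static sector_t example_bio_to_block(sector_t bi_sector,
				     sector_t sectors_per_block,
				     int sectors_per_block_shift)
{
	if (sectors_per_block_shift >= 0)               /* power-of-two size */
		return bi_sector >> sectors_per_block_shift;

	(void) sector_div(bi_sector, sectors_per_block); /* quotient in place */
	return bi_sector;
}

static sector_t example_block_to_sector(u64 block, sector_t bi_sector,
					sector_t sectors_per_block,
					int sectors_per_block_shift)
{
	if (sectors_per_block_shift >= 0)
		return (block << sectors_per_block_shift) |
		       (bi_sector & (sectors_per_block - 1));

	return block * sectors_per_block +
	       sector_div(bi_sector, sectors_per_block);  /* remainder */
}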
589 static void inc_all_io_entry(struct pool *pool, struct bio *bio) inc_all_io_entry() argument
597 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); inc_all_io_entry()
602 struct pool *pool = tc->pool; issue() local
624 spin_lock_irqsave(&pool->lock, flags); issue()
625 bio_list_add(&pool->deferred_flush_bios, bio); issue()
626 spin_unlock_irqrestore(&pool->lock, flags); issue()
678 struct pool *pool = m->tc->pool; __complete_mapping_preparation() local
681 list_add_tail(&m->list, &pool->prepared_mappings); __complete_mapping_preparation()
682 wake_worker(pool); __complete_mapping_preparation()
689 struct pool *pool = m->tc->pool; complete_mapping_preparation() local
691 spin_lock_irqsave(&pool->lock, flags); complete_mapping_preparation()
693 spin_unlock_irqrestore(&pool->lock, flags); complete_mapping_preparation()
729 struct pool *pool = tc->pool; cell_defer_no_holder() local
733 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); cell_defer_no_holder()
736 wake_worker(pool); cell_defer_no_holder()
757 inc_all_io_entry(info->tc->pool, bio); __inc_remap_and_issue_cell()
785 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, inc_remap_and_issue_cell()
801 cell_error(m->tc->pool, m->cell); process_prepared_mapping_fail()
803 mempool_free(m, m->tc->pool->mapping_pool); process_prepared_mapping_fail()
809 struct pool *pool = tc->pool; process_prepared_mapping() local
820 cell_error(pool, m->cell); process_prepared_mapping()
831 metadata_operation_failed(pool, "dm_thin_insert_block", r); process_prepared_mapping()
832 cell_error(pool, m->cell); process_prepared_mapping()
846 inc_all_io_entry(tc->pool, m->cell->holder); process_prepared_mapping()
853 mempool_free(m, pool->mapping_pool); process_prepared_mapping()
863 mempool_free(m, tc->pool->mapping_pool); process_prepared_discard_fail()
870 inc_all_io_entry(tc->pool, m->bio); process_prepared_discard_passdown()
879 if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used) process_prepared_discard_passdown()
887 mempool_free(m, tc->pool->mapping_pool); process_prepared_discard_passdown()
902 static void process_prepared(struct pool *pool, struct list_head *head, process_prepared() argument
910 spin_lock_irqsave(&pool->lock, flags); process_prepared()
912 spin_unlock_irqrestore(&pool->lock, flags); process_prepared()
921 static int io_overlaps_block(struct pool *pool, struct bio *bio) io_overlaps_block() argument
924 (pool->sectors_per_block << SECTOR_SHIFT); io_overlaps_block()
927 static int io_overwrites_block(struct pool *pool, struct bio *bio) io_overwrites_block() argument
930 io_overlaps_block(pool, bio); io_overwrites_block()
940 static int ensure_next_mapping(struct pool *pool) ensure_next_mapping() argument
942 if (pool->next_mapping) ensure_next_mapping()
945 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC); ensure_next_mapping()
947 return pool->next_mapping ? 0 : -ENOMEM; ensure_next_mapping()
950 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) get_next_mapping() argument
952 struct dm_thin_new_mapping *m = pool->next_mapping; get_next_mapping()
954 BUG_ON(!pool->next_mapping); get_next_mapping()
960 pool->next_mapping = NULL; get_next_mapping()
975 r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); ll_zero()
986 struct pool *pool = tc->pool; remap_and_issue_overwrite() local
992 inc_all_io_entry(pool, bio); remap_and_issue_overwrite()
1006 struct pool *pool = tc->pool; schedule_copy() local
1007 struct dm_thin_new_mapping *m = get_next_mapping(pool); schedule_copy()
1021 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) schedule_copy()
1025 * IO to pool_dev remaps to the pool target's data_dev. schedule_copy()
1030 if (io_overwrites_block(pool, bio)) schedule_copy()
1036 from.sector = data_origin * pool->sectors_per_block; schedule_copy()
1040 to.sector = data_dest * pool->sectors_per_block; schedule_copy()
1043 r = dm_kcopyd_copy(pool->copier, &from, 1, &to, schedule_copy()
1060 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { schedule_copy()
1063 data_dest * pool->sectors_per_block + len, schedule_copy()
1064 (data_dest + 1) * pool->sectors_per_block); schedule_copy()
1077 tc->pool->sectors_per_block); schedule_internal_copy()
1084 struct pool *pool = tc->pool; schedule_zero() local
1085 struct dm_thin_new_mapping *m = get_next_mapping(pool); schedule_zero()
1098 if (!pool->pf.zero_new_blocks) schedule_zero()
1101 else if (io_overwrites_block(pool, bio)) schedule_zero()
1106 data_block * pool->sectors_per_block, schedule_zero()
1107 (data_block + 1) * pool->sectors_per_block); schedule_zero()
1114 struct pool *pool = tc->pool; schedule_external_copy() local
1115 sector_t virt_block_begin = virt_block * pool->sectors_per_block; schedule_external_copy()
1116 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; schedule_external_copy()
1121 pool->sectors_per_block); schedule_external_copy()
1132 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1134 static void check_for_space(struct pool *pool) check_for_space() argument
1139 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE) check_for_space()
1142 r = dm_pool_get_free_block_count(pool->pmd, &nr_free); check_for_space()
1147 set_pool_mode(pool, PM_WRITE); check_for_space()
1154 static int commit(struct pool *pool) commit() argument
1158 if (get_pool_mode(pool) >= PM_READ_ONLY) commit()
1161 r = dm_pool_commit_metadata(pool->pmd); commit()
1163 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); commit()
1165 check_for_space(pool); commit()
1170 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) check_low_water_mark() argument
1174 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { check_low_water_mark()
1176 dm_device_name(pool->pool_md)); check_low_water_mark()
1177 spin_lock_irqsave(&pool->lock, flags); check_low_water_mark()
1178 pool->low_water_triggered = true; check_low_water_mark()
1179 spin_unlock_irqrestore(&pool->lock, flags); check_low_water_mark()
1180 dm_table_event(pool->ti->table); check_low_water_mark()
1188 struct pool *pool = tc->pool; alloc_data_block() local
1190 if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) alloc_data_block()
1193 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); alloc_data_block()
1195 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); alloc_data_block()
1199 check_low_water_mark(pool, free_blocks); alloc_data_block()
1206 r = commit(pool); alloc_data_block()
1210 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); alloc_data_block()
1212 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); alloc_data_block()
1217 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); alloc_data_block()
1222 r = dm_pool_alloc_data_block(pool->pmd, result); alloc_data_block()
1224 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); alloc_data_block()
1246 static int should_error_unserviceable_bio(struct pool *pool) should_error_unserviceable_bio() argument
1248 enum pool_mode m = get_pool_mode(pool); should_error_unserviceable_bio()
1253 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); should_error_unserviceable_bio()
1257 return pool->pf.error_if_no_space ? -ENOSPC : 0; should_error_unserviceable_bio()
1264 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); should_error_unserviceable_bio()
1269 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) handle_unserviceable_bio() argument
1271 int error = should_error_unserviceable_bio(pool); handle_unserviceable_bio()
1279 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell) retry_bios_on_resume() argument
1285 error = should_error_unserviceable_bio(pool); retry_bios_on_resume()
1287 cell_error_with_code(pool, cell, error); retry_bios_on_resume()
1292 cell_release(pool, cell, &bios); retry_bios_on_resume()
1302 struct pool *pool = tc->pool; process_discard_cell() local
1310 cell_requeue(pool, cell); process_discard_cell()
1318 * Check nobody is fiddling with this pool block. This can process_discard_cell()
1323 if (bio_detain(tc->pool, &key2, bio, &cell2)) { process_discard_cell()
1328 if (io_overlaps_block(pool, bio)) { process_discard_cell()
1333 m = get_next_mapping(pool); process_discard_cell()
1335 m->pass_discard = pool->pf.discard_passdown; process_discard_cell()
1343 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) process_discard_cell()
1344 pool->process_prepared_discard(m); process_discard_cell()
1347 inc_all_io_entry(pool, bio); process_discard_cell()
1356 if ((!lookup_result.shared) && pool->pf.discard_passdown) process_discard_cell()
1387 if (bio_detain(tc->pool, &key, bio, &cell)) process_discard_bio()
1400 struct pool *pool = tc->pool; break_sharing() local
1410 retry_bios_on_resume(pool, cell); break_sharing()
1416 cell_error(pool, cell); break_sharing()
1434 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); __remap_and_issue_shared_cell()
1435 inc_all_io_entry(info->tc->pool, bio); __remap_and_issue_shared_cell()
1452 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, remap_and_issue_shared_cell()
1468 struct pool *pool = tc->pool; process_shared_bio() local
1476 if (bio_detain(pool, &key, bio, &data_cell)) { process_shared_bio()
1487 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); process_shared_bio()
1488 inc_all_io_entry(pool, bio); process_shared_bio()
1501 struct pool *pool = tc->pool; provision_block() local
1507 inc_all_io_entry(pool, bio); provision_block()
1534 retry_bios_on_resume(pool, cell); provision_block()
1540 cell_error(pool, cell); provision_block()
1548 struct pool *pool = tc->pool; process_cell() local
1554 cell_requeue(pool, cell); process_cell()
1564 inc_all_io_entry(pool, bio); process_cell()
1572 inc_all_io_entry(pool, bio); process_cell()
1602 struct pool *pool = tc->pool; process_bio() local
1612 if (bio_detain(pool, &key, bio, &cell)) process_bio()
1630 handle_unserviceable_bio(tc->pool, bio); __process_bio_read_only()
1634 inc_all_io_entry(tc->pool, bio); __process_bio_read_only()
1645 handle_unserviceable_bio(tc->pool, bio); __process_bio_read_only()
1650 inc_all_io_entry(tc->pool, bio); __process_bio_read_only()
1691 cell_success(tc->pool, cell); process_cell_success()
1696 cell_error(tc->pool, cell); process_cell_fail()
1703 static int need_commit_due_to_time(struct pool *pool) need_commit_due_to_time() argument
1705 return !time_in_range(jiffies, pool->last_commit_jiffies, need_commit_due_to_time()
1706 pool->last_commit_jiffies + COMMIT_PERIOD); need_commit_due_to_time()
1775 struct pool *pool = tc->pool; process_thin_deferred_bios() local
1810 if (ensure_next_mapping(pool)) { process_thin_deferred_bios()
1819 pool->process_discard(tc, bio); process_thin_deferred_bios()
1821 pool->process_bio(tc, bio); process_thin_deferred_bios()
1824 throttle_work_update(&pool->throttle); process_thin_deferred_bios()
1825 dm_pool_issue_prefetches(pool->pmd); process_thin_deferred_bios()
1848 static unsigned sort_cells(struct pool *pool, struct list_head *cells) sort_cells() argument
1857 pool->cell_sort_array[count++] = cell; list_for_each_entry_safe()
1861 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
1868 struct pool *pool = tc->pool; process_thin_deferred_cells() local
1884 count = sort_cells(tc->pool, &cells); process_thin_deferred_cells()
1887 cell = pool->cell_sort_array[i]; process_thin_deferred_cells()
1895 if (ensure_next_mapping(pool)) { process_thin_deferred_cells()
1897 list_add(&pool->cell_sort_array[j]->user_list, &cells); process_thin_deferred_cells()
1906 pool->process_discard_cell(tc, cell); process_thin_deferred_cells()
1908 pool->process_cell(tc, cell); process_thin_deferred_cells()
1921 static struct thin_c *get_first_thin(struct pool *pool) get_first_thin() argument
1926 if (!list_empty(&pool->active_thins)) { get_first_thin()
1927 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); get_first_thin()
1935 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) get_next_thin() argument
1940 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { get_next_thin()
1952 static void process_deferred_bios(struct pool *pool) process_deferred_bios() argument
1959 tc = get_first_thin(pool); process_deferred_bios()
1963 tc = get_next_thin(pool, tc); process_deferred_bios()
1971 spin_lock_irqsave(&pool->lock, flags); process_deferred_bios()
1972 bio_list_merge(&bios, &pool->deferred_flush_bios); process_deferred_bios()
1973 bio_list_init(&pool->deferred_flush_bios); process_deferred_bios()
1974 spin_unlock_irqrestore(&pool->lock, flags); process_deferred_bios()
1977 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) process_deferred_bios()
1980 if (commit(pool)) { process_deferred_bios()
1985 pool->last_commit_jiffies = jiffies; process_deferred_bios()
1993 struct pool *pool = container_of(ws, struct pool, worker); do_worker() local
1995 throttle_work_start(&pool->throttle); do_worker()
1996 dm_pool_issue_prefetches(pool->pmd); do_worker()
1997 throttle_work_update(&pool->throttle); do_worker()
1998 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); do_worker()
1999 throttle_work_update(&pool->throttle); do_worker()
2000 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); do_worker()
2001 throttle_work_update(&pool->throttle); do_worker()
2002 process_deferred_bios(pool); do_worker()
2003 throttle_work_complete(&pool->throttle); do_worker()
2012 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); do_waker() local
2013 wake_worker(pool); do_waker()
2014 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); do_waker()
2019 * timeout either the pool will have been resized (and thus back in
2024 struct pool *pool = container_of(to_delayed_work(ws), struct pool, do_no_space_timeout() local
2027 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) do_no_space_timeout()
2028 set_pool_mode(pool, PM_READ_ONLY); do_no_space_timeout()
2048 static void pool_work_wait(struct pool_work *pw, struct pool *pool, pool_work_wait() argument
2053 queue_work(pool->wq, &pw->worker); pool_work_wait()
2089 pool_work_wait(&w.pw, tc->pool, fn); noflush_work()
2094 static enum pool_mode get_pool_mode(struct pool *pool) get_pool_mode() argument
2096 return pool->pf.mode; get_pool_mode()
2099 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) notify_of_pool_mode_change() argument
2101 dm_table_event(pool->ti->table); notify_of_pool_mode_change()
2102 DMINFO("%s: switching pool to %s mode", notify_of_pool_mode_change()
2103 dm_device_name(pool->pool_md), new_mode); notify_of_pool_mode_change()
2106 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) set_pool_mode() argument
2108 struct pool_c *pt = pool->ti->private; set_pool_mode()
2109 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); set_pool_mode()
2110 enum pool_mode old_mode = get_pool_mode(pool); set_pool_mode()
2114 * Never allow the pool to transition to PM_WRITE mode if user set_pool_mode()
2118 DMERR("%s: unable to switch pool to write mode until repaired.", set_pool_mode()
2119 dm_device_name(pool->pool_md)); set_pool_mode()
2128 * pool move out of the old mode. set_pool_mode()
2136 notify_of_pool_mode_change(pool, "failure"); set_pool_mode()
2137 dm_pool_metadata_read_only(pool->pmd); set_pool_mode()
2138 pool->process_bio = process_bio_fail; set_pool_mode()
2139 pool->process_discard = process_bio_fail; set_pool_mode()
2140 pool->process_cell = process_cell_fail; set_pool_mode()
2141 pool->process_discard_cell = process_cell_fail; set_pool_mode()
2142 pool->process_prepared_mapping = process_prepared_mapping_fail; set_pool_mode()
2143 pool->process_prepared_discard = process_prepared_discard_fail; set_pool_mode()
2145 error_retry_list(pool); set_pool_mode()
2150 notify_of_pool_mode_change(pool, "read-only"); set_pool_mode()
2151 dm_pool_metadata_read_only(pool->pmd); set_pool_mode()
2152 pool->process_bio = process_bio_read_only; set_pool_mode()
2153 pool->process_discard = process_bio_success; set_pool_mode()
2154 pool->process_cell = process_cell_read_only; set_pool_mode()
2155 pool->process_discard_cell = process_cell_success; set_pool_mode()
2156 pool->process_prepared_mapping = process_prepared_mapping_fail; set_pool_mode()
2157 pool->process_prepared_discard = process_prepared_discard_passdown; set_pool_mode()
2159 error_retry_list(pool); set_pool_mode()
2165 * would trigger userland to extend the pool before we set_pool_mode()
2172 notify_of_pool_mode_change(pool, "out-of-data-space"); set_pool_mode()
2173 pool->process_bio = process_bio_read_only; set_pool_mode()
2174 pool->process_discard = process_discard_bio; set_pool_mode()
2175 pool->process_cell = process_cell_read_only; set_pool_mode()
2176 pool->process_discard_cell = process_discard_cell; set_pool_mode()
2177 pool->process_prepared_mapping = process_prepared_mapping; set_pool_mode()
2178 pool->process_prepared_discard = process_prepared_discard; set_pool_mode()
2180 if (!pool->pf.error_if_no_space && no_space_timeout) set_pool_mode()
2181 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); set_pool_mode()
2186 notify_of_pool_mode_change(pool, "write"); set_pool_mode()
2187 dm_pool_metadata_read_write(pool->pmd); set_pool_mode()
2188 pool->process_bio = process_bio; set_pool_mode()
2189 pool->process_discard = process_discard_bio; set_pool_mode()
2190 pool->process_cell = process_cell; set_pool_mode()
2191 pool->process_discard_cell = process_discard_cell; set_pool_mode()
2192 pool->process_prepared_mapping = process_prepared_mapping; set_pool_mode()
2193 pool->process_prepared_discard = process_prepared_discard; set_pool_mode()
2197 pool->pf.mode = new_mode; set_pool_mode()
2199 * The pool mode may have changed, sync it so bind_control_target() set_pool_mode()
2205 static void abort_transaction(struct pool *pool) abort_transaction() argument
2207 const char *dev_name = dm_device_name(pool->pool_md); abort_transaction()
2210 if (dm_pool_abort_metadata(pool->pmd)) { abort_transaction()
2212 set_pool_mode(pool, PM_FAIL); abort_transaction()
2215 if (dm_pool_metadata_set_needs_check(pool->pmd)) { abort_transaction()
2217 set_pool_mode(pool, PM_FAIL); abort_transaction()
2221 static void metadata_operation_failed(struct pool *pool, const char *op, int r) metadata_operation_failed() argument
2224 dm_device_name(pool->pool_md), op, r); metadata_operation_failed()
2226 abort_transaction(pool); metadata_operation_failed()
2227 set_pool_mode(pool, PM_READ_ONLY); metadata_operation_failed()
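The set_pool_mode() fragments above all follow one pattern: a mode change swaps the pool's process_* hooks as a set, and the worker simply calls through whatever pointers are currently installed. The standalone, user-space C sketch below illustrates only that dispatch pattern; toy_pool, toy_set_mode and the toy handlers are invented names, not dm-thin code.

/* Hypothetical user-space sketch of the mode -> handler dispatch used above. */
#include <stdio.h>

enum toy_mode { TOY_WRITE, TOY_READ_ONLY, TOY_FAIL };

struct toy_pool {
	enum toy_mode mode;
	/* Swapped as a set when the mode changes, like pool->process_*. */
	void (*process_bio)(struct toy_pool *p);
	void (*process_discard)(struct toy_pool *p);
};

static void process_bio_rw(struct toy_pool *p)       { puts("bio: read/write"); }
static void process_bio_ro(struct toy_pool *p)       { puts("bio: read-only"); }
static void process_bio_fail(struct toy_pool *p)     { puts("bio: fail"); }
static void process_discard_ok(struct toy_pool *p)   { puts("discard: pass"); }
static void process_discard_fail(struct toy_pool *p) { puts("discard: fail"); }

static void toy_set_mode(struct toy_pool *p, enum toy_mode m)
{
	switch (m) {
	case TOY_WRITE:
		p->process_bio = process_bio_rw;
		p->process_discard = process_discard_ok;
		break;
	case TOY_READ_ONLY:
		p->process_bio = process_bio_ro;
		p->process_discard = process_discard_fail;
		break;
	case TOY_FAIL:
		p->process_bio = process_bio_fail;
		p->process_discard = process_discard_fail;
		break;
	}
	p->mode = m;	/* publish the new mode last, as set_pool_mode() does */
}

int main(void)
{
	struct toy_pool p;

	toy_set_mode(&p, TOY_WRITE);
	p.process_bio(&p);
	toy_set_mode(&p, TOY_READ_ONLY);
	p.process_bio(&p);
	p.process_discard(&p);
	return 0;
}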
2242 struct pool *pool = tc->pool; thin_defer_bio() local
2248 wake_worker(pool); thin_defer_bio()
2253 struct pool *pool = tc->pool; thin_defer_bio_with_throttle() local
2255 throttle_lock(&pool->throttle); thin_defer_bio_with_throttle()
2257 throttle_unlock(&pool->throttle); thin_defer_bio_with_throttle()
2263 struct pool *pool = tc->pool; thin_defer_cell() local
2265 throttle_lock(&pool->throttle); thin_defer_cell()
2269 throttle_unlock(&pool->throttle); thin_defer_cell()
2271 wake_worker(pool); thin_defer_cell()
2304 if (get_pool_mode(tc->pool) == PM_FAIL) { thin_bio_map()
2319 if (bio_detain(tc->pool, &key, bio, &virt_cell)) thin_bio_map()
2349 if (bio_detain(tc->pool, &key, bio, &data_cell)) { thin_bio_map()
2354 inc_all_io_entry(tc->pool, bio); thin_bio_map()
2370 * pool is switched to fail-io mode. thin_bio_map()
2383 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) pool_is_congested()
2390 static void requeue_bios(struct pool *pool) requeue_bios() argument
2396 list_for_each_entry_rcu(tc, &pool->active_thins, list) { requeue_bios()
2406 * Binding of control targets to a pool object
2426 struct pool *pool = pt->pool; disable_passdown_if_not_supported() local
2429 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT; disable_passdown_if_not_supported()
2439 else if (data_limits->max_discard_sectors < pool->sectors_per_block) disable_passdown_if_not_supported()
2454 static int bind_control_target(struct pool *pool, struct dm_target *ti) bind_control_target() argument
2459 * We want to make sure that a pool in PM_FAIL mode is never upgraded. bind_control_target()
2461 enum pool_mode old_mode = get_pool_mode(pool); bind_control_target()
2465 * Don't change the pool's mode until set_pool_mode() below. bind_control_target()
2466 * Otherwise the pool's process_* function pointers may bind_control_target()
2467 * not match the desired pool mode. bind_control_target()
2471 pool->ti = ti; bind_control_target()
2472 pool->pf = pt->adjusted_pf; bind_control_target()
2473 pool->low_water_blocks = pt->low_water_blocks; bind_control_target()
2475 set_pool_mode(pool, new_mode); bind_control_target()
2480 static void unbind_control_target(struct pool *pool, struct dm_target *ti) unbind_control_target() argument
2482 if (pool->ti == ti) unbind_control_target()
2483 pool->ti = NULL; unbind_control_target()
2489 /* Initialize pool features. */ pool_features_init()
2499 static void __pool_destroy(struct pool *pool) __pool_destroy() argument
2501 __pool_table_remove(pool); __pool_destroy()
2503 vfree(pool->cell_sort_array); __pool_destroy()
2504 if (dm_pool_metadata_close(pool->pmd) < 0) __pool_destroy()
2507 dm_bio_prison_destroy(pool->prison); __pool_destroy()
2508 dm_kcopyd_client_destroy(pool->copier); __pool_destroy()
2510 if (pool->wq) __pool_destroy()
2511 destroy_workqueue(pool->wq); __pool_destroy()
2513 if (pool->next_mapping) __pool_destroy()
2514 mempool_free(pool->next_mapping, pool->mapping_pool); __pool_destroy()
2515 mempool_destroy(pool->mapping_pool); __pool_destroy()
2516 dm_deferred_set_destroy(pool->shared_read_ds); __pool_destroy()
2517 dm_deferred_set_destroy(pool->all_io_ds); __pool_destroy()
2518 kfree(pool); __pool_destroy()
2523 static struct pool *pool_create(struct mapped_device *pool_md, pool_create()
2530 struct pool *pool; pool_create() local
2537 return (struct pool *)pmd; pool_create()
2540 pool = kmalloc(sizeof(*pool), GFP_KERNEL); pool_create()
2541 if (!pool) { pool_create()
2542 *error = "Error allocating memory for pool"; pool_create()
2547 pool->pmd = pmd; pool_create()
2548 pool->sectors_per_block = block_size; pool_create()
2550 pool->sectors_per_block_shift = -1; pool_create()
2552 pool->sectors_per_block_shift = __ffs(block_size); pool_create()
2553 pool->low_water_blocks = 0; pool_create()
2554 pool_features_init(&pool->pf); pool_create()
2555 pool->prison = dm_bio_prison_create(); pool_create()
2556 if (!pool->prison) { pool_create()
2557 *error = "Error creating pool's bio prison"; pool_create()
2562 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); pool_create()
2563 if (IS_ERR(pool->copier)) { pool_create()
2564 r = PTR_ERR(pool->copier); pool_create()
2565 *error = "Error creating pool's kcopyd client"; pool_create()
2574 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); pool_create()
2575 if (!pool->wq) { pool_create()
2576 *error = "Error creating pool's workqueue"; pool_create()
2581 throttle_init(&pool->throttle); pool_create()
2582 INIT_WORK(&pool->worker, do_worker); pool_create()
2583 INIT_DELAYED_WORK(&pool->waker, do_waker); pool_create()
2584 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); pool_create()
2585 spin_lock_init(&pool->lock); pool_create()
2586 bio_list_init(&pool->deferred_flush_bios); pool_create()
2587 INIT_LIST_HEAD(&pool->prepared_mappings); pool_create()
2588 INIT_LIST_HEAD(&pool->prepared_discards); pool_create()
2589 INIT_LIST_HEAD(&pool->active_thins); pool_create()
2590 pool->low_water_triggered = false; pool_create()
2591 pool->suspended = true; pool_create()
2593 pool->shared_read_ds = dm_deferred_set_create(); pool_create()
2594 if (!pool->shared_read_ds) { pool_create()
2595 *error = "Error creating pool's shared read deferred set"; pool_create()
2600 pool->all_io_ds = dm_deferred_set_create(); pool_create()
2601 if (!pool->all_io_ds) { pool_create()
2602 *error = "Error creating pool's all io deferred set"; pool_create()
2607 pool->next_mapping = NULL; pool_create()
2608 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, pool_create()
2610 if (!pool->mapping_pool) { pool_create()
2611 *error = "Error creating pool's mapping mempool"; pool_create()
2616 pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE); pool_create()
2617 if (!pool->cell_sort_array) { pool_create()
2623 pool->ref_count = 1; pool_create()
2624 pool->last_commit_jiffies = jiffies; pool_create()
2625 pool->pool_md = pool_md; pool_create()
2626 pool->md_dev = metadata_dev; pool_create()
2627 __pool_table_insert(pool); pool_create()
2629 return pool; pool_create()
2632 mempool_destroy(pool->mapping_pool); pool_create()
2634 dm_deferred_set_destroy(pool->all_io_ds); pool_create()
2636 dm_deferred_set_destroy(pool->shared_read_ds); pool_create()
2638 destroy_workqueue(pool->wq); pool_create()
2640 dm_kcopyd_client_destroy(pool->copier); pool_create()
2642 dm_bio_prison_destroy(pool->prison); pool_create()
2644 kfree(pool); pool_create()
2652 static void __pool_inc(struct pool *pool) __pool_inc() argument
2655 pool->ref_count++; __pool_inc()
2658 static void __pool_dec(struct pool *pool) __pool_dec() argument
2661 BUG_ON(!pool->ref_count); __pool_dec()
2662 if (!--pool->ref_count) __pool_dec()
2663 __pool_destroy(pool); __pool_dec()
2666 static struct pool *__pool_find(struct mapped_device *pool_md, __pool_find()
2671 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); __pool_find() local
2673 if (pool) { __pool_find()
2674 if (pool->pool_md != pool_md) { __pool_find()
2675 *error = "metadata device already in use by a pool"; __pool_find()
2678 __pool_inc(pool); __pool_find()
2681 pool = __pool_table_lookup(pool_md); __pool_find()
2682 if (pool) { __pool_find()
2683 if (pool->md_dev != metadata_dev) { __pool_find()
2684 *error = "different pool cannot replace a pool"; __pool_find()
2687 __pool_inc(pool); __pool_find()
2690 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error); __pool_find()
2695 return pool; __pool_find()
2707 unbind_control_target(pt->pool, ti); pool_dtr()
2708 __pool_dec(pt->pool); pool_dtr()
2724 {0, 4, "Invalid number of pool feature arguments"}, parse_pool_features()
2757 ti->error = "Unrecognised pool feature requested"; parse_pool_features()
2768 struct pool *pool = context; metadata_low_callback() local
2771 dm_device_name(pool->pool_md)); metadata_low_callback()
2773 dm_table_event(pool->ti->table); metadata_low_callback()
2828 * thin-pool <metadata dev> <data dev>
2837 * read_only: Don't allow any changes to be made to the pool metadata.
2844 struct pool *pool; pool_ctr() local
2868 * Set default pool features. pool_ctr()
2912 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, pool_ctr()
2914 if (IS_ERR(pool)) { pool_ctr()
2915 r = PTR_ERR(pool); pool_ctr()
2922 * initial load. This would require a pool reload to trigger thin pool_ctr()
2925 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { pool_ctr()
2931 pt->pool = pool; pool_ctr()
2940 * Only need to enable discards if the pool should pass pool_ctr()
2950 * stacking of discard limits (this keeps the pool and pool_ctr()
2957 r = dm_pool_register_metadata_threshold(pt->pool->pmd, pool_ctr()
2960 pool); pool_ctr()
2972 __pool_dec(pool); pool_ctr()
2989 struct pool *pool = pt->pool; pool_map() local
2995 spin_lock_irqsave(&pool->lock, flags); pool_map()
2998 spin_unlock_irqrestore(&pool->lock, flags); pool_map()
3007 struct pool *pool = pt->pool; maybe_resize_data_dev() local
3013 (void) sector_div(data_size, pool->sectors_per_block); maybe_resize_data_dev()
3015 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); maybe_resize_data_dev()
3018 dm_device_name(pool->pool_md)); maybe_resize_data_dev()
3023 DMERR("%s: pool target (%llu blocks) too small: expected %llu", maybe_resize_data_dev()
3024 dm_device_name(pool->pool_md), maybe_resize_data_dev()
3029 if (dm_pool_metadata_needs_check(pool->pmd)) { maybe_resize_data_dev()
3031 dm_device_name(pool->pool_md)); maybe_resize_data_dev()
3037 dm_device_name(pool->pool_md), maybe_resize_data_dev()
3039 r = dm_pool_resize_data_dev(pool->pmd, data_size); maybe_resize_data_dev()
3041 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r); maybe_resize_data_dev()
3055 struct pool *pool = pt->pool; maybe_resize_metadata_dev() local
3060 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); maybe_resize_metadata_dev()
3062 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); maybe_resize_metadata_dev()
3065 dm_device_name(pool->pool_md)); maybe_resize_metadata_dev()
3071 dm_device_name(pool->pool_md), maybe_resize_metadata_dev()
3076 if (dm_pool_metadata_needs_check(pool->pmd)) { maybe_resize_metadata_dev()
3078 dm_device_name(pool->pool_md)); maybe_resize_metadata_dev()
3082 warn_if_metadata_device_too_big(pool->md_dev); maybe_resize_metadata_dev()
3084 dm_device_name(pool->pool_md), maybe_resize_metadata_dev()
3086 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); maybe_resize_metadata_dev()
3088 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); maybe_resize_metadata_dev()
3114 struct pool *pool = pt->pool; pool_preresume() local
3117 * Take control of the pool object. pool_preresume()
3119 r = bind_control_target(pool, ti); pool_preresume()
3132 (void) commit(pool); pool_preresume()
3137 static void pool_suspend_active_thins(struct pool *pool) pool_suspend_active_thins() argument
3142 tc = get_first_thin(pool); pool_suspend_active_thins()
3145 tc = get_next_thin(pool, tc); pool_suspend_active_thins()
3149 static void pool_resume_active_thins(struct pool *pool) pool_resume_active_thins() argument
3154 tc = get_first_thin(pool); pool_resume_active_thins()
3157 tc = get_next_thin(pool, tc); pool_resume_active_thins()
3164 struct pool *pool = pt->pool; pool_resume() local
3171 requeue_bios(pool); pool_resume()
3172 pool_resume_active_thins(pool); pool_resume()
3174 spin_lock_irqsave(&pool->lock, flags); pool_resume()
3175 pool->low_water_triggered = false; pool_resume()
3176 pool->suspended = false; pool_resume()
3177 spin_unlock_irqrestore(&pool->lock, flags); pool_resume()
3179 do_waker(&pool->waker.work); pool_resume()
3185 struct pool *pool = pt->pool; pool_presuspend() local
3188 spin_lock_irqsave(&pool->lock, flags); pool_presuspend()
3189 pool->suspended = true; pool_presuspend()
3190 spin_unlock_irqrestore(&pool->lock, flags); pool_presuspend()
3192 pool_suspend_active_thins(pool); pool_presuspend()
3198 struct pool *pool = pt->pool; pool_presuspend_undo() local
3201 pool_resume_active_thins(pool); pool_presuspend_undo()
3203 spin_lock_irqsave(&pool->lock, flags); pool_presuspend_undo()
3204 pool->suspended = false; pool_presuspend_undo()
3205 spin_unlock_irqrestore(&pool->lock, flags); pool_presuspend_undo()
3211 struct pool *pool = pt->pool; pool_postsuspend() local
3213 cancel_delayed_work_sync(&pool->waker); pool_postsuspend()
3214 cancel_delayed_work_sync(&pool->no_space_timeout); pool_postsuspend()
3215 flush_workqueue(pool->wq); pool_postsuspend()
3216 (void) commit(pool); pool_postsuspend()
3242 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool) process_create_thin_mesg() argument
3255 r = dm_pool_create_thin(pool->pmd, dev_id); process_create_thin_mesg()
3265 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool) process_create_snap_mesg() argument
3283 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); process_create_snap_mesg()
3293 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool) process_delete_mesg() argument
3306 r = dm_pool_delete_thin_device(pool->pmd, dev_id); process_delete_mesg()
3313 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool) process_set_transaction_id_mesg() argument
3332 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); process_set_transaction_id_mesg()
3342 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) process_reserve_metadata_snap_mesg() argument
3350 (void) commit(pool); process_reserve_metadata_snap_mesg()
3352 r = dm_pool_reserve_metadata_snap(pool->pmd); process_reserve_metadata_snap_mesg()
3359 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) process_release_metadata_snap_mesg() argument
3367 r = dm_pool_release_metadata_snap(pool->pmd); process_release_metadata_snap_mesg()
3387 struct pool *pool = pt->pool; pool_message() local
3389 if (get_pool_mode(pool) >= PM_READ_ONLY) { pool_message()
3390 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", pool_message()
3391 dm_device_name(pool->pool_md)); pool_message()
3396 r = process_create_thin_mesg(argc, argv, pool); pool_message()
3399 r = process_create_snap_mesg(argc, argv, pool); pool_message()
3402 r = process_delete_mesg(argc, argv, pool); pool_message()
3405 r = process_set_transaction_id_mesg(argc, argv, pool); pool_message()
3408 r = process_reserve_metadata_snap_mesg(argc, argv, pool); pool_message()
3411 r = process_release_metadata_snap_mesg(argc, argv, pool); pool_message()
3414 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); pool_message()
3417 (void) commit(pool); pool_message()
3465 struct pool *pool = pt->pool; pool_status() local
3469 if (get_pool_mode(pool) == PM_FAIL) { pool_status()
3476 (void) commit(pool); pool_status()
3478 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); pool_status()
3481 dm_device_name(pool->pool_md), r); pool_status()
3485 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); pool_status()
3488 dm_device_name(pool->pool_md), r); pool_status()
3492 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); pool_status()
3495 dm_device_name(pool->pool_md), r); pool_status()
3499 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); pool_status()
3502 dm_device_name(pool->pool_md), r); pool_status()
3506 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); pool_status()
3509 dm_device_name(pool->pool_md), r); pool_status()
3513 r = dm_pool_get_metadata_snap(pool->pmd, &held_root); pool_status()
3516 dm_device_name(pool->pool_md), r); pool_status()
3532 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) pool_status()
3534 else if (pool->pf.mode == PM_READ_ONLY) pool_status()
3539 if (!pool->pf.discard_enabled) pool_status()
3541 else if (pool->pf.discard_passdown) pool_status()
3546 if (pool->pf.error_if_no_space) pool_status()
3557 (unsigned long)pool->sectors_per_block, pool_status()
3592 struct pool *pool = pt->pool; set_discard_limits() local
3595 limits->max_discard_sectors = pool->sectors_per_block; set_discard_limits()
3603 pool->sectors_per_block << SECTOR_SHIFT); set_discard_limits()
3605 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; set_discard_limits()
3611 struct pool *pool = pt->pool; pool_io_hints() local
3615 * If max_sectors is smaller than pool->sectors_per_block adjust it pool_io_hints()
3616 * to the highest possible power-of-2 factor of pool->sectors_per_block. pool_io_hints()
3617 * This is especially beneficial when the pool's data device is a RAID pool_io_hints()
3618 * device that has a full stripe width that matches pool->sectors_per_block pool_io_hints()
3623 if (limits->max_sectors < pool->sectors_per_block) { pool_io_hints()
3624 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) { pool_io_hints()
3633 * pool's blocksize (io_opt is a factor) do not override them. pool_io_hints()
3635 if (io_opt_sectors < pool->sectors_per_block || pool_io_hints()
3636 !is_factor(io_opt_sectors, pool->sectors_per_block)) { pool_io_hints()
3637 if (is_factor(pool->sectors_per_block, limits->max_sectors)) pool_io_hints()
3640 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); pool_io_hints()
3641 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); pool_io_hints()
3646 * They get transferred to the live pool in bind_control_target() pool_io_hints()
3652 * block layer will stack them if pool's data device has support. pool_io_hints()
3666 .name = "thin-pool",
3705 spin_lock_irqsave(&tc->pool->lock, flags); thin_dtr()
3707 spin_unlock_irqrestore(&tc->pool->lock, flags); thin_dtr()
3715 __pool_dec(tc->pool); thin_dtr()
3730 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
3732 * origin_dev: a device external to the pool that should act as the origin
3734 * If the pool device has discards disabled, they get disabled for the thin
3777 ti->error = "Error opening pool device"; thin_ctr()
3790 ti->error = "Couldn't get pool mapped device"; thin_ctr()
3795 tc->pool = __pool_table_lookup(pool_md); thin_ctr()
3796 if (!tc->pool) { thin_ctr()
3797 ti->error = "Couldn't find pool object"; thin_ctr()
3801 __pool_inc(tc->pool); thin_ctr()
3803 if (get_pool_mode(tc->pool) == PM_FAIL) { thin_ctr()
3809 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); thin_ctr()
3815 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); thin_ctr()
3823 /* In case the pool supports discards, pass them on. */ thin_ctr()
3825 if (tc->pool->pf.discard_enabled) { thin_ctr()
3834 spin_lock_irqsave(&tc->pool->lock, flags); thin_ctr()
3835 if (tc->pool->suspended) { thin_ctr()
3836 spin_unlock_irqrestore(&tc->pool->lock, flags); thin_ctr()
3838 ti->error = "Unable to activate thin device while pool is suspended"; thin_ctr()
3844 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); thin_ctr()
3845 spin_unlock_irqrestore(&tc->pool->lock, flags); thin_ctr()
3861 __pool_dec(tc->pool); thin_ctr()
3890 struct pool *pool = h->tc->pool; thin_endio() local
3896 spin_lock_irqsave(&pool->lock, flags); thin_endio()
3901 spin_unlock_irqrestore(&pool->lock, flags); thin_endio()
3908 spin_lock_irqsave(&pool->lock, flags); thin_endio()
3910 list_add_tail(&m->list, &pool->prepared_discards); thin_endio()
3911 spin_unlock_irqrestore(&pool->lock, flags); thin_endio()
3912 wake_worker(pool); thin_endio()
3960 if (get_pool_mode(tc->pool) == PM_FAIL) { thin_status()
3982 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); thin_status()
3985 tc->pool->sectors_per_block) - 1); thin_status()
4026 struct pool *pool = tc->pool; thin_iterate_devices() local
4030 * we follow a more convoluted path through to the pool's target. thin_iterate_devices()
4032 if (!pool->ti) thin_iterate_devices()
4035 blocks = pool->ti->len; thin_iterate_devices()
4036 (void) sector_div(blocks, pool->sectors_per_block); thin_iterate_devices()
4038 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); thin_iterate_devices()
H A Dmultipath.h15 mempool_t *pool; member in struct:mpconf
H A Ddm-io.c25 mempool_t *pool; member in struct:dm_io_client
57 client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache); dm_io_client_create()
58 if (!client->pool) dm_io_client_create()
68 if (client->pool) dm_io_client_create()
69 mempool_destroy(client->pool); dm_io_client_create()
77 mempool_destroy(client->pool); dm_io_client_destroy()
124 mempool_free(io, io->client->pool); complete_io()
417 io = mempool_alloc(client->pool, GFP_NOIO); sync_io()
449 io = mempool_alloc(client->pool, GFP_NOIO); async_io()
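dm-io.c above shows the usual mempool recipe: back the pool with a slab cache at client-creation time, allocate with GFP_NOIO on the I/O path, and free the element back into the pool. The module sketch below restates that recipe with invented names (struct foo_io, foo_pool); it illustrates the generic mempool API, not dm-io itself.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mempool.h>

struct foo_io {			/* hypothetical per-request bookkeeping */
	int status;
};

static struct kmem_cache *foo_cache;
static mempool_t *foo_pool;

static int __init foo_init(void)
{
	struct foo_io *io;

	foo_cache = KMEM_CACHE(foo_io, 0);
	if (!foo_cache)
		return -ENOMEM;

	/* Guarantee at least 16 objects even under memory pressure. */
	foo_pool = mempool_create_slab_pool(16, foo_cache);
	if (!foo_pool) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}

	/* Allocation on an I/O path must not recurse into the FS: GFP_NOIO. */
	io = mempool_alloc(foo_pool, GFP_NOIO);
	io->status = 0;
	mempool_free(io, foo_pool);	/* return the element to the pool */

	return 0;
}

static void __exit foo_exit(void)
{
	mempool_destroy(foo_pool);
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");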
H A Dmultipath.c81 mempool_free(mp_bh, conf->pool); multipath_end_bh_io()
119 mp_bh = mempool_alloc(conf->pool, GFP_NOIO); multipath_make_request()
127 mempool_free(mp_bh, conf->pool); multipath_make_request()
462 conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
464 if (conf->pool == NULL) {
496 if (conf->pool)
497 mempool_destroy(conf->pool);
509 mempool_destroy(conf->pool); multipath_free()
H A Draid1.h18 * pool was allocated for, so they know how much to allocate and free.
19 * mddev->raid_disks cannot be used, as it can change while a pool is active
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_page_alloc_dma.c27 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
77 * The pool structure. There are usually six pools:
85 * @type: Type of the pool
87 * used with irqsave/irqrestore variants because pool allocator may be called
96 * @nfrees: Stats when pool is shrinking.
97 * @nrefills: Stats when the pool is grown.
99 * @name: Name of the pool.
136 * Limits for the pool. They are handled without locks because only place where
151 * @dev: The 'struct device' associated with the 'pool'
152 * @pool: The 'struct dma_pool' associated with the 'dev'
157 struct dma_pool *pool; member in struct:device_pools
165 * @options: Limits for the pool.
306 static int ttm_set_pages_caching(struct dma_pool *pool, ttm_set_pages_caching() argument
311 if (pool->type & IS_UC) { ttm_set_pages_caching()
315 pool->dev_name, cpages); ttm_set_pages_caching()
317 if (pool->type & IS_WC) { ttm_set_pages_caching()
321 pool->dev_name, cpages); ttm_set_pages_caching()
326 static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) __ttm_dma_free_page() argument
329 dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma); __ttm_dma_free_page()
334 static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool) __ttm_dma_alloc_page() argument
342 d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size, __ttm_dma_alloc_page()
344 pool->gfp_flags); __ttm_dma_alloc_page()
369 static void ttm_pool_update_free_locked(struct dma_pool *pool, ttm_pool_update_free_locked() argument
372 pool->npages_free -= freed_pages; ttm_pool_update_free_locked()
373 pool->nfrees += freed_pages; ttm_pool_update_free_locked()
378 static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, ttm_dma_pages_put() argument
383 /* Don't set WB on WB page pool. */ ttm_dma_pages_put()
384 if (npages && !(pool->type & IS_CACHED) && ttm_dma_pages_put()
387 pool->dev_name, npages); ttm_dma_pages_put()
391 __ttm_dma_free_page(pool, d_page); list_for_each_entry_safe()
395 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) ttm_dma_page_put() argument
397 /* Don't set WB on WB page pool. */ ttm_dma_page_put()
398 if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1)) ttm_dma_page_put()
400 pool->dev_name, 1); ttm_dma_page_put()
403 __ttm_dma_free_page(pool, d_page); ttm_dma_page_put()
407 * Free pages from pool.
412 * @pool: to free the pages from
413 * @nr_free: number of pages to free (FREE_ALL_PAGES frees every page in the pool)
416 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, ttm_dma_page_pool_free() argument
432 pool->dev_name, pool->name, current->pid, ttm_dma_page_pool_free()
443 pr_err("%s: Failed to allocate memory for pool free operation\n", ttm_dma_page_pool_free()
444 pool->dev_name); ttm_dma_page_pool_free()
449 spin_lock_irqsave(&pool->lock, irq_flags); ttm_dma_page_pool_free()
452 list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, ttm_dma_page_pool_free()
464 ttm_pool_update_free_locked(pool, freed_pages); ttm_dma_page_pool_free()
467 * we unlock the pool to prevent stalling. ttm_dma_page_pool_free()
469 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_dma_page_pool_free()
471 ttm_dma_pages_put(pool, &d_pages, pages_to_free, ttm_dma_page_pool_free()
499 /* remove range of pages from the pool */ ttm_dma_page_pool_free()
501 ttm_pool_update_free_locked(pool, freed_pages); ttm_dma_page_pool_free()
505 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_dma_page_pool_free()
508 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); ttm_dma_page_pool_free()
518 struct dma_pool *pool; ttm_dma_free_pool() local
527 pool = p->pool; ttm_dma_free_pool()
528 if (pool->type != type) ttm_dma_free_pool()
536 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { ttm_dma_free_pool()
537 if (pool->type != type) ttm_dma_free_pool()
541 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true); ttm_dma_free_pool()
542 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); ttm_dma_free_pool()
547 list_del(&pool->pools); ttm_dma_free_pool()
548 kfree(pool); ttm_dma_free_pool()
556 * Albeit the pool might have already been freed earlier.
560 struct dma_pool *pool = *(struct dma_pool **)res; ttm_dma_pool_release() local
562 if (pool) ttm_dma_pool_release()
563 ttm_dma_free_pool(dev, pool->type); ttm_dma_pool_release()
577 struct dma_pool *pool = NULL, **ptr; ttm_dma_pool_init() local
591 pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL, ttm_dma_pool_init()
593 if (!pool) ttm_dma_pool_init()
603 sec_pool->pool = pool; ttm_dma_pool_init()
605 INIT_LIST_HEAD(&pool->free_list); ttm_dma_pool_init()
606 INIT_LIST_HEAD(&pool->inuse_list); ttm_dma_pool_init()
607 INIT_LIST_HEAD(&pool->pools); ttm_dma_pool_init()
608 spin_lock_init(&pool->lock); ttm_dma_pool_init()
609 pool->dev = dev; ttm_dma_pool_init()
610 pool->npages_free = pool->npages_in_use = 0; ttm_dma_pool_init()
611 pool->nfrees = 0; ttm_dma_pool_init()
612 pool->gfp_flags = flags; ttm_dma_pool_init()
613 pool->size = PAGE_SIZE; ttm_dma_pool_init()
614 pool->type = type; ttm_dma_pool_init()
615 pool->nrefills = 0; ttm_dma_pool_init()
616 p = pool->name; ttm_dma_pool_init()
619 p += snprintf(p, sizeof(pool->name) - (p - pool->name), ttm_dma_pool_init()
626 snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s", ttm_dma_pool_init()
633 list_add(&pool->pools, &dev->dma_pools); ttm_dma_pool_init()
636 *ptr = pool; ttm_dma_pool_init()
639 return pool; ttm_dma_pool_init()
643 kfree(pool); ttm_dma_pool_init()
650 struct dma_pool *pool, *tmp, *found = NULL; ttm_dma_find_pool() local
666 list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) { ttm_dma_find_pool()
667 if (pool->type != type) ttm_dma_find_pool()
669 found = pool; ttm_dma_find_pool()
678 * pool.
680 static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool, ttm_dma_handle_caching_state_failure() argument
698 __ttm_dma_free_page(pool, d_page); list_for_each_entry_safe()
713 static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, ttm_dma_pool_alloc_new_pages() argument
730 pool->dev_name); ttm_dma_pool_alloc_new_pages()
736 pool->dev_name, pool->name, current->pid, count); ttm_dma_pool_alloc_new_pages()
740 dma_p = __ttm_dma_alloc_page(pool); ttm_dma_pool_alloc_new_pages()
743 pool->dev_name, i); ttm_dma_pool_alloc_new_pages()
745 /* store already allocated pages in the pool after ttm_dma_pool_alloc_new_pages()
748 r = ttm_set_pages_caching(pool, caching_array, ttm_dma_pool_alloc_new_pages()
752 pool, d_pages, caching_array, ttm_dma_pool_alloc_new_pages()
769 r = ttm_set_pages_caching(pool, caching_array, ttm_dma_pool_alloc_new_pages()
773 pool, d_pages, caching_array, ttm_dma_pool_alloc_new_pages()
784 r = ttm_set_pages_caching(pool, caching_array, cpages); ttm_dma_pool_alloc_new_pages()
786 ttm_dma_handle_caching_state_failure(pool, d_pages, ttm_dma_pool_alloc_new_pages()
797 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, ttm_dma_page_pool_fill_locked() argument
801 int r = pool->npages_free; ttm_dma_page_pool_fill_locked()
803 if (count > pool->npages_free) { ttm_dma_page_pool_fill_locked()
808 spin_unlock_irqrestore(&pool->lock, *irq_flags); ttm_dma_page_pool_fill_locked()
812 r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count); ttm_dma_page_pool_fill_locked()
814 spin_lock_irqsave(&pool->lock, *irq_flags); ttm_dma_page_pool_fill_locked()
817 list_splice(&d_pages, &pool->free_list); ttm_dma_page_pool_fill_locked()
818 ++pool->nrefills; ttm_dma_page_pool_fill_locked()
819 pool->npages_free += count; ttm_dma_page_pool_fill_locked()
825 pr_err("%s: Failed to fill %s pool (r:%d)!\n", ttm_dma_page_pool_fill_locked()
826 pool->dev_name, pool->name, r); ttm_dma_page_pool_fill_locked()
831 list_splice_tail(&d_pages, &pool->free_list); ttm_dma_page_pool_fill_locked()
832 pool->npages_free += cpages; ttm_dma_page_pool_fill_locked()
844 static int ttm_dma_pool_get_pages(struct dma_pool *pool, ttm_dma_pool_get_pages() argument
853 spin_lock_irqsave(&pool->lock, irq_flags); ttm_dma_pool_get_pages()
854 count = ttm_dma_page_pool_fill_locked(pool, &irq_flags); ttm_dma_pool_get_pages()
856 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); ttm_dma_pool_get_pages()
862 pool->npages_in_use += 1; ttm_dma_pool_get_pages()
863 pool->npages_free -= 1; ttm_dma_pool_get_pages()
865 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_dma_pool_get_pages()
877 struct dma_pool *pool; ttm_dma_populate() local
894 pool = ttm_dma_find_pool(dev, type); ttm_dma_populate()
895 if (!pool) { ttm_dma_populate()
896 pool = ttm_dma_pool_init(dev, gfp_flags, type); ttm_dma_populate()
897 if (IS_ERR_OR_NULL(pool)) { ttm_dma_populate()
904 ret = ttm_dma_pool_get_pages(pool, ttm_dma, i); ttm_dma_populate()
931 /* Put all pages in pages list to correct pool to wait for reuse */ ttm_dma_unpopulate()
935 struct dma_pool *pool; ttm_dma_unpopulate() local
943 pool = ttm_dma_find_pool(dev, type); ttm_dma_unpopulate()
944 if (!pool) ttm_dma_unpopulate()
947 is_cached = (ttm_dma_find_pool(pool->dev, ttm_dma_unpopulate()
948 ttm_to_type(ttm->page_flags, tt_cached)) == pool); ttm_dma_unpopulate()
956 spin_lock_irqsave(&pool->lock, irq_flags); ttm_dma_unpopulate()
957 pool->npages_in_use -= count; ttm_dma_unpopulate()
959 pool->nfrees += count; ttm_dma_unpopulate()
961 pool->npages_free += count; ttm_dma_unpopulate()
962 list_splice(&ttm_dma->pages_list, &pool->free_list); ttm_dma_unpopulate()
964 if (pool->npages_free > _manager->options.max_size) { ttm_dma_unpopulate()
965 npages = pool->npages_free - _manager->options.max_size; ttm_dma_unpopulate()
972 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_dma_unpopulate()
978 ttm_dma_page_put(pool, d_page); ttm_dma_unpopulate()
994 /* shrink pool if necessary (only on !is_cached pools) */ ttm_dma_unpopulate()
996 ttm_dma_page_pool_free(pool, npages, false); ttm_dma_unpopulate()
1002 * Callback for mm to request pool to reduce number of page held.
1006 * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
1039 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); ttm_dma_pool_shrink_scan()
1043 p->pool->dev_name, p->pool->name, current->pid, ttm_dma_pool_shrink_scan()
1060 count += p->pool->npages_free; ttm_dma_pool_shrink_count()
1084 pr_info("Initializing DMA pool allocator\n"); ttm_dma_page_alloc_init()
1114 pr_info("Finalizing DMA pool allocator\n"); ttm_dma_page_alloc_fini()
1118 dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name, ttm_dma_page_alloc_fini()
1121 ttm_dma_pool_match, p->pool)); ttm_dma_page_alloc_fini()
1122 ttm_dma_free_pool(p->dev, p->pool->type); ttm_dma_page_alloc_fini()
1131 struct dma_pool *pool = NULL; ttm_dma_page_alloc_debugfs() local
1132 char *h[] = {"pool", "refills", "pages freed", "inuse", "available", ttm_dma_page_alloc_debugfs()
1136 seq_printf(m, "No pool allocator running.\n"); ttm_dma_page_alloc_debugfs()
1146 pool = p->pool; ttm_dma_page_alloc_debugfs()
1148 pool->name, pool->nrefills, ttm_dma_page_alloc_debugfs()
1149 pool->nfrees, pool->npages_in_use, ttm_dma_page_alloc_debugfs()
1150 pool->npages_free, ttm_dma_page_alloc_debugfs()
1151 pool->dev_name); ttm_dma_page_alloc_debugfs()
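ttm_dma_pool_init()/ttm_dma_pool_release() above attach each DMA pool to its struct device through devres, so the pool is torn down automatically when the device goes away. The sketch below shows that devres idiom in isolation, assuming a single pointer is all the devres payload needs; bar_pool and bar_pool_create are invented names, not TTM functions.

#include <linux/device.h>
#include <linux/slab.h>

struct bar_pool {		/* hypothetical pool object */
	unsigned long nallocs;
};

/* Called by the driver core when the owning device is released. */
static void bar_pool_release(struct device *dev, void *res)
{
	struct bar_pool *pool = *(struct bar_pool **)res;

	kfree(pool);
}

static struct bar_pool *bar_pool_create(struct device *dev)
{
	struct bar_pool **ptr, *pool;

	/* The devres payload is just a pointer to the real pool. */
	ptr = devres_alloc(bar_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		devres_free(ptr);
		return NULL;
	}

	*ptr = pool;
	devres_add(dev, ptr);	/* freed via bar_pool_release() with the device */
	return pool;
}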
H A Dttm_page_alloc.c28 /* simple list based uncached page pool
64 * @lock: Protects the shared pool from concurrent access. Must be used with
65 * irqsave/irqrestore variants because pool allocator may be called from
70 * @npages: Number of pages in pool.
84 * Limits for the pool. They are handled without locks because only place where
100 * Manager is read only object for pool code so it doesn't need locking.
102 * @free_interval: minimum number of jiffies between freeing pages from pool.
103 * @page_alloc_inited: reference counting for pool allocation.
104 * @work: Work that is used to shrink the pool. Work is only run when there is
108 * @pools: All pool objects in use.
255 * Select the right pool or requested caching state and ttm flags. */ ttm_get_pool()
285 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, ttm_pool_update_free_locked() argument
288 pool->npages -= freed_pages; ttm_pool_update_free_locked()
289 pool->nfrees += freed_pages; ttm_pool_update_free_locked()
293 * Free pages from pool.
298 * @pool: to free the pages from
299 * @nr_free: number of pages to free (FREE_ALL_PAGES frees every page in the pool)
302 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, ttm_page_pool_free() argument
321 pr_err("Failed to allocate memory for pool free operation\n"); ttm_page_pool_free()
326 spin_lock_irqsave(&pool->lock, irq_flags); ttm_page_pool_free()
328 list_for_each_entry_reverse(p, &pool->list, lru) { ttm_page_pool_free()
335 /* remove range of pages from the pool */ ttm_page_pool_free()
336 __list_del(p->lru.prev, &pool->list); ttm_page_pool_free()
338 ttm_pool_update_free_locked(pool, freed_pages); ttm_page_pool_free()
341 * we unlock the pool to prevent stalling. ttm_page_pool_free()
343 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_page_pool_free()
369 /* remove range of pages from the pool */ ttm_page_pool_free()
371 __list_del(&p->lru, &pool->list); ttm_page_pool_free()
373 ttm_pool_update_free_locked(pool, freed_pages); ttm_page_pool_free()
377 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_page_pool_free()
388 * Callback for mm to request pool to reduce number of page held.
392 * This code is crying out for a shrinker per pool....
401 struct ttm_page_pool *pool; ttm_pool_shrink_scan() local
408 /* select start pool in round robin fashion */ ttm_pool_shrink_scan()
413 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; ttm_pool_shrink_scan()
415 shrink_pages = ttm_page_pool_free(pool, nr_free, true); ttm_pool_shrink_scan()
473 * pool.
517 /* store already allocated pages in the pool after ttm_alloc_new_pages()
570 * Fill the given pool if there aren't enough pages and the requested number of
573 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, ttm_page_pool_fill_locked() argument
581 * Only allow one pool fill operation at a time. ttm_page_pool_fill_locked()
582 * If pool doesn't have enough pages for the allocation new pages are ttm_page_pool_fill_locked()
583 * allocated from outside of pool. ttm_page_pool_fill_locked()
585 if (pool->fill_lock) ttm_page_pool_fill_locked()
588 pool->fill_lock = true; ttm_page_pool_fill_locked()
591 * pages in a pool we fill the pool up first. */ ttm_page_pool_fill_locked()
593 && count > pool->npages) { ttm_page_pool_fill_locked()
599 * drop the pool->lock. ttm_page_pool_fill_locked()
601 spin_unlock_irqrestore(&pool->lock, *irq_flags); ttm_page_pool_fill_locked()
604 r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, ttm_page_pool_fill_locked()
606 spin_lock_irqsave(&pool->lock, *irq_flags); ttm_page_pool_fill_locked()
609 list_splice(&new_pages, &pool->list); ttm_page_pool_fill_locked()
610 ++pool->nrefills; ttm_page_pool_fill_locked()
611 pool->npages += alloc_size; ttm_page_pool_fill_locked()
613 pr_err("Failed to fill pool (%p)\n", pool); ttm_page_pool_fill_locked()
614 /* If we have any pages left put them to the pool. */ ttm_page_pool_fill_locked()
615 list_for_each_entry(p, &pool->list, lru) { ttm_page_pool_fill_locked()
618 list_splice(&new_pages, &pool->list); ttm_page_pool_fill_locked()
619 pool->npages += cpages; ttm_page_pool_fill_locked()
623 pool->fill_lock = false; ttm_page_pool_fill_locked()
627 * Cut 'count' number of pages from the pool and put them on the return list.
631 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, ttm_page_pool_get_pages() argument
641 spin_lock_irqsave(&pool->lock, irq_flags); ttm_page_pool_get_pages()
642 ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags); ttm_page_pool_get_pages()
644 if (count >= pool->npages) { ttm_page_pool_get_pages()
645 /* take all pages from the pool */ ttm_page_pool_get_pages()
646 list_splice_init(&pool->list, pages); ttm_page_pool_get_pages()
647 count -= pool->npages; ttm_page_pool_get_pages()
648 pool->npages = 0; ttm_page_pool_get_pages()
652 * pool to begin and halve it to reduce search space. */ ttm_page_pool_get_pages()
653 if (count <= pool->npages/2) { ttm_page_pool_get_pages()
655 list_for_each(p, &pool->list) { ttm_page_pool_get_pages()
660 i = pool->npages + 1; ttm_page_pool_get_pages()
661 list_for_each_prev(p, &pool->list) { ttm_page_pool_get_pages()
666 /* Cut 'count' number of pages from the pool */ ttm_page_pool_get_pages()
667 list_cut_position(pages, &pool->list, p); ttm_page_pool_get_pages()
668 pool->npages -= count; ttm_page_pool_get_pages()
671 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_page_pool_get_pages()
675 /* Put all pages in pages list to correct pool to wait for reuse */ ttm_put_pages()
680 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); ttm_put_pages() local
683 if (pool == NULL) { ttm_put_pages()
684 /* No pool for this memory type so free the pages */ ttm_put_pages()
696 spin_lock_irqsave(&pool->lock, irq_flags); ttm_put_pages()
701 list_add_tail(&pages[i]->lru, &pool->list); ttm_put_pages()
703 pool->npages++; ttm_put_pages()
706 /* Check that we don't go over the pool limit */ ttm_put_pages()
708 if (pool->npages > _manager->options.max_size) { ttm_put_pages()
709 npages = pool->npages - _manager->options.max_size; ttm_put_pages()
715 spin_unlock_irqrestore(&pool->lock, irq_flags); ttm_put_pages()
717 ttm_page_pool_free(pool, npages, false); ttm_put_pages()
727 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); ttm_get_pages() local
738 /* No pool for cached pages */ ttm_get_pages()
739 if (pool == NULL) { ttm_get_pages()
758 /* combine zero flag to pool flags */ ttm_get_pages()
759 gfp_flags |= pool->gfp_flags; ttm_get_pages()
761 /* First we take pages from the pool */ ttm_get_pages()
763 npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages); ttm_get_pages()
769 /* clear the pages coming from the pool if requested */ ttm_get_pages()
779 /* If pool didn't have enough pages allocate new one. */ ttm_get_pages()
781 /* ttm_alloc_new_pages doesn't reference pool so we can run ttm_get_pages()
791 * the pool. */ ttm_get_pages()
801 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, ttm_page_pool_init_locked() argument
804 spin_lock_init(&pool->lock); ttm_page_pool_init_locked()
805 pool->fill_lock = false; ttm_page_pool_init_locked()
806 INIT_LIST_HEAD(&pool->list); ttm_page_pool_init_locked()
807 pool->npages = pool->nfrees = 0; ttm_page_pool_init_locked()
808 pool->gfp_flags = flags; ttm_page_pool_init_locked()
809 pool->name = name; ttm_page_pool_init_locked()
818 pr_info("Initializing pool allocator\n"); ttm_page_alloc_init()
837 &glob->kobj, "pool"); ttm_page_alloc_init()
853 pr_info("Finalizing pool allocator\n"); ttm_page_alloc_fini()
924 char *h[] = {"pool", "refills", "pages freed", "size"}; ttm_page_alloc_debugfs()
926 seq_printf(m, "No pool allocator running.\n"); ttm_page_alloc_debugfs()
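Both TTM pool allocators register a shrinker so the VM can reclaim cached pages (see ttm_pool_shrink_scan()/ttm_dma_pool_shrink_scan() above). The fragment below is a hedged sketch of the count_objects/scan_objects interface of this kernel version, trimming a hypothetical cached page count; it is not the TTM implementation, and the baz_* names are made up.

#include <linux/kernel.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(baz_lock);
static unsigned long baz_npages;	/* pages currently cached in the pool */

/* Report how many objects could be freed if asked. */
static unsigned long baz_shrink_count(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	return baz_npages;
}

/* Free up to sc->nr_to_scan objects and report how many actually went. */
static unsigned long baz_shrink_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	unsigned long freed;

	spin_lock(&baz_lock);
	freed = min(baz_npages, sc->nr_to_scan);
	baz_npages -= freed;		/* real code would hand pages back here */
	spin_unlock(&baz_lock);

	return freed ? freed : SHRINK_STOP;
}

static struct shrinker baz_shrinker = {
	.count_objects	= baz_shrink_count,
	.scan_objects	= baz_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int baz_pool_init(void)
{
	return register_shrinker(&baz_shrinker);
}

static void baz_pool_fini(void)
{
	unregister_shrinker(&baz_shrinker);
}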
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_batch_pool.c28 * DOC: batch pool
36 * The batch pool framework provides a mechanism for the driver to manage a
42 * i915_gem_batch_pool_init() - initialize a batch buffer pool
44 * @pool: the batch buffer pool
47 struct i915_gem_batch_pool *pool) i915_gem_batch_pool_init()
49 pool->dev = dev; i915_gem_batch_pool_init()
50 INIT_LIST_HEAD(&pool->cache_list); i915_gem_batch_pool_init()
54 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
55 * @pool: the pool to clean up
59 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) i915_gem_batch_pool_fini() argument
61 WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); i915_gem_batch_pool_fini()
63 while (!list_empty(&pool->cache_list)) { i915_gem_batch_pool_fini()
65 list_first_entry(&pool->cache_list, i915_gem_batch_pool_fini()
77 * i915_gem_batch_pool_get() - select a buffer from the pool
78 * @pool: the batch buffer pool
81 * Finds or allocates a batch buffer in the pool with at least the requested
90 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, i915_gem_batch_pool_get() argument
96 WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); i915_gem_batch_pool_get()
99 &pool->cache_list, batch_pool_list) { i915_gem_batch_pool_get()
114 * might be to bucket the pool objects based on size. i915_gem_batch_pool_get()
124 obj = i915_gem_alloc_object(pool->dev, size); i915_gem_batch_pool_get()
128 list_add_tail(&obj->batch_pool_list, &pool->cache_list); i915_gem_batch_pool_get()
132 list_move_tail(&obj->batch_pool_list, &pool->cache_list); i915_gem_batch_pool_get()
46 i915_gem_batch_pool_init(struct drm_device *dev, struct i915_gem_batch_pool *pool) i915_gem_batch_pool_init() argument
/linux-4.1.27/drivers/dma/
H A Dcoh901318.h46 * coh901318_pool_create() - Creates an dma pool for lli:s
47 * @pool: pool handle
49 * @lli_nbr: number of lli:s in the pool
53 int coh901318_pool_create(struct coh901318_pool *pool,
58 * coh901318_pool_destroy() - Destroys the dma pool
59 * @pool: pool handle
62 int coh901318_pool_destroy(struct coh901318_pool *pool);
67 * @pool: pool handle
72 coh901318_lli_alloc(struct coh901318_pool *pool,
76 * coh901318_lli_free() - Returns the linked list items to the pool
77 * @pool: pool handle
80 void coh901318_lli_free(struct coh901318_pool *pool,
85 * @pool: pool handle
95 coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
102 * @pool: pool handle
113 coh901318_lli_fill_single(struct coh901318_pool *pool,
121 * @pool: pool handle
134 coh901318_lli_fill_sg(struct coh901318_pool *pool,
H A Dcoh901318_lli.c19 #define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
20 #define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
22 #define DEBUGFS_POOL_COUNTER_RESET(pool)
23 #define DEBUGFS_POOL_COUNTER_ADD(pool, add)
35 int coh901318_pool_create(struct coh901318_pool *pool, coh901318_pool_create() argument
39 spin_lock_init(&pool->lock); coh901318_pool_create()
40 pool->dev = dev; coh901318_pool_create()
41 pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0); coh901318_pool_create()
43 DEBUGFS_POOL_COUNTER_RESET(pool); coh901318_pool_create()
47 int coh901318_pool_destroy(struct coh901318_pool *pool) coh901318_pool_destroy() argument
50 dma_pool_destroy(pool->dmapool); coh901318_pool_destroy()
55 coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) coh901318_lli_alloc() argument
66 spin_lock(&pool->lock); coh901318_lli_alloc()
68 head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy); coh901318_lli_alloc()
73 DEBUGFS_POOL_COUNTER_ADD(pool, 1); coh901318_lli_alloc()
83 lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy); coh901318_lli_alloc()
88 DEBUGFS_POOL_COUNTER_ADD(pool, 1); coh901318_lli_alloc()
97 spin_unlock(&pool->lock); coh901318_lli_alloc()
102 spin_unlock(&pool->lock); coh901318_lli_alloc()
107 spin_unlock(&pool->lock); coh901318_lli_alloc()
108 coh901318_lli_free(pool, &head); coh901318_lli_alloc()
112 void coh901318_lli_free(struct coh901318_pool *pool, coh901318_lli_free() argument
126 spin_lock(&pool->lock); coh901318_lli_free()
130 dma_pool_free(pool->dmapool, l, l->phy_this); coh901318_lli_free()
131 DEBUGFS_POOL_COUNTER_ADD(pool, -1); coh901318_lli_free()
134 dma_pool_free(pool->dmapool, l, l->phy_this); coh901318_lli_free()
135 DEBUGFS_POOL_COUNTER_ADD(pool, -1); coh901318_lli_free()
137 spin_unlock(&pool->lock); coh901318_lli_free()
142 coh901318_lli_fill_memcpy(struct coh901318_pool *pool, coh901318_lli_fill_memcpy() argument
175 coh901318_lli_fill_single(struct coh901318_pool *pool, coh901318_lli_fill_single() argument
231 coh901318_lli_fill_sg(struct coh901318_pool *pool, coh901318_lli_fill_sg() argument
249 spin_lock(&pool->lock); coh901318_lli_fill_sg()
307 spin_unlock(&pool->lock);
311 spin_unlock(&pool->lock);
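coh901318_lli.c builds its lli pool directly on the generic dma_pool API (the dma_pool_create/alloc/free calls above). The sketch below shows that underlying API on its own, with an invented descriptor type (struct qux_desc); a real driver would keep the dma_addr_t and program it into the hardware instead of freeing it straight away.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct qux_desc {			/* hypothetical hardware descriptor */
	u32 ctrl;
	u32 next;
};

static int qux_use_pool(struct device *dev)
{
	struct dma_pool *pool;
	struct qux_desc *desc;
	dma_addr_t phys;

	/* Fixed-size, DMA-coherent elements, aligned to the descriptor size. */
	pool = dma_pool_create("qux_desc", dev, sizeof(*desc),
			       __alignof__(struct qux_desc), 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &phys);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	desc->ctrl = 0;
	desc->next = 0;			/* phys is what the hardware would see */

	dma_pool_free(pool, desc, phys);
	dma_pool_destroy(pool);
	return 0;
}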
H A Dmmp_tdma.c130 struct gen_pool *pool; member in struct:mmp_tdma_chan
363 gpool = tdmac->pool; mmp_tdma_free_descriptor()
413 gpool = tdmac->pool; mmp_tdma_alloc_descriptor()
544 int type, struct gen_pool *pool) mmp_tdma_chan_init()
566 tdmac->pool = pool; mmp_tdma_chan_init()
632 struct gen_pool *pool = NULL; mmp_tdma_probe() local
660 pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0); mmp_tdma_probe()
662 pool = sram_get_gpool("asram"); mmp_tdma_probe()
663 if (!pool) { mmp_tdma_probe()
664 dev_err(&pdev->dev, "asram pool not available\n"); mmp_tdma_probe()
679 ret = mmp_tdma_chan_init(tdev, i, irq, type, pool); mmp_tdma_probe()
542 mmp_tdma_chan_init(struct mmp_tdma_device *tdev, int idx, int irq, int type, struct gen_pool *pool) mmp_tdma_chan_init() argument
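mmp_tdma.c above carves its descriptors out of a gen_pool that wraps on-chip SRAM. A minimal sketch of the genalloc calls involved follows; the SRAM mapping and the quux_* name are assumptions for illustration, not code from this driver.

#include <linux/errno.h>
#include <linux/genalloc.h>

/* sram_virt/sram_size stand in for an already-mapped on-chip SRAM region. */
static int quux_sram_pool_demo(void *sram_virt, size_t sram_size)
{
	struct gen_pool *pool;
	unsigned long va;

	/* The pool hands out chunks in units of 2^6 = 64 bytes. */
	pool = gen_pool_create(6, -1);
	if (!pool)
		return -ENOMEM;

	/* Make the SRAM region available to the allocator. */
	if (gen_pool_add(pool, (unsigned long)sram_virt, sram_size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	va = gen_pool_alloc(pool, 256);		/* carve 256 bytes out of the SRAM */
	if (!va) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	gen_pool_free(pool, va, 256);		/* size must match the allocation */
	gen_pool_destroy(pool);
	return 0;
}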
H A Dmv_xor.h88 * @lock: serializes enqueue/dequeue operations to the descriptors pool
97 * @slots_allocated: records the actual size of the descriptor slot pool
102 spinlock_t lock; /* protects the descriptor slot pool */
132 * @idx: pool index
H A Dpch_dma.c130 struct pci_pool *pool; member in struct:pch_dma
447 desc = pci_pool_alloc(pd->pool, flags, &addr); pdc_alloc_desc()
560 pci_pool_free(pd->pool, desc, desc->txd.phys); pd_free_chan_resources()
891 pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, pch_dma_probe()
893 if (!pd->pool) { pch_dma_probe()
942 pci_pool_destroy(pd->pool); pch_dma_probe()
974 pci_pool_destroy(pd->pool); pch_dma_remove()
H A Dsun6i-dma.c161 struct dma_pool *pool; member in struct:sun6i_dma_dev
349 dma_pool_free(sdev->pool, v_lli, p_lli); sun6i_dma_free_desc()
527 v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); sun6i_dma_prep_dma_memcpy()
587 v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); for_each_sg()
638 dma_pool_free(sdev->pool, v_lli, p_lli);
641 dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
939 sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, sun6i_dma_probe()
941 if (!sdc->pool) { sun6i_dma_probe()
942 dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); sun6i_dma_probe()
H A Ddmaengine.c984 mempool_t *pool; member in struct:dmaengine_unmap_pool
1039 mempool_free(unmap, __get_unmap_pool(cnt)->pool); dmaengine_unmap()
1056 if (p->pool) dmaengine_destroy_unmap_pool()
1057 mempool_destroy(p->pool); dmaengine_destroy_unmap_pool()
1058 p->pool = NULL; dmaengine_destroy_unmap_pool()
1080 p->pool = mempool_create_slab_pool(1, p->cache); dmaengine_init_unmap_pool()
1081 if (!p->pool) dmaengine_init_unmap_pool()
1097 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); dmaengine_get_unmap_data()
/linux-4.1.27/net/9p/
H A Dutil.c38 * @lock: protects the pool
39 * @pool: idr to allocate tag id from
45 struct idr pool; member in struct:p9_idpool
49 * p9_idpool_create - create a new per-connection id pool
62 idr_init(&p->pool); p9_idpool_create()
69 * p9_idpool_destroy - destroy a per-connection id pool
75 idr_destroy(&p->pool); p9_idpool_destroy()
81 * p9_idpool_get - allocate numeric id from pool
82 * @p: pool to allocate from
97 i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT); p9_idpool_get()
104 p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p); p9_idpool_get()
110 * p9_idpool_put - release numeric id from pool
112 * @p: pool to release id into
122 p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p); p9_idpool_put()
125 idr_remove(&p->pool, id); p9_idpool_put()
133 * @p: pool to check
138 return idr_find(&p->pool, id) != NULL; p9_idpool_check()
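The p9_idpool helpers above wrap an idr in a small per-connection id pool. The sketch below shows the intended create/get/check/put/destroy cycle, assuming these helpers are exposed via net/9p/9p.h and that p9_idpool_get() returns -1 on failure, as the fragments suggest; it is illustrative only.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <net/9p/9p.h>

static int demo_tag_cycle(void)
{
	struct p9_idpool *pool;
	int tag;

	pool = p9_idpool_create();
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	tag = p9_idpool_get(pool);		/* -1 if no id is available */
	if (tag < 0) {
		p9_idpool_destroy(pool);
		return -ENOMEM;
	}

	if (!p9_idpool_check(tag, pool))	/* should report the id as in use */
		pr_warn("demo: freshly allocated id not found in pool\n");

	p9_idpool_put(tag, pool);		/* hand the id back */
	p9_idpool_destroy(pool);
	return 0;
}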
/linux-4.1.27/sound/core/seq/oss/
H A Dseq_oss_writeq.c40 struct snd_seq_client_pool pool; snd_seq_oss_writeq_new() local
51 memset(&pool, 0, sizeof(pool)); snd_seq_oss_writeq_new()
52 pool.client = dp->cseq; snd_seq_oss_writeq_new()
53 pool.output_pool = maxlen; snd_seq_oss_writeq_new()
54 pool.output_room = maxlen / 2; snd_seq_oss_writeq_new()
56 snd_seq_oss_control(dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool); snd_seq_oss_writeq_new()
149 * return the unused pool size
154 struct snd_seq_client_pool pool; snd_seq_oss_writeq_get_free_size() local
155 pool.client = q->dp->cseq; snd_seq_oss_writeq_get_free_size()
156 snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool); snd_seq_oss_writeq_get_free_size()
157 return pool.output_free; snd_seq_oss_writeq_get_free_size()
167 struct snd_seq_client_pool pool; snd_seq_oss_writeq_set_output() local
168 pool.client = q->dp->cseq; snd_seq_oss_writeq_set_output()
169 snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool); snd_seq_oss_writeq_set_output()
170 pool.output_room = val; snd_seq_oss_writeq_set_output()
171 snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool); snd_seq_oss_writeq_set_output()
/linux-4.1.27/include/linux/ceph/
H A Dmsgpool.h13 mempool_t *pool; member in struct:ceph_msgpool
18 extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
21 extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
H A Dosdmap.h23 uint64_t pool; member in struct:ceph_pg
44 static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) ceph_can_shift_osds() argument
46 switch (pool->type) { ceph_can_shift_osds()
57 s64 pool; member in struct:ceph_object_locator
188 pgid->pool = ceph_decode_64(p); ceph_decode_pgid()
H A Drados.h62 __le32 pool; /* object pool */ member in struct:ceph_pg_v1
66 * pg_pool is a set of pgs storing a pool of objects
82 #define CEPH_NOPOOL ((__u64) (-1)) /* pool id not defined */
390 CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */
/linux-4.1.27/net/rds/
H A Dib_rdma.c49 struct rds_ib_mr_pool *pool; member in struct:rds_ib_mr
65 * Our own little FMR pool
86 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
217 struct rds_ib_mr_pool *pool; rds_ib_create_mr_pool() local
219 pool = kzalloc(sizeof(*pool), GFP_KERNEL); rds_ib_create_mr_pool()
220 if (!pool) rds_ib_create_mr_pool()
223 init_llist_head(&pool->free_list); rds_ib_create_mr_pool()
224 init_llist_head(&pool->drop_list); rds_ib_create_mr_pool()
225 init_llist_head(&pool->clean_list); rds_ib_create_mr_pool()
226 mutex_init(&pool->flush_lock); rds_ib_create_mr_pool()
227 init_waitqueue_head(&pool->flush_wait); rds_ib_create_mr_pool()
228 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); rds_ib_create_mr_pool()
230 pool->fmr_attr.max_pages = fmr_message_size; rds_ib_create_mr_pool()
231 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; rds_ib_create_mr_pool()
232 pool->fmr_attr.page_shift = PAGE_SHIFT; rds_ib_create_mr_pool()
233 pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4; rds_ib_create_mr_pool()
240 pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4; rds_ib_create_mr_pool()
241 pool->max_items = rds_ibdev->max_fmrs; rds_ib_create_mr_pool()
243 return pool; rds_ib_create_mr_pool()
248 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; rds_ib_get_mr_info() local
250 iinfo->rdma_mr_max = pool->max_items; rds_ib_get_mr_info()
251 iinfo->rdma_mr_size = pool->fmr_attr.max_pages; rds_ib_get_mr_info()
254 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) rds_ib_destroy_mr_pool() argument
256 cancel_delayed_work_sync(&pool->flush_worker); rds_ib_destroy_mr_pool()
257 rds_ib_flush_mr_pool(pool, 1, NULL); rds_ib_destroy_mr_pool()
258 WARN_ON(atomic_read(&pool->item_count)); rds_ib_destroy_mr_pool()
259 WARN_ON(atomic_read(&pool->free_pinned)); rds_ib_destroy_mr_pool()
260 kfree(pool); rds_ib_destroy_mr_pool()
263 static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool) rds_ib_reuse_fmr() argument
272 ret = llist_del_first(&pool->clean_list); rds_ib_reuse_fmr()
295 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; rds_ib_alloc_fmr() local
299 if (atomic_read(&pool->dirty_count) >= pool->max_items / 10) rds_ib_alloc_fmr()
300 schedule_delayed_work(&pool->flush_worker, 10); rds_ib_alloc_fmr()
303 ibmr = rds_ib_reuse_fmr(pool); rds_ib_alloc_fmr()
316 if (atomic_inc_return(&pool->item_count) <= pool->max_items) rds_ib_alloc_fmr()
319 atomic_dec(&pool->item_count); rds_ib_alloc_fmr()
328 rds_ib_flush_mr_pool(pool, 0, &ibmr); rds_ib_alloc_fmr()
346 &pool->fmr_attr); rds_ib_alloc_fmr()
363 atomic_dec(&pool->item_count); rds_ib_alloc_fmr()
506 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; rds_ib_teardown_mr() local
508 atomic_sub(pinned, &pool->free_pinned); rds_ib_teardown_mr()
512 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all) rds_ib_flush_goal() argument
516 item_count = atomic_read(&pool->item_count); rds_ib_flush_goal()
546 static void list_to_llist_nodes(struct rds_ib_mr_pool *pool, list_to_llist_nodes() argument
565 * Flush our pool of MRs.
570 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, rds_ib_flush_mr_pool() argument
586 while(!mutex_trylock(&pool->flush_lock)) { rds_ib_flush_mr_pool()
587 ibmr = rds_ib_reuse_fmr(pool); rds_ib_flush_mr_pool()
590 finish_wait(&pool->flush_wait, &wait); rds_ib_flush_mr_pool()
594 prepare_to_wait(&pool->flush_wait, &wait, rds_ib_flush_mr_pool()
596 if (llist_empty(&pool->clean_list)) rds_ib_flush_mr_pool()
599 ibmr = rds_ib_reuse_fmr(pool); rds_ib_flush_mr_pool()
602 finish_wait(&pool->flush_wait, &wait); rds_ib_flush_mr_pool()
606 finish_wait(&pool->flush_wait, &wait); rds_ib_flush_mr_pool()
608 mutex_lock(&pool->flush_lock); rds_ib_flush_mr_pool()
611 ibmr = rds_ib_reuse_fmr(pool); rds_ib_flush_mr_pool()
621 llist_append_to_list(&pool->drop_list, &unmap_list); rds_ib_flush_mr_pool()
622 llist_append_to_list(&pool->free_list, &unmap_list); rds_ib_flush_mr_pool()
624 llist_append_to_list(&pool->clean_list, &unmap_list); rds_ib_flush_mr_pool()
626 free_goal = rds_ib_flush_goal(pool, free_all); rds_ib_flush_mr_pool()
643 if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) { rds_ib_flush_mr_pool()
665 list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail); rds_ib_flush_mr_pool()
671 llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list); rds_ib_flush_mr_pool()
675 atomic_sub(unpinned, &pool->free_pinned); rds_ib_flush_mr_pool()
676 atomic_sub(ncleaned, &pool->dirty_count); rds_ib_flush_mr_pool()
677 atomic_sub(nfreed, &pool->item_count); rds_ib_flush_mr_pool()
680 mutex_unlock(&pool->flush_lock); rds_ib_flush_mr_pool()
681 if (waitqueue_active(&pool->flush_wait)) rds_ib_flush_mr_pool()
682 wake_up(&pool->flush_wait); rds_ib_flush_mr_pool()
689 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); rds_ib_mr_pool_flush_worker() local
691 rds_ib_flush_mr_pool(pool, 0, NULL); rds_ib_mr_pool_flush_worker()
698 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; rds_ib_free_mr() local
702 /* Return it to the pool's free list */ rds_ib_free_mr()
703 if (ibmr->remap_count >= pool->fmr_attr.max_maps) rds_ib_free_mr()
704 llist_add(&ibmr->llnode, &pool->drop_list); rds_ib_free_mr()
706 llist_add(&ibmr->llnode, &pool->free_list); rds_ib_free_mr()
708 atomic_add(ibmr->sg_len, &pool->free_pinned); rds_ib_free_mr()
709 atomic_inc(&pool->dirty_count); rds_ib_free_mr()
712 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || rds_ib_free_mr()
713 atomic_read(&pool->dirty_count) >= pool->max_items / 10) rds_ib_free_mr()
714 schedule_delayed_work(&pool->flush_worker, 10); rds_ib_free_mr()
718 rds_ib_flush_mr_pool(pool, 0, NULL); rds_ib_free_mr()
722 schedule_delayed_work(&pool->flush_worker, 10); rds_ib_free_mr()
735 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; rds_ib_flush_mrs() local
737 if (pool) rds_ib_flush_mrs()
738 rds_ib_flush_mr_pool(pool, 0, NULL); rds_ib_flush_mrs()
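The RDS IB excerpt above follows one pattern throughout: a lockless clean_list of unmapped MRs that callers try to recycle before allocating, bounded by an atomic item_count, with a mutex-serialized flush refilling the clean list. Below is a minimal, hypothetical sketch of just that reuse-or-allocate fast path; the names my_mr/my_mr_pool are made up, and the real code additionally serializes clean_list consumers against the flusher.

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/slab.h>

struct my_mr {
        struct llist_node llnode;
        /* mapping state would live here */
};

struct my_mr_pool {
        struct llist_head clean_list;   /* unmapped MRs ready for reuse */
        atomic_t item_count;            /* MRs currently in existence */
        int max_items;
};

static struct my_mr *my_mr_get(struct my_mr_pool *pool)
{
        struct llist_node *node = llist_del_first(&pool->clean_list);
        struct my_mr *mr;

        if (node)                               /* fast path: recycle a clean MR */
                return container_of(node, struct my_mr, llnode);

        /* slow path: allocate a new MR, but respect the pool limit */
        if (atomic_inc_return(&pool->item_count) > pool->max_items) {
                atomic_dec(&pool->item_count);
                return NULL;                    /* caller would flush the pool and retry */
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                atomic_dec(&pool->item_count);
        return mr;
}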
H A Diw_rdma.c46 struct rds_iw_mr_pool *pool; member in struct:rds_iw_mr
57 * Our own little MR pool
78 static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
80 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
81 static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
84 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
85 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
89 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
336 struct rds_iw_mr_pool *pool; rds_iw_create_mr_pool() local
338 pool = kzalloc(sizeof(*pool), GFP_KERNEL); rds_iw_create_mr_pool()
339 if (!pool) { rds_iw_create_mr_pool()
344 pool->device = rds_iwdev; rds_iw_create_mr_pool()
345 INIT_LIST_HEAD(&pool->dirty_list); rds_iw_create_mr_pool()
346 INIT_LIST_HEAD(&pool->clean_list); rds_iw_create_mr_pool()
347 mutex_init(&pool->flush_lock); rds_iw_create_mr_pool()
348 spin_lock_init(&pool->list_lock); rds_iw_create_mr_pool()
349 INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker); rds_iw_create_mr_pool()
351 pool->max_message_size = fastreg_message_size; rds_iw_create_mr_pool()
352 pool->max_items = fastreg_pool_size; rds_iw_create_mr_pool()
353 pool->max_free_pinned = pool->max_items * pool->max_message_size / 4; rds_iw_create_mr_pool()
354 pool->max_pages = fastreg_message_size; rds_iw_create_mr_pool()
361 pool->max_items_soft = pool->max_items * 3 / 4; rds_iw_create_mr_pool()
363 return pool; rds_iw_create_mr_pool()
368 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; rds_iw_get_mr_info() local
370 iinfo->rdma_mr_max = pool->max_items; rds_iw_get_mr_info()
371 iinfo->rdma_mr_size = pool->max_pages; rds_iw_get_mr_info()
374 void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool) rds_iw_destroy_mr_pool() argument
377 rds_iw_flush_mr_pool(pool, 1); rds_iw_destroy_mr_pool()
378 BUG_ON(atomic_read(&pool->item_count)); rds_iw_destroy_mr_pool()
379 BUG_ON(atomic_read(&pool->free_pinned)); rds_iw_destroy_mr_pool()
380 kfree(pool); rds_iw_destroy_mr_pool()
383 static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool) rds_iw_reuse_fmr() argument
388 spin_lock_irqsave(&pool->list_lock, flags); rds_iw_reuse_fmr()
389 if (!list_empty(&pool->clean_list)) { rds_iw_reuse_fmr()
390 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list); rds_iw_reuse_fmr()
393 spin_unlock_irqrestore(&pool->list_lock, flags); rds_iw_reuse_fmr()
400 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; rds_iw_alloc_mr() local
405 ibmr = rds_iw_reuse_fmr(pool); rds_iw_alloc_mr()
418 if (atomic_inc_return(&pool->item_count) <= pool->max_items) rds_iw_alloc_mr()
421 atomic_dec(&pool->item_count); rds_iw_alloc_mr()
430 rds_iw_flush_mr_pool(pool, 0); rds_iw_alloc_mr()
443 err = rds_iw_init_fastreg(pool, ibmr); rds_iw_alloc_mr()
452 rds_iw_destroy_fastreg(pool, ibmr); rds_iw_alloc_mr()
455 atomic_dec(&pool->item_count); rds_iw_alloc_mr()
477 * Flush our pool of MRs.
482 static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) rds_iw_flush_mr_pool() argument
493 mutex_lock(&pool->flush_lock); rds_iw_flush_mr_pool()
495 spin_lock_irqsave(&pool->list_lock, flags); rds_iw_flush_mr_pool()
497 list_splice_init(&pool->dirty_list, &unmap_list); rds_iw_flush_mr_pool()
499 list_splice_init(&pool->clean_list, &kill_list); rds_iw_flush_mr_pool()
500 spin_unlock_irqrestore(&pool->list_lock, flags); rds_iw_flush_mr_pool()
511 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, rds_iw_flush_mr_pool()
523 rds_iw_destroy_fastreg(pool, ibmr); rds_iw_flush_mr_pool()
531 spin_lock_irqsave(&pool->list_lock, flags); rds_iw_flush_mr_pool()
532 list_splice(&unmap_list, &pool->clean_list); rds_iw_flush_mr_pool()
533 spin_unlock_irqrestore(&pool->list_lock, flags); rds_iw_flush_mr_pool()
536 atomic_sub(unpinned, &pool->free_pinned); rds_iw_flush_mr_pool()
537 atomic_sub(ncleaned, &pool->dirty_count); rds_iw_flush_mr_pool()
538 atomic_sub(nfreed, &pool->item_count); rds_iw_flush_mr_pool()
540 mutex_unlock(&pool->flush_lock); rds_iw_flush_mr_pool()
546 struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker); rds_iw_mr_pool_flush_worker() local
548 rds_iw_flush_mr_pool(pool, 0); rds_iw_mr_pool_flush_worker()
554 struct rds_iw_mr_pool *pool = ibmr->device->mr_pool; rds_iw_free_mr() local
557 if (!pool) rds_iw_free_mr()
560 /* Return it to the pool's free list */ rds_iw_free_mr()
561 rds_iw_free_fastreg(pool, ibmr); rds_iw_free_mr()
564 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || rds_iw_free_mr()
565 atomic_read(&pool->dirty_count) >= pool->max_items / 10) rds_iw_free_mr()
566 queue_work(rds_wq, &pool->flush_worker); rds_iw_free_mr()
570 rds_iw_flush_mr_pool(pool, 0); rds_iw_free_mr()
574 queue_work(rds_wq, &pool->flush_worker); rds_iw_free_mr()
584 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; rds_iw_flush_mrs() local
586 if (pool) rds_iw_flush_mrs()
587 rds_iw_flush_mr_pool(pool, 0); rds_iw_flush_mrs()
662 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, rds_iw_init_fastreg() argument
665 struct rds_iw_device *rds_iwdev = pool->device; rds_iw_init_fastreg()
670 mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size); rds_iw_init_fastreg()
681 page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size); rds_iw_init_fastreg()
758 static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool, rds_iw_map_fastreg() argument
763 struct rds_iw_device *rds_iwdev = pool->device; rds_iw_map_fastreg()
777 if (mapping->m_sg.dma_len > pool->max_message_size) { rds_iw_map_fastreg()
800 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, rds_iw_free_fastreg() argument
814 spin_lock_irqsave(&pool->list_lock, flags); rds_iw_free_fastreg()
816 list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list); rds_iw_free_fastreg()
817 atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned); rds_iw_free_fastreg()
818 atomic_inc(&pool->dirty_count); rds_iw_free_fastreg()
820 spin_unlock_irqrestore(&pool->list_lock, flags); rds_iw_free_fastreg()
823 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, rds_iw_unmap_fastreg_list() argument
851 spin_lock_irqsave(&pool->list_lock, flags); list_for_each_entry_safe()
857 spin_unlock_irqrestore(&pool->list_lock, flags);
868 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, rds_iw_destroy_fastreg() argument
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-fpa.h77 * Structure describing the current state of an FPA pool. cvmx_fpa_get_name()
86 /* The number of elements in the pool at creation */
99 * Return the name of the pool
101 * @pool: Pool to get the name of
104 static inline const char *cvmx_fpa_get_name(uint64_t pool) cvmx_fpa_get_name() argument
106 return cvmx_fpa_pool_info[pool].name; cvmx_fpa_get_name()
110 * Return the base of the pool
112 * @pool: Pool to get the base of
115 static inline void *cvmx_fpa_get_base(uint64_t pool) cvmx_fpa_get_base() argument
117 return cvmx_fpa_pool_info[pool].base; cvmx_fpa_get_base()
121 * Check if a pointer belongs to an FPA pool. Return non-zero
123 * an FPA pool.
125 * @pool: Pool to check
127 * Returns Non-zero if pointer is in the pool. Zero if not
129 static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr) cvmx_fpa_is_member() argument
131 return ((ptr >= cvmx_fpa_pool_info[pool].base) && cvmx_fpa_is_member()
133 ((char *)(cvmx_fpa_pool_info[pool].base)) + cvmx_fpa_is_member()
134 cvmx_fpa_pool_info[pool].size * cvmx_fpa_is_member()
135 cvmx_fpa_pool_info[pool].starting_element_count)); cvmx_fpa_is_member()
180 * @pool: Pool to get the block from
183 static inline void *cvmx_fpa_alloc(uint64_t pool) cvmx_fpa_alloc() argument
186 cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool))); cvmx_fpa_alloc()
198 * @pool: Pool to get the block from
200 static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool) cvmx_fpa_async_alloc() argument
210 data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool); cvmx_fpa_async_alloc()
216 * Free a block allocated with a FPA pool. Does NOT provide memory
220 * @pool: Pool to put it in
224 static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool, cvmx_fpa_free_nosync() argument
230 CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)); cvmx_fpa_free_nosync()
238 * Free a block allocated with a FPA pool. Provides required memory
242 * @pool: Pool to put it in
246 static inline void cvmx_fpa_free(void *ptr, uint64_t pool, cvmx_fpa_free() argument
252 CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)); cvmx_fpa_free()
265 * Set up an FPA pool to control a new block of memory. cvmx_fpa_free()
266 * This can only be called once per pool. Make sure proper
269 * @pool: Pool to initialize
270 * 0 <= pool < 8
271 * @name: Constant character string to name this pool.
281 extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
285 * Shut down a memory pool and validate that it had all of cvmx_fpa_free()
288 * using the pool.
290 * @pool: Pool to shutdown
295 extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
298 * Get the size of blocks controlled by the pool
301 * @pool: Pool to access
304 uint64_t cvmx_fpa_get_block_size(uint64_t pool);
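A hedged usage sketch for the FPA helpers declared above. The trailing parameters of cvmx_fpa_setup_pool() and cvmx_fpa_free() are truncated in this excerpt, so the block-size/block-count and cache-line-sync arguments below are assumptions, as are the pool number and sizes.

#include <asm/octeon/cvmx-fpa.h>

#define MY_POOL         2       /* hypothetical pool number, 0 <= pool < 8 */
#define MY_BLOCK_SIZE   2048    /* assumed block size argument */
#define MY_NUM_BLOCKS   128     /* assumed block count argument */

static int my_fpa_example(void *backing_mem)
{
        void *block;

        if (cvmx_fpa_setup_pool(MY_POOL, "example", backing_mem,
                                MY_BLOCK_SIZE, MY_NUM_BLOCKS) < 0)
                return -1;

        block = cvmx_fpa_alloc(MY_POOL);        /* may return NULL when empty */
        if (block)
                cvmx_fpa_free(block, MY_POOL, 0);       /* assumed: 0 cache lines to sync */

        return 0;
}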
H A Dcvmx-packet.h53 /* The pool that the buffer came from / goes to */
54 uint64_t pool:3; member in struct:cvmx_buf_ptr::__anon1983
62 uint64_t pool:3;
/linux-4.1.27/drivers/mtd/ubi/
H A Dfastmap-wl.c54 * @pool: fastmap pool description object
57 struct ubi_fm_pool *pool) return_unused_pool_pebs()
62 for (i = pool->used; i < pool->size; i++) { return_unused_pool_pebs()
63 e = ubi->lookuptbl[pool->pebs[i]]; return_unused_pool_pebs()
122 struct ubi_fm_pool *pool = &ubi->fm_pool; ubi_refill_pools() local
129 return_unused_pool_pebs(ubi, pool); ubi_refill_pools()
132 pool->size = 0; ubi_refill_pools()
136 if (pool->size < pool->max_size) { ubi_refill_pools()
144 pool->pebs[pool->size] = e->pnum; ubi_refill_pools()
145 pool->size++; ubi_refill_pools()
169 pool->used = 0; ubi_refill_pools()
185 struct ubi_fm_pool *pool = &ubi->fm_pool; ubi_wl_get_peb() local
192 /* We also check the WL pool here because at this point we can ubi_wl_get_peb()
193 * refill the WL pool synchronously. */ ubi_wl_get_peb()
194 if (pool->used == pool->size || wl_pool->used == wl_pool->size) { ubi_wl_get_peb()
207 if (pool->used == pool->size) { ubi_wl_get_peb()
210 ubi_err(ubi, "Unable to get a free PEB from user WL pool"); ubi_wl_get_peb()
219 ubi_assert(pool->used < pool->size); ubi_wl_get_peb()
220 ret = pool->pebs[pool->used++]; ubi_wl_get_peb()
233 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; get_peb_for_wl() local
236 if (pool->used == pool->size) { get_peb_for_wl()
247 pnum = pool->pebs[pool->used++]; get_peb_for_wl()
56 return_unused_pool_pebs(struct ubi_device *ubi, struct ubi_fm_pool *pool) return_unused_pool_pebs() argument
H A Dfastmap.c260 * update_vol - inserts or updates a LEB which was found in a pool.
292 * Then a PEB can be within the persistent EBA and the pool. update_vol()
357 * process_pool_aeb - we found a non-empty PEB in a pool.
398 ubi_err(ubi, "orphaned volume in fastmap pool!"); process_pool_aeb()
410 * If fastmap detects a free PEB in the pool it has to check whether
439 * scan_pool - scans a pool for changed (no longer empty PEBs).
442 * @pebs: an array of all PEB numbers in the to be scanned pool
443 * @pool_size: size of the pool (number of entries in @pebs)
447 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
469 dbg_bld("scanning fastmap pool: size = %i", pool_size); scan_pool()
472 * Now scan all PEBs in the pool to find changes which have been made scan_pool()
482 ubi_err(ubi, "bad PEB in fastmap pool!"); scan_pool()
520 dbg_bld("Found non empty PEB:%i in pool", pnum); scan_pool()
549 ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!"); scan_pool()
639 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x", ubi_attach_fastmap()
649 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x", ubi_attach_fastmap()
660 ubi_err(ubi, "bad pool size: %i", pool_size); ubi_attach_fastmap()
665 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size); ubi_attach_fastmap()
672 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size); ubi_attach_fastmap()
678 ubi_err(ubi, "bad maximal WL pool size: %i", ubi_attach_fastmap()
1050 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size); ubi_scan_fastmap()
1051 ubi_msg(ubi, "fastmap WL pool size: %d", ubi_scan_fastmap()
1467 * a fastmap pool becomes full.
H A Dubi.h235 * @max_pool_size: maximal size of the user pool
236 * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
247 * struct ubi_fm_pool - in-memory fastmap pool
248 * @pebs: PEBs in this pool
250 * @size: total number of PEBs in this pool
251 * @max_size: maximal size of the pool
253 * A pool gets filled with up to max_size.
254 * If all PEBs within the pool are used a new fastmap will be written
255 * to the flash and the pool gets refilled with empty PEBs.
455 * @fm_pool: in-memory data structure of the fastmap pool
456 * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
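The ubi_fm_pool description above (pebs[], used, size, max_size) boils down to a bump allocator that is refilled wholesale once exhausted. A tiny illustrative model of those semantics, not the UBI implementation:

struct fm_pool_model {
        int pebs[64];   /* PEB numbers currently in the pool */
        int used;       /* how many have already been handed out */
        int size;       /* how many are in the pool right now */
        int max_size;   /* refill target when a new fastmap is written */
};

/* Hand out the next PEB, or -1 when the pool is exhausted and a
 * refill (i.e. writing a new fastmap) would be required. */
static int fm_pool_take(struct fm_pool_model *p)
{
        if (p->used == p->size)
                return -1;
        return p->pebs[p->used++];
}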
/linux-4.1.27/drivers/scsi/lpfc/
H A Dlpfc_mem.c43 #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
44 #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
45 #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
83 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; lpfc_mem_alloc() local
115 pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) * lpfc_mem_alloc()
117 if (!pool->elements) lpfc_mem_alloc()
120 pool->max_count = 0; lpfc_mem_alloc()
121 pool->current_count = 0; lpfc_mem_alloc()
123 pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool, lpfc_mem_alloc()
124 GFP_KERNEL, &pool->elements[i].phys); lpfc_mem_alloc()
125 if (!pool->elements[i].virt) lpfc_mem_alloc()
127 pool->max_count++; lpfc_mem_alloc()
128 pool->current_count++; lpfc_mem_alloc()
193 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, lpfc_mem_alloc()
194 pool->elements[i].phys); lpfc_mem_alloc()
195 kfree(pool->elements); lpfc_mem_alloc()
219 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; lpfc_mem_free() local
239 /* Free NLP memory pool */ lpfc_mem_free()
247 /* Free mbox memory pool */ lpfc_mem_free()
251 /* Free MBUF memory pool */ lpfc_mem_free()
252 for (i = 0; i < pool->current_count; i++) lpfc_mem_free()
253 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, lpfc_mem_free()
254 pool->elements[i].phys); lpfc_mem_free()
255 kfree(pool->elements); lpfc_mem_free()
260 /* Free DMA buffer memory pool */ lpfc_mem_free()
264 /* Free Device Data memory pool */ lpfc_mem_free()
266 /* Ensure all objects have been returned to the pool */ lpfc_mem_free()
298 /* Free memory used in mailbox queue back to mailbox memory pool */ lpfc_mem_free_all()
308 /* Free memory used in mailbox cmpl list back to mailbox memory pool */ lpfc_mem_free_all()
318 /* Free the active mailbox command back to the mailbox memory pool */ lpfc_mem_free_all()
344 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
345 * @phba: HBA which owns the pool to allocate from
349 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
352 * HBA's pool.
364 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; lpfc_mbuf_alloc() local
371 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { lpfc_mbuf_alloc()
372 pool->current_count--; lpfc_mbuf_alloc()
373 ret = pool->elements[pool->current_count].virt; lpfc_mbuf_alloc()
374 *handle = pool->elements[pool->current_count].phys; lpfc_mbuf_alloc()
381 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
382 * @phba: HBA which owns the pool to return to
397 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; __lpfc_mbuf_free() local
399 if (pool->current_count < pool->max_count) { __lpfc_mbuf_free()
400 pool->elements[pool->current_count].virt = virt; __lpfc_mbuf_free()
401 pool->elements[pool->current_count].phys = dma; __lpfc_mbuf_free()
402 pool->current_count++; __lpfc_mbuf_free()
410 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
411 * @phba: HBA which owns the pool to return to
438 * pool along with a non-DMA-mapped container for it.
490 * pool along with a non-DMA-mapped container for it.
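lpfc_mbuf_alloc() above shows a safety-pool fallback: allocate from the PCI/DMA pool and, for MEM_PRI requests, fall back to a small reserve filled at attach time. A simplified sketch of that fallback using the dma_pool API (which pci_pool_* wraps); locking and the lpfc types are omitted, and the names are hypothetical.

#include <linux/dmapool.h>
#include <linux/gfp.h>

struct my_safety_pool {
        void *virt[64];
        dma_addr_t phys[64];
        int current_count;
};

static void *my_mbuf_alloc(struct dma_pool *dmapool, struct my_safety_pool *safe,
                           int high_prio, dma_addr_t *handle)
{
        void *ret = dma_pool_alloc(dmapool, GFP_KERNEL, handle);

        /* pool exhausted: high-priority callers may dip into the reserve */
        if (!ret && high_prio && safe->current_count) {
                safe->current_count--;
                ret = safe->virt[safe->current_count];
                *handle = safe->phys[safe->current_count];
        }
        return ret;
}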
/linux-4.1.27/include/uapi/linux/
H A Datm_zatm.h19 /* get pool statistics */
23 /* set pool parameters */
26 int ref_count; /* free buffer pool usage counters */
34 int pool_num; /* pool number */
44 #define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */
45 #define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */
46 #define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */
H A Drandom.h22 /* Get the contents of the entropy pool. (Superuser only.) */
26 * Write bytes into the entropy pool and add to the entropy count.
34 /* Clear the entropy pool and associated counters. (Superuser only.) */
47 * GRND_RANDOM Use the /dev/random pool instead of /dev/urandom
H A Datm_nicstar.h25 /* get pool statistics */
H A Dmsg.h80 #define MSGPOOL (MSGMNI * MSGMNB / 1024) /* size in kbytes of message pool */
/linux-4.1.27/kernel/
H A Dworkqueue.c2 * kernel/workqueue.c - generic async execution with shared worker pool
18 * executed in process context. The worker pool is shared and
58 * A bound pool is either associated or disassociated with its CPU.
65 * be executing on any CPU. The pool behaves as an unbound one.
86 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
117 * L: pool->lock protected. Access with pool->lock held.
119 * X: During normal operation, modification requires pool->lock and should
121 * cpu or grabbing pool->lock is enough for read access. If
124 * A: pool->attach_mutex protected.
145 spinlock_t lock; /* the pool lock */
148 int id; /* I: pool ID */
186 * Destruction of pool is sched-RCU protected to allow dereferences
193 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
199 struct worker_pool *pool; /* I: the associated pool */ member in struct:pool_workqueue
313 /* PL: hash of all unbound pools keyed by pool->attrs */
361 #define for_each_cpu_worker_pool(pool, cpu) \
362 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
363 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
364 (pool)++)
368 * @pool: iteration cursor
372 * locked. If the pool needs to be used beyond the locking in effect, the
373 * caller is responsible for guaranteeing that the pool stays online.
378 #define for_each_pool(pool, pi) \
379 idr_for_each_entry(&worker_pool_idr, pool, pi) \
386 * @pool: worker_pool to iterate workers of
388 * This must be called with @pool->attach_mutex.
393 #define for_each_pool_worker(worker, pool) \
394 list_for_each_entry((worker), &(pool)->workers, node) \
395 if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
539 * worker_pool_assign_id - allocate ID and assign it to @pool debug_work_deactivate()
540 * @pool: the pool pointer of interest debug_work_deactivate()
545 static int worker_pool_assign_id(struct worker_pool *pool) worker_pool_assign_id() argument
551 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, worker_pool_assign_id()
554 pool->id = ret; worker_pool_assign_id()
608 * is cleared and the high bits contain OFFQ flags and pool ID.
611 * and clear_work_data() can be used to set the pwq, pool or clear
615 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
712 * All fields of the returned pool are accessible as long as the above
713 * mentioned locking is in effect. If the returned pool needs to be used
715 * returned pool is and stays online.
728 (data & WORK_STRUCT_WQ_DATA_MASK))->pool; get_work_pool()
738 * get_work_pool_id - return the worker pool ID a given work is associated with
750 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; get_work_pool_id()
773 * they're being called with pool->lock held.
776 static bool __need_more_worker(struct worker_pool *pool) __need_more_worker() argument
778 return !atomic_read(&pool->nr_running); __need_more_worker()
789 static bool need_more_worker(struct worker_pool *pool) need_more_worker() argument
791 return !list_empty(&pool->worklist) && __need_more_worker(pool); need_more_worker()
795 static bool may_start_working(struct worker_pool *pool) may_start_working() argument
797 return pool->nr_idle; may_start_working()
801 static bool keep_working(struct worker_pool *pool) keep_working() argument
803 return !list_empty(&pool->worklist) && keep_working()
804 atomic_read(&pool->nr_running) <= 1; keep_working()
808 static bool need_to_create_worker(struct worker_pool *pool) need_to_create_worker() argument
810 return need_more_worker(pool) && !may_start_working(pool); need_to_create_worker()
814 static bool too_many_workers(struct worker_pool *pool) too_many_workers() argument
816 bool managing = mutex_is_locked(&pool->manager_arb); too_many_workers()
817 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ too_many_workers()
818 int nr_busy = pool->nr_workers - nr_idle; too_many_workers()
828 static struct worker *first_idle_worker(struct worker_pool *pool) first_idle_worker() argument
830 if (unlikely(list_empty(&pool->idle_list))) first_idle_worker()
833 return list_first_entry(&pool->idle_list, struct worker, entry); first_idle_worker()
838 * @pool: worker pool to wake worker from
840 * Wake up the first idle worker of @pool.
843 * spin_lock_irq(pool->lock).
845 static void wake_up_worker(struct worker_pool *pool) wake_up_worker() argument
847 struct worker *worker = first_idle_worker(pool); wake_up_worker()
869 WARN_ON_ONCE(worker->pool->cpu != cpu); wq_worker_waking_up()
870 atomic_inc(&worker->pool->nr_running); wq_worker_waking_up()
892 struct worker_pool *pool; wq_worker_sleeping() local
902 pool = worker->pool; wq_worker_sleeping()
905 if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu)) wq_worker_sleeping()
916 * manipulating idle_list, so dereferencing idle_list without pool wq_worker_sleeping()
919 if (atomic_dec_and_test(&pool->nr_running) && wq_worker_sleeping()
920 !list_empty(&pool->worklist)) wq_worker_sleeping()
921 to_wakeup = first_idle_worker(pool); wq_worker_sleeping()
933 * spin_lock_irq(pool->lock)
937 struct worker_pool *pool = worker->pool; worker_set_flags() local
944 atomic_dec(&pool->nr_running); worker_set_flags()
958 * spin_lock_irq(pool->lock)
962 struct worker_pool *pool = worker->pool; worker_clr_flags() local
976 atomic_inc(&pool->nr_running); worker_clr_flags()
981 * @pool: pool of interest
984 * Find a worker which is executing @work on @pool by searching
985 * @pool->busy_hash which is keyed by the address of @work. For a worker
1006 * spin_lock_irq(pool->lock).
1012 static struct worker *find_worker_executing_work(struct worker_pool *pool, find_worker_executing_work() argument
1017 hash_for_each_possible(pool->busy_hash, worker, hentry, find_worker_executing_work()
1041 * spin_lock_irq(pool->lock).
1072 * @pwq has positive refcnt and be holding the matching pool->lock.
1076 lockdep_assert_held(&pwq->pool->lock); get_pwq()
1086 * destruction. The caller should be holding the matching pool->lock.
1090 lockdep_assert_held(&pwq->pool->lock); put_pwq()
1096 * @pwq can't be released under pool->lock, bounce to put_pwq()
1098 * pool->lock as this path is taken only for unbound workqueues and put_pwq()
1100 * avoid lockdep warning, unbound pool->locks are given lockdep put_pwq()
1107 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1119 spin_lock_irq(&pwq->pool->lock); put_pwq_unlocked()
1121 spin_unlock_irq(&pwq->pool->lock); put_pwq_unlocked()
1130 move_linked_works(work, &pwq->pool->worklist, NULL); pwq_activate_delayed_work()
1152 * spin_lock_irq(pool->lock).
1220 struct worker_pool *pool; try_to_grab_pending() local
1246 pool = get_work_pool(work); try_to_grab_pending()
1247 if (!pool) try_to_grab_pending()
1250 spin_lock(&pool->lock); try_to_grab_pending()
1254 * to pwq on queueing and to pool on dequeueing are done under try_to_grab_pending()
1255 * pwq->pool->lock. This in turn guarantees that, if work->data try_to_grab_pending()
1256 * points to pwq which is associated with a locked pool, the work try_to_grab_pending()
1257 * item is currently queued on that pool. try_to_grab_pending()
1260 if (pwq && pwq->pool == pool) { try_to_grab_pending()
1276 /* work->data points to pwq iff queued, point to pool */ try_to_grab_pending()
1277 set_work_pool_and_keep_pending(work, pool->id); try_to_grab_pending()
1279 spin_unlock(&pool->lock); try_to_grab_pending()
1282 spin_unlock(&pool->lock); try_to_grab_pending()
1292 * insert_work - insert a work into a pool
1302 * spin_lock_irq(pool->lock).
1307 struct worker_pool *pool = pwq->pool; insert_work() local
1321 if (__need_more_worker(pool)) insert_work()
1322 wake_up_worker(pool); insert_work()
1375 * If @work was previously on a different pool, it might still be __queue_work()
1377 * pool to guarantee non-reentrancy. __queue_work()
1380 if (last_pool && last_pool != pwq->pool) { __queue_work()
1392 spin_lock(&pwq->pool->lock); __queue_work()
1395 spin_lock(&pwq->pool->lock); __queue_work()
1408 spin_unlock(&pwq->pool->lock); __queue_work()
1421 spin_unlock(&pwq->pool->lock); __queue_work()
1431 worklist = &pwq->pool->worklist; __queue_work()
1439 spin_unlock(&pwq->pool->lock); __queue_work()
1591 * spin_lock_irq(pool->lock).
1595 struct worker_pool *pool = worker->pool; worker_enter_idle() local
1604 pool->nr_idle++; worker_enter_idle()
1608 list_add(&worker->entry, &pool->idle_list); worker_enter_idle()
1610 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) worker_enter_idle()
1611 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); worker_enter_idle()
1615 * pool->lock between setting %WORKER_UNBOUND and zapping worker_enter_idle()
1619 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && worker_enter_idle()
1620 pool->nr_workers == pool->nr_idle && worker_enter_idle()
1621 atomic_read(&pool->nr_running)); worker_enter_idle()
1631 * spin_lock_irq(pool->lock).
1635 struct worker_pool *pool = worker->pool; worker_leave_idle() local
1640 pool->nr_idle--; worker_leave_idle()
1660 * worker_attach_to_pool() - attach a worker to a pool
1662 * @pool: the target pool
1664 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1665 * cpu-binding of @worker are kept coordinated with the pool across
1669 struct worker_pool *pool) worker_attach_to_pool()
1671 mutex_lock(&pool->attach_mutex); worker_attach_to_pool()
1677 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); worker_attach_to_pool()
1680 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains worker_attach_to_pool()
1684 if (pool->flags & POOL_DISASSOCIATED) worker_attach_to_pool()
1687 list_add_tail(&worker->node, &pool->workers); worker_attach_to_pool()
1689 mutex_unlock(&pool->attach_mutex); worker_attach_to_pool()
1693 * worker_detach_from_pool() - detach a worker from its pool
1694 * @worker: worker which is attached to its pool
1695 * @pool: the pool @worker is attached to
1698 * caller worker shouldn't access to the pool after detached except it has
1699 * other reference to the pool.
1702 struct worker_pool *pool) worker_detach_from_pool()
1706 mutex_lock(&pool->attach_mutex); worker_detach_from_pool()
1708 if (list_empty(&pool->workers)) worker_detach_from_pool()
1709 detach_completion = pool->detach_completion; worker_detach_from_pool()
1710 mutex_unlock(&pool->attach_mutex); worker_detach_from_pool()
1712 /* clear leftover flags without pool->lock after it is detached */ worker_detach_from_pool()
1721 * @pool: pool the new worker will belong to
1723 * Create and start a new worker which is attached to @pool.
1731 static struct worker *create_worker(struct worker_pool *pool) create_worker() argument
1738 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); create_worker()
1742 worker = alloc_worker(pool->node); create_worker()
1746 worker->pool = pool; create_worker()
1749 if (pool->cpu >= 0) create_worker()
1750 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, create_worker()
1751 pool->attrs->nice < 0 ? "H" : ""); create_worker()
1753 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); create_worker()
1755 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, create_worker()
1760 set_user_nice(worker->task, pool->attrs->nice); create_worker()
1765 /* successful, attach the worker to the pool */ create_worker()
1766 worker_attach_to_pool(worker, pool); create_worker()
1769 spin_lock_irq(&pool->lock); create_worker()
1770 worker->pool->nr_workers++; create_worker()
1773 spin_unlock_irq(&pool->lock); create_worker()
1779 ida_simple_remove(&pool->worker_ida, id); create_worker()
1788 * Destroy @worker and adjust @pool stats accordingly. The worker should
1792 * spin_lock_irq(pool->lock).
1796 struct worker_pool *pool = worker->pool; destroy_worker() local
1798 lockdep_assert_held(&pool->lock); destroy_worker()
1806 pool->nr_workers--; destroy_worker()
1807 pool->nr_idle--; destroy_worker()
1816 struct worker_pool *pool = (void *)__pool; idle_worker_timeout() local
1818 spin_lock_irq(&pool->lock); idle_worker_timeout()
1820 while (too_many_workers(pool)) { idle_worker_timeout()
1825 worker = list_entry(pool->idle_list.prev, struct worker, entry); idle_worker_timeout()
1829 mod_timer(&pool->idle_timer, expires); idle_worker_timeout()
1836 spin_unlock_irq(&pool->lock); idle_worker_timeout()
1864 struct worker_pool *pool = (void *)__pool; pool_mayday_timeout() local
1867 spin_lock_irq(&pool->lock); pool_mayday_timeout()
1870 if (need_to_create_worker(pool)) { pool_mayday_timeout()
1877 list_for_each_entry(work, &pool->worklist, entry) pool_mayday_timeout()
1882 spin_unlock_irq(&pool->lock); pool_mayday_timeout()
1884 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); pool_mayday_timeout()
1889 * @pool: pool to create a new worker for
1891 * Create a new worker for @pool if necessary. @pool is guaranteed to
1894 * sent to all rescuers with works scheduled on @pool to resolve
1901 * spin_lock_irq(pool->lock) which may be released and regrabbed
1905 static void maybe_create_worker(struct worker_pool *pool)
1906 __releases(&pool->lock)
1907 __acquires(&pool->lock)
1910 spin_unlock_irq(&pool->lock);
1913 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1916 if (create_worker(pool) || !need_to_create_worker(pool))
1921 if (!need_to_create_worker(pool))
1925 del_timer_sync(&pool->mayday_timer);
1926 spin_lock_irq(&pool->lock);
1929 * created as @pool->lock was dropped and the new worker might have
1932 if (need_to_create_worker(pool))
1937 * manage_workers - manage worker pool
1940 * Assume the manager role and manage the worker pool @worker belongs
1942 * pool. The exclusion is handled automatically by this function.
1949 * spin_lock_irq(pool->lock) which may be released and regrabbed
1953 * %false if the pool doesn't need management and the caller can safely
1960 struct worker_pool *pool = worker->pool; manage_workers() local
1964 * and becomes the manager. mutex_trylock() on pool->manager_arb manage_workers()
1965 * failure while holding pool->lock reliably indicates that someone manage_workers()
1966 * else is managing the pool and the worker which failed trylock manage_workers()
1970 * actual management, the pool may stall indefinitely. manage_workers()
1972 if (!mutex_trylock(&pool->manager_arb)) manage_workers()
1974 pool->manager = worker; manage_workers()
1976 maybe_create_worker(pool); manage_workers()
1978 pool->manager = NULL; manage_workers()
1979 mutex_unlock(&pool->manager_arb); manage_workers()
1995 * spin_lock_irq(pool->lock) which is released and regrabbed.
1998 __releases(&pool->lock)
1999 __acquires(&pool->lock)
2002 struct worker_pool *pool = worker->pool; variable in typeref:struct:worker_pool
2019 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2020 raw_smp_processor_id() != pool->cpu);
2028 collision = find_worker_executing_work(pool, work);
2036 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2060 if (need_more_worker(pool))
2061 wake_up_worker(pool); variable
2064 * Record the last pool and clear PENDING which should be the last
2065 * update to @work. Also, do this inside @pool->lock so that
2069 set_work_pool_and_clear_pending(work, pool->id);
2071 spin_unlock_irq(&pool->lock);
2104 spin_lock_irq(&pool->lock);
2128 * spin_lock_irq(pool->lock) which may be released and regrabbed
2155 struct worker_pool *pool = worker->pool; worker_thread() local
2160 spin_lock_irq(&pool->lock); worker_thread()
2164 spin_unlock_irq(&pool->lock); worker_thread()
2169 ida_simple_remove(&pool->worker_ida, worker->id); worker_thread()
2170 worker_detach_from_pool(worker, pool); worker_thread()
2178 if (!need_more_worker(pool)) worker_thread()
2182 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) worker_thread()
2203 list_first_entry(&pool->worklist, worker_thread()
2215 } while (keep_working(pool)); worker_thread()
2220 * pool->lock is held and there's no work to process and no need to worker_thread()
2222 * pool->lock or from local cpu, so setting the current state worker_thread()
2223 * before releasing pool->lock is enough to prevent losing any worker_thread()
2228 spin_unlock_irq(&pool->lock); worker_thread()
2240 * Regular work processing on a pool may block trying to create a new
2246 * When such condition is possible, the pool summons rescuers of all
2247 * workqueues which have works queued on the pool and let them process
2287 struct worker_pool *pool = pwq->pool; rescuer_thread() local
2295 worker_attach_to_pool(rescuer, pool); rescuer_thread()
2297 spin_lock_irq(&pool->lock); rescuer_thread()
2298 rescuer->pool = pool; rescuer_thread()
2305 list_for_each_entry_safe(work, n, &pool->worklist, entry) rescuer_thread()
2321 if (need_to_create_worker(pool)) { rescuer_thread()
2330 * Put the reference grabbed by send_mayday(). @pool won't rescuer_thread()
2336 * Leave this pool. If need_more_worker() is %true, notify a rescuer_thread()
2340 if (need_more_worker(pool)) rescuer_thread()
2341 wake_up_worker(pool); rescuer_thread()
2343 rescuer->pool = NULL; rescuer_thread()
2344 spin_unlock_irq(&pool->lock); rescuer_thread()
2346 worker_detach_from_pool(rescuer, pool); rescuer_thread()
2399 * spin_lock_irq(pool->lock).
2409 * debugobject calls are safe here even with pool->lock locked insert_wq_barrier()
2482 struct worker_pool *pool = pwq->pool; for_each_pwq() local
2484 spin_lock_irq(&pool->lock); for_each_pwq()
2501 spin_unlock_irq(&pool->lock); for_each_pwq()
2696 spin_lock_irq(&pwq->pool->lock); for_each_pwq()
2698 spin_unlock_irq(&pwq->pool->lock); for_each_pwq()
2721 struct worker_pool *pool; start_flush_work() local
2727 pool = get_work_pool(work); start_flush_work()
2728 if (!pool) { start_flush_work()
2733 spin_lock(&pool->lock); start_flush_work()
2737 if (unlikely(pwq->pool != pool)) start_flush_work()
2740 worker = find_worker_executing_work(pool, work); start_flush_work()
2747 spin_unlock_irq(&pool->lock); start_flush_work()
2763 spin_unlock_irq(&pool->lock); start_flush_work()
3103 * ->no_numa as it is used for both pool and wq attrs. Instead, copy_workqueue_attrs()
3133 * @pool: worker_pool to initialize
3135 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3138 * inside @pool proper are initialized and put_unbound_pool() can be called
3139 * on @pool safely to release it.
3141 static int init_worker_pool(struct worker_pool *pool) init_worker_pool() argument
3143 spin_lock_init(&pool->lock); init_worker_pool()
3144 pool->id = -1; init_worker_pool()
3145 pool->cpu = -1; init_worker_pool()
3146 pool->node = NUMA_NO_NODE; init_worker_pool()
3147 pool->flags |= POOL_DISASSOCIATED; init_worker_pool()
3148 INIT_LIST_HEAD(&pool->worklist); init_worker_pool()
3149 INIT_LIST_HEAD(&pool->idle_list); init_worker_pool()
3150 hash_init(pool->busy_hash); init_worker_pool()
3152 init_timer_deferrable(&pool->idle_timer); init_worker_pool()
3153 pool->idle_timer.function = idle_worker_timeout; init_worker_pool()
3154 pool->idle_timer.data = (unsigned long)pool; init_worker_pool()
3156 setup_timer(&pool->mayday_timer, pool_mayday_timeout, init_worker_pool()
3157 (unsigned long)pool); init_worker_pool()
3159 mutex_init(&pool->manager_arb); init_worker_pool()
3160 mutex_init(&pool->attach_mutex); init_worker_pool()
3161 INIT_LIST_HEAD(&pool->workers); init_worker_pool()
3163 ida_init(&pool->worker_ida); init_worker_pool()
3164 INIT_HLIST_NODE(&pool->hash_node); init_worker_pool()
3165 pool->refcnt = 1; init_worker_pool()
3168 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); init_worker_pool()
3169 if (!pool->attrs) init_worker_pool()
3190 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); rcu_free_pool() local
3192 ida_destroy(&pool->worker_ida); rcu_free_pool()
3193 free_workqueue_attrs(pool->attrs); rcu_free_pool()
3194 kfree(pool); rcu_free_pool()
3199 * @pool: worker_pool to put
3201 * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
3208 static void put_unbound_pool(struct worker_pool *pool) put_unbound_pool() argument
3215 if (--pool->refcnt) put_unbound_pool()
3219 if (WARN_ON(!(pool->cpu < 0)) || put_unbound_pool()
3220 WARN_ON(!list_empty(&pool->worklist))) put_unbound_pool()
3224 if (pool->id >= 0) put_unbound_pool()
3225 idr_remove(&worker_pool_idr, pool->id); put_unbound_pool()
3226 hash_del(&pool->hash_node); put_unbound_pool()
3230 * manager_arb prevents @pool's workers from blocking on put_unbound_pool()
3233 mutex_lock(&pool->manager_arb); put_unbound_pool()
3235 spin_lock_irq(&pool->lock); put_unbound_pool()
3236 while ((worker = first_idle_worker(pool))) put_unbound_pool()
3238 WARN_ON(pool->nr_workers || pool->nr_idle); put_unbound_pool()
3239 spin_unlock_irq(&pool->lock); put_unbound_pool()
3241 mutex_lock(&pool->attach_mutex); put_unbound_pool()
3242 if (!list_empty(&pool->workers)) put_unbound_pool()
3243 pool->detach_completion = &detach_completion; put_unbound_pool()
3244 mutex_unlock(&pool->attach_mutex); put_unbound_pool()
3246 if (pool->detach_completion) put_unbound_pool()
3247 wait_for_completion(pool->detach_completion); put_unbound_pool()
3249 mutex_unlock(&pool->manager_arb); put_unbound_pool()
3252 del_timer_sync(&pool->idle_timer); put_unbound_pool()
3253 del_timer_sync(&pool->mayday_timer); put_unbound_pool()
3256 call_rcu_sched(&pool->rcu, rcu_free_pool); put_unbound_pool()
3276 struct worker_pool *pool; get_unbound_pool() local
3281 /* do we already have a matching pool? */ hash_for_each_possible()
3282 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { hash_for_each_possible()
3283 if (wqattrs_equal(pool->attrs, attrs)) { hash_for_each_possible()
3284 pool->refcnt++; hash_for_each_possible()
3285 return pool; hash_for_each_possible()
3290 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
3291 if (!pool || init_worker_pool(pool) < 0)
3294 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3295 copy_workqueue_attrs(pool->attrs, attrs);
3301 pool->attrs->no_numa = false;
3306 if (cpumask_subset(pool->attrs->cpumask, for_each_node()
3308 pool->node = node; for_each_node()
3314 if (worker_pool_assign_id(pool) < 0)
3318 if (!create_worker(pool))
3322 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3324 return pool;
3326 if (pool)
3327 put_unbound_pool(pool);
3346 struct worker_pool *pool = pwq->pool; pwq_unbound_release_workfn() local
3358 put_unbound_pool(pool); pwq_unbound_release_workfn()
3391 spin_lock_irq(&pwq->pool->lock); pwq_adjust_max_active()
3409 wake_up_worker(pwq->pool); pwq_adjust_max_active()
3414 spin_unlock_irq(&pwq->pool->lock); pwq_adjust_max_active()
3417 /* initialize newly alloced @pwq which is associated with @wq and @pool */ init_pwq()
3419 struct worker_pool *pool) init_pwq()
3425 pwq->pool = pool; init_pwq()
3456 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ alloc_unbound_pwq()
3460 struct worker_pool *pool; alloc_unbound_pwq() local
3465 pool = get_unbound_pool(attrs); alloc_unbound_pwq()
3466 if (!pool) alloc_unbound_pwq()
3469 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); alloc_unbound_pwq()
3471 put_unbound_pool(pool); alloc_unbound_pwq()
3475 init_pwq(pwq, wq, pool); alloc_unbound_pwq()
3762 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) wq_update_unbound_numa()
3790 spin_lock_irq(&wq->dfl_pwq->pool->lock); wq_update_unbound_numa()
3792 spin_unlock_irq(&wq->dfl_pwq->pool->lock); wq_update_unbound_numa()
4129 struct worker_pool *pool; work_busy() local
4137 pool = get_work_pool(work); work_busy()
4138 if (pool) { work_busy()
4139 spin_lock(&pool->lock); work_busy()
4140 if (find_worker_executing_work(pool, work)) work_busy()
4142 spin_unlock(&pool->lock); work_busy()
4227 static void pr_cont_pool_info(struct worker_pool *pool) pr_cont_pool_info() argument
4229 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); pr_cont_pool_info()
4230 if (pool->node != NUMA_NO_NODE) pr_cont_pool_info()
4231 pr_cont(" node=%d", pool->node); pr_cont_pool_info()
4232 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); pr_cont_pool_info()
4251 struct worker_pool *pool = pwq->pool; show_pwq() local
4257 pr_info(" pwq %d:", pool->id); show_pwq()
4258 pr_cont_pool_info(pool); show_pwq()
4263 hash_for_each(pool->busy_hash, bkt, worker, hentry) { show_pwq()
4273 hash_for_each(pool->busy_hash, bkt, worker, hentry) { show_pwq()
4288 list_for_each_entry(work, &pool->worklist, entry) { show_pwq()
4298 list_for_each_entry(work, &pool->worklist, entry) { show_pwq()
4329 struct worker_pool *pool; show_workqueue_state() local
4353 spin_lock_irqsave(&pwq->pool->lock, flags); for_each_pwq()
4356 spin_unlock_irqrestore(&pwq->pool->lock, flags); for_each_pwq()
4360 for_each_pool(pool, pi) { for_each_pool()
4364 spin_lock_irqsave(&pool->lock, flags); for_each_pool()
4365 if (pool->nr_workers == pool->nr_idle) for_each_pool()
4368 pr_info("pool %d:", pool->id); for_each_pool()
4369 pr_cont_pool_info(pool); for_each_pool()
4370 pr_cont(" workers=%d", pool->nr_workers); for_each_pool()
4371 if (pool->manager) for_each_pool()
4373 task_pid_nr(pool->manager->task)); for_each_pool()
4374 list_for_each_entry(worker, &pool->idle_list, entry) { for_each_pool()
4381 spin_unlock_irqrestore(&pool->lock, flags); for_each_pool()
4392 * pool which make migrating pending and scheduled works very
4405 struct worker_pool *pool; wq_unbind_fn() local
4408 for_each_cpu_worker_pool(pool, cpu) { for_each_cpu_worker_pool()
4409 mutex_lock(&pool->attach_mutex); for_each_cpu_worker_pool()
4410 spin_lock_irq(&pool->lock); for_each_cpu_worker_pool()
4419 for_each_pool_worker(worker, pool) for_each_cpu_worker_pool()
4422 pool->flags |= POOL_DISASSOCIATED; for_each_cpu_worker_pool()
4424 spin_unlock_irq(&pool->lock); for_each_cpu_worker_pool()
4425 mutex_unlock(&pool->attach_mutex); for_each_cpu_worker_pool()
4439 * worklist is not empty. This pool now behaves as an for_each_cpu_worker_pool()
4440 * unbound (in terms of concurrency management) pool which for_each_cpu_worker_pool()
4441 * is served by workers tied to the pool. for_each_cpu_worker_pool()
4443 atomic_set(&pool->nr_running, 0); for_each_cpu_worker_pool()
4450 spin_lock_irq(&pool->lock); for_each_cpu_worker_pool()
4451 wake_up_worker(pool); for_each_cpu_worker_pool()
4452 spin_unlock_irq(&pool->lock); for_each_cpu_worker_pool()
4457 * rebind_workers - rebind all workers of a pool to the associated CPU
4458 * @pool: pool of interest
4460 * @pool->cpu is coming online. Rebind all workers to the CPU.
4462 static void rebind_workers(struct worker_pool *pool) rebind_workers() argument
4466 lockdep_assert_held(&pool->attach_mutex); rebind_workers()
4475 for_each_pool_worker(worker, pool) rebind_workers()
4477 pool->attrs->cpumask) < 0); rebind_workers()
4479 spin_lock_irq(&pool->lock); rebind_workers()
4486 if (!(pool->flags & POOL_DISASSOCIATED)) { rebind_workers()
4487 spin_unlock_irq(&pool->lock); rebind_workers()
4491 pool->flags &= ~POOL_DISASSOCIATED; rebind_workers()
4493 for_each_pool_worker(worker, pool) { for_each_pool_worker()
4502 * be bound before @pool->lock is released. for_each_pool_worker()
4528 spin_unlock_irq(&pool->lock);
4533 * @pool: unbound pool of interest
4536 * An unbound pool may end up with a cpumask which doesn't have any online
4537 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets restore_unbound_workers_cpumask()
4538 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
4541 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) restore_unbound_workers_cpumask() argument
4546 lockdep_assert_held(&pool->attach_mutex); restore_unbound_workers_cpumask()
4548 /* is @cpu allowed for @pool? */ restore_unbound_workers_cpumask()
4549 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) restore_unbound_workers_cpumask()
4553 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); restore_unbound_workers_cpumask()
4558 for_each_pool_worker(worker, pool) restore_unbound_workers_cpumask()
4560 pool->attrs->cpumask) < 0); restore_unbound_workers_cpumask()
4572 struct worker_pool *pool; workqueue_cpu_up_callback() local
4578 for_each_cpu_worker_pool(pool, cpu) { for_each_cpu_worker_pool()
4579 if (pool->nr_workers) for_each_cpu_worker_pool()
4581 if (!create_worker(pool)) for_each_cpu_worker_pool()
4590 for_each_pool(pool, pi) { for_each_pool()
4591 mutex_lock(&pool->attach_mutex); for_each_pool()
4593 if (pool->cpu == cpu) for_each_pool()
4594 rebind_workers(pool); for_each_pool()
4595 else if (pool->cpu < 0) for_each_pool()
4596 restore_unbound_workers_cpumask(pool, cpu); for_each_pool()
4598 mutex_unlock(&pool->attach_mutex); for_each_pool()
4690 * pool->worklist.
4693 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4765 * frozen works are transferred to their respective pool worklists.
4768 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4806 * id RO int : the associated pool ID
4872 unbound_pwq_by_node(wq, node)->pool->id); for_each_node()
5170 struct worker_pool *pool; for_each_possible_cpu() local
5173 for_each_cpu_worker_pool(pool, cpu) { for_each_cpu_worker_pool()
5174 BUG_ON(init_worker_pool(pool)); for_each_cpu_worker_pool()
5175 pool->cpu = cpu; for_each_cpu_worker_pool()
5176 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); for_each_cpu_worker_pool()
5177 pool->attrs->nice = std_nice[i++]; for_each_cpu_worker_pool()
5178 pool->node = cpu_to_node(cpu); for_each_cpu_worker_pool()
5180 /* alloc pool ID */ for_each_cpu_worker_pool()
5182 BUG_ON(worker_pool_assign_id(pool)); for_each_cpu_worker_pool()
5189 struct worker_pool *pool; for_each_online_cpu() local
5191 for_each_cpu_worker_pool(pool, cpu) { for_each_cpu_worker_pool()
5192 pool->flags &= ~POOL_DISASSOCIATED; for_each_cpu_worker_pool()
5193 BUG_ON(!create_worker(pool)); for_each_cpu_worker_pool()
1668 worker_attach_to_pool(struct worker *worker, struct worker_pool *pool) worker_attach_to_pool() argument
1701 worker_detach_from_pool(struct worker *worker, struct worker_pool *pool) worker_detach_from_pool() argument
3418 init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) init_pwq() argument
H A Dworkqueue_internal.h38 struct worker_pool *pool; /* I: the associated pool */ member in struct:worker
40 struct list_head node; /* A: anchored at pool->workers */
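All of the worker_pool machinery above sits behind the ordinary workqueue API: a queued work item lands on some pool's worklist and is executed by one of that pool's workers. A minimal, hypothetical user is sketched below; my_wq and my_work_fn are made-up names.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        pr_info("executed by a worker from a shared pool\n");
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int my_init(void)
{
        /* WQ_UNBOUND items go to the unbound pools hashed by pool->attrs */
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
        if (!my_wq)
                return -ENOMEM;
        queue_work(my_wq, &my_work);
        return 0;
}

static void my_exit(void)
{
        flush_workqueue(my_wq);         /* wait for my_work to finish */
        destroy_workqueue(my_wq);
}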
/linux-4.1.27/drivers/net/ethernet/ibm/
H A Dibmveth.c142 /* setup the initial settings for a buffer pool */ ibmveth_init_buffer_pool()
143 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, ibmveth_init_buffer_pool() argument
147 pool->size = pool_size; ibmveth_init_buffer_pool()
148 pool->index = pool_index; ibmveth_init_buffer_pool()
149 pool->buff_size = buff_size; ibmveth_init_buffer_pool()
150 pool->threshold = pool_size * 7 / 8; ibmveth_init_buffer_pool()
151 pool->active = pool_active; ibmveth_init_buffer_pool()
154 /* allocate and set up a buffer pool - called during open */ ibmveth_alloc_buffer_pool()
155 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) ibmveth_alloc_buffer_pool() argument
159 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); ibmveth_alloc_buffer_pool()
161 if (!pool->free_map) ibmveth_alloc_buffer_pool()
164 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); ibmveth_alloc_buffer_pool()
165 if (!pool->dma_addr) { ibmveth_alloc_buffer_pool()
166 kfree(pool->free_map); ibmveth_alloc_buffer_pool()
167 pool->free_map = NULL; ibmveth_alloc_buffer_pool()
171 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); ibmveth_alloc_buffer_pool()
173 if (!pool->skbuff) { ibmveth_alloc_buffer_pool()
174 kfree(pool->dma_addr); ibmveth_alloc_buffer_pool()
175 pool->dma_addr = NULL; ibmveth_alloc_buffer_pool()
177 kfree(pool->free_map); ibmveth_alloc_buffer_pool()
178 pool->free_map = NULL; ibmveth_alloc_buffer_pool()
182 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); ibmveth_alloc_buffer_pool()
184 for (i = 0; i < pool->size; ++i) ibmveth_alloc_buffer_pool()
185 pool->free_map[i] = i; ibmveth_alloc_buffer_pool()
187 atomic_set(&pool->available, 0); ibmveth_alloc_buffer_pool()
188 pool->producer_index = 0; ibmveth_alloc_buffer_pool()
189 pool->consumer_index = 0; ibmveth_alloc_buffer_pool()
202 /* replenish the buffers for a pool. note that we don't need to
206 struct ibmveth_buff_pool *pool) ibmveth_replenish_buffer_pool()
209 u32 count = pool->size - atomic_read(&pool->available); ibmveth_replenish_buffer_pool()
222 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); ibmveth_replenish_buffer_pool()
231 free_index = pool->consumer_index; ibmveth_replenish_buffer_pool()
232 pool->consumer_index++; ibmveth_replenish_buffer_pool()
233 if (pool->consumer_index >= pool->size) ibmveth_replenish_buffer_pool()
234 pool->consumer_index = 0; ibmveth_replenish_buffer_pool()
235 index = pool->free_map[free_index]; ibmveth_replenish_buffer_pool()
238 BUG_ON(pool->skbuff[index] != NULL); ibmveth_replenish_buffer_pool()
241 pool->buff_size, DMA_FROM_DEVICE); ibmveth_replenish_buffer_pool()
246 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; ibmveth_replenish_buffer_pool()
247 pool->dma_addr[index] = dma_addr; ibmveth_replenish_buffer_pool()
248 pool->skbuff[index] = skb; ibmveth_replenish_buffer_pool()
250 correlator = ((u64)pool->index << 32) | index; ibmveth_replenish_buffer_pool()
253 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; ibmveth_replenish_buffer_pool()
257 unsigned int len = min(pool->buff_size, ibmveth_replenish_buffer_pool()
274 atomic_add(buffers_added, &(pool->available)); ibmveth_replenish_buffer_pool()
278 pool->free_map[free_index] = index; ibmveth_replenish_buffer_pool()
279 pool->skbuff[index] = NULL; ibmveth_replenish_buffer_pool()
280 if (pool->consumer_index == 0) ibmveth_replenish_buffer_pool()
281 pool->consumer_index = pool->size - 1; ibmveth_replenish_buffer_pool()
283 pool->consumer_index--; ibmveth_replenish_buffer_pool()
286 pool->dma_addr[index], pool->buff_size, ibmveth_replenish_buffer_pool()
292 atomic_add(buffers_added, &(pool->available)); ibmveth_replenish_buffer_pool()
315 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; ibmveth_replenish_task() local
317 if (pool->active && ibmveth_replenish_task()
318 (atomic_read(&pool->available) < pool->threshold)) ibmveth_replenish_task()
319 ibmveth_replenish_buffer_pool(adapter, pool); ibmveth_replenish_task()
325 /* empty and free a buffer pool - also used to do cleanup in error paths */ ibmveth_free_buffer_pool()
327 struct ibmveth_buff_pool *pool) ibmveth_free_buffer_pool()
331 kfree(pool->free_map); ibmveth_free_buffer_pool()
332 pool->free_map = NULL; ibmveth_free_buffer_pool()
334 if (pool->skbuff && pool->dma_addr) { ibmveth_free_buffer_pool()
335 for (i = 0; i < pool->size; ++i) { ibmveth_free_buffer_pool()
336 struct sk_buff *skb = pool->skbuff[i]; ibmveth_free_buffer_pool()
339 pool->dma_addr[i], ibmveth_free_buffer_pool()
340 pool->buff_size, ibmveth_free_buffer_pool()
343 pool->skbuff[i] = NULL; ibmveth_free_buffer_pool()
348 if (pool->dma_addr) { ibmveth_free_buffer_pool()
349 kfree(pool->dma_addr); ibmveth_free_buffer_pool()
350 pool->dma_addr = NULL; ibmveth_free_buffer_pool()
353 if (pool->skbuff) { ibmveth_free_buffer_pool()
354 kfree(pool->skbuff); ibmveth_free_buffer_pool()
355 pool->skbuff = NULL; ibmveth_free_buffer_pool()
359 /* remove a buffer from a pool */ ibmveth_remove_buffer_from_pool()
363 unsigned int pool = correlator >> 32; ibmveth_remove_buffer_from_pool() local
368 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); ibmveth_remove_buffer_from_pool()
369 BUG_ON(index >= adapter->rx_buff_pool[pool].size); ibmveth_remove_buffer_from_pool()
371 skb = adapter->rx_buff_pool[pool].skbuff[index]; ibmveth_remove_buffer_from_pool()
375 adapter->rx_buff_pool[pool].skbuff[index] = NULL; ibmveth_remove_buffer_from_pool()
378 adapter->rx_buff_pool[pool].dma_addr[index], ibmveth_remove_buffer_from_pool()
379 adapter->rx_buff_pool[pool].buff_size, ibmveth_remove_buffer_from_pool()
382 free_index = adapter->rx_buff_pool[pool].producer_index; ibmveth_remove_buffer_from_pool()
383 adapter->rx_buff_pool[pool].producer_index++; ibmveth_remove_buffer_from_pool()
384 if (adapter->rx_buff_pool[pool].producer_index >= ibmveth_remove_buffer_from_pool()
385 adapter->rx_buff_pool[pool].size) ibmveth_remove_buffer_from_pool()
386 adapter->rx_buff_pool[pool].producer_index = 0; ibmveth_remove_buffer_from_pool()
387 adapter->rx_buff_pool[pool].free_map[free_index] = index; ibmveth_remove_buffer_from_pool()
391 atomic_dec(&(adapter->rx_buff_pool[pool].available)); ibmveth_remove_buffer_from_pool()
398 unsigned int pool = correlator >> 32; ibmveth_rxq_get_buffer() local
401 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); ibmveth_rxq_get_buffer()
402 BUG_ON(index >= adapter->rx_buff_pool[pool].size); ibmveth_rxq_get_buffer()
404 return adapter->rx_buff_pool[pool].skbuff[index]; ibmveth_rxq_get_buffer()
412 unsigned int pool = correlator >> 32; ibmveth_rxq_recycle_buffer() local
418 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); ibmveth_rxq_recycle_buffer()
419 BUG_ON(index >= adapter->rx_buff_pool[pool].size); ibmveth_rxq_recycle_buffer()
421 if (!adapter->rx_buff_pool[pool].active) { ibmveth_rxq_recycle_buffer()
423 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); ibmveth_rxq_recycle_buffer()
428 adapter->rx_buff_pool[pool].buff_size; ibmveth_rxq_recycle_buffer()
429 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; ibmveth_rxq_recycle_buffer()
635 netdev_err(netdev, "unable to alloc pool\n"); ibmveth_open()
1256 /* Look for an active buffer pool that can hold the new MTU */ ibmveth_change_mtu()
1435 &dev->dev.kobj, "pool%d", i); ibmveth_probe()
1487 struct ibmveth_buff_pool *pool = container_of(kobj, veth_pool_show() local
1492 return sprintf(buf, "%d\n", pool->active); veth_pool_show()
1494 return sprintf(buf, "%d\n", pool->size); veth_pool_show()
1496 return sprintf(buf, "%d\n", pool->buff_size); veth_pool_show()
1503 struct ibmveth_buff_pool *pool = container_of(kobj, veth_pool_store() local
1513 if (value && !pool->active) { veth_pool_store()
1515 if (ibmveth_alloc_buffer_pool(pool)) { veth_pool_store()
1517 "unable to alloc pool\n"); veth_pool_store()
1520 pool->active = 1; veth_pool_store()
1527 pool->active = 1; veth_pool_store()
1529 } else if (!value && pool->active) { veth_pool_store()
1532 /* Make sure there is a buffer pool with buffers that veth_pool_store()
1535 if (pool == &adapter->rx_buff_pool[i]) veth_pool_store()
1544 netdev_err(netdev, "no active pool >= MTU\n"); veth_pool_store()
1551 pool->active = 0; veth_pool_store()
1556 pool->active = 0; veth_pool_store()
1566 pool->size = value; veth_pool_store()
1570 pool->size = value; veth_pool_store()
1581 pool->buff_size = value; veth_pool_store()
1585 pool->buff_size = value; veth_pool_store()
205 ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) ibmveth_replenish_buffer_pool() argument
326 ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) ibmveth_free_buffer_pool() argument
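The ibmveth hits above return freed receive buffers by writing the slot index back into a per-pool free_map ring at producer_index and wrapping that index at the pool size. A minimal sketch of that index-ring pattern, with illustrative names rather than the driver's own:

    /* Free-index ring: producer_index marks where the next freed slot
     * index is recorded; the replenish path consumes from the other end. */
    struct buff_pool_map {
            unsigned int size;            /* number of slots in the pool */
            unsigned int producer_index;  /* next free_map entry to write */
            unsigned int *free_map;       /* ring of free slot indices */
    };

    /* Return slot 'index' to the pool, mirroring the sequence in
     * ibmveth_remove_buffer_from_pool() above. */
    static void pool_return_index(struct buff_pool_map *p, unsigned int index)
    {
            unsigned int free_index = p->producer_index;

            if (++p->producer_index >= p->size)
                    p->producer_index = 0;        /* wrap around the ring */
            p->free_map[free_index] = index;      /* record the freed slot */
    }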
/linux-4.1.27/arch/sh/mm/
H A Dsram.c2 * SRAM pool for tiny memories not otherwise managed.
16 * This provides a standard SRAM pool for tiny memories that can be
18 * to be inserted into the pool will generally be less than the page
27 * This is a global pool, we don't care about node locality. sram_pool_init()
/linux-4.1.27/drivers/usb/core/
H A Dbuffer.c24 /* FIXME tune these based on pool statistics ... */
40 pool_max[0] = 0; /* Don't use this pool */ usb_init_pool_max()
74 hcd->pool[i] = dma_pool_create(name, hcd->self.controller, hcd_buffer_create()
76 if (!hcd->pool[i]) { hcd_buffer_create()
97 struct dma_pool *pool = hcd->pool[i]; hcd_buffer_destroy() local
98 if (pool) { hcd_buffer_destroy()
99 dma_pool_destroy(pool); hcd_buffer_destroy()
100 hcd->pool[i] = NULL; hcd_buffer_destroy()
129 return dma_pool_alloc(hcd->pool[i], mem_flags, dma); hcd_buffer_alloc()
155 dma_pool_free(hcd->pool[i], addr, dma); hcd_buffer_free()
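The buffer.c hits show the USB host controller core keeping one dma_pool per common buffer size (hcd->pool[i]), so small transfer buffers come from preallocated coherent DMA blocks instead of individual allocations. A minimal sketch of the dma_pool API used there, with a placeholder device and arbitrary sizes:

    #include <linux/device.h>
    #include <linux/dmapool.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static struct dma_pool *buf_pool;

    static int example_pool_setup(struct device *dev)
    {
            /* 64-byte blocks, 32-byte alignment, no boundary restriction */
            buf_pool = dma_pool_create("example-buf", dev, 64, 32, 0);
            return buf_pool ? 0 : -ENOMEM;
    }

    static void *example_buf_get(dma_addr_t *dma)
    {
            /* returns the CPU address and fills in the bus address */
            return dma_pool_alloc(buf_pool, GFP_KERNEL, dma);
    }

    static void example_buf_put(void *vaddr, dma_addr_t dma)
    {
            dma_pool_free(buf_pool, vaddr, dma);
    }

    static void example_pool_teardown(void)
    {
            dma_pool_destroy(buf_pool);     /* all blocks must be freed first */
    }

The pci_pool_* calls that appear in the megaraid hits further down are thin wrappers around this same dma_pool interface, taking a pci_dev instead of a generic struct device.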
/linux-4.1.27/drivers/net/ethernet/ti/
H A Ddavinci_cpdma.c107 struct cpdma_desc_pool *pool; member in struct:cpdma_ctlr
149 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
159 struct cpdma_desc_pool *pool; cpdma_desc_pool_create() local
161 pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); cpdma_desc_pool_create()
162 if (!pool) cpdma_desc_pool_create()
165 spin_lock_init(&pool->lock); cpdma_desc_pool_create()
167 pool->dev = dev; cpdma_desc_pool_create()
168 pool->mem_size = size; cpdma_desc_pool_create()
169 pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); cpdma_desc_pool_create()
170 pool->num_desc = size / pool->desc_size; cpdma_desc_pool_create()
172 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); cpdma_desc_pool_create()
173 pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); cpdma_desc_pool_create()
174 if (!pool->bitmap) cpdma_desc_pool_create()
178 pool->phys = phys; cpdma_desc_pool_create()
179 pool->iomap = ioremap(phys, size); cpdma_desc_pool_create()
180 pool->hw_addr = hw_addr; cpdma_desc_pool_create()
182 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, cpdma_desc_pool_create()
184 pool->iomap = pool->cpumap; cpdma_desc_pool_create()
185 pool->hw_addr = pool->phys; cpdma_desc_pool_create()
188 if (pool->iomap) cpdma_desc_pool_create()
189 return pool; cpdma_desc_pool_create()
194 static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) cpdma_desc_pool_destroy() argument
196 if (!pool) cpdma_desc_pool_destroy()
199 WARN_ON(pool->used_desc); cpdma_desc_pool_destroy()
200 if (pool->cpumap) { cpdma_desc_pool_destroy()
201 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, cpdma_desc_pool_destroy()
202 pool->phys); cpdma_desc_pool_destroy()
204 iounmap(pool->iomap); cpdma_desc_pool_destroy()
208 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, desc_phys() argument
213 return pool->hw_addr + (__force long)desc - (__force long)pool->iomap; desc_phys()
217 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) desc_from_phys() argument
219 return dma ? pool->iomap + dma - pool->hw_addr : NULL; desc_from_phys()
223 cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) cpdma_desc_alloc() argument
231 spin_lock_irqsave(&pool->lock, flags); cpdma_desc_alloc()
235 desc_end = pool->num_desc/2; cpdma_desc_alloc()
237 desc_start = pool->num_desc/2; cpdma_desc_alloc()
238 desc_end = pool->num_desc; cpdma_desc_alloc()
241 index = bitmap_find_next_zero_area(pool->bitmap, cpdma_desc_alloc()
244 bitmap_set(pool->bitmap, index, num_desc); cpdma_desc_alloc()
245 desc = pool->iomap + pool->desc_size * index; cpdma_desc_alloc()
246 pool->used_desc++; cpdma_desc_alloc()
249 spin_unlock_irqrestore(&pool->lock, flags); cpdma_desc_alloc()
253 static void cpdma_desc_free(struct cpdma_desc_pool *pool, cpdma_desc_free() argument
258 index = ((unsigned long)desc - (unsigned long)pool->iomap) / cpdma_desc_free()
259 pool->desc_size; cpdma_desc_free()
260 spin_lock_irqsave(&pool->lock, flags); cpdma_desc_free()
261 bitmap_clear(pool->bitmap, index, num_desc); cpdma_desc_free()
262 pool->used_desc--; cpdma_desc_free()
263 spin_unlock_irqrestore(&pool->lock, flags); cpdma_desc_free()
279 ctlr->pool = cpdma_desc_pool_create(ctlr->dev, cpdma_ctlr_create()
284 if (!ctlr->pool) cpdma_ctlr_create()
458 cpdma_desc_pool_destroy(ctlr->pool); cpdma_ctlr_destroy()
630 struct cpdma_desc_pool *pool = ctlr->pool; __cpdma_chan_submit() local
634 desc_dma = desc_phys(pool, desc); __cpdma_chan_submit()
678 desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); cpdma_chan_submit()
693 cpdma_desc_free(ctlr->pool, desc, 1); cpdma_chan_submit()
728 struct cpdma_desc_pool *pool = ctlr->pool; cpdma_check_free_tx_desc() local
730 spin_lock_irqsave(&pool->lock, flags); cpdma_check_free_tx_desc()
732 index = bitmap_find_next_zero_area(pool->bitmap, cpdma_check_free_tx_desc()
733 pool->num_desc, pool->num_desc/2, 1, 0); cpdma_check_free_tx_desc()
735 if (index < pool->num_desc) cpdma_check_free_tx_desc()
740 spin_unlock_irqrestore(&pool->lock, flags); cpdma_check_free_tx_desc()
750 struct cpdma_desc_pool *pool = ctlr->pool; __cpdma_chan_free() local
760 cpdma_desc_free(pool, desc, 1); __cpdma_chan_free()
770 struct cpdma_desc_pool *pool = ctlr->pool; __cpdma_chan_process() local
782 desc_dma = desc_phys(pool, desc); __cpdma_chan_process()
798 chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); __cpdma_chan_process()
805 chan_write(chan, hdp, desc_phys(pool, chan->head)); __cpdma_chan_process()
842 struct cpdma_desc_pool *pool = ctlr->pool; cpdma_chan_start() local
857 chan_write(chan, hdp, desc_phys(pool, chan->head)); cpdma_chan_start()
870 struct cpdma_desc_pool *pool = ctlr->pool; cpdma_chan_stop() local
914 chan->head = desc_from_phys(pool, next_dma); cpdma_chan_stop()
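cpdma_desc_alloc()/cpdma_desc_free() above manage the descriptor region as a bitmap of fixed-size slots, with RX allocations searched in the lower half and TX in the upper half. A reduced sketch of the bitmap-allocator part of that technique (structure and function names are illustrative, and the RX/TX split is omitted):

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/spinlock.h>

    struct desc_bitmap_pool {
            spinlock_t lock;
            unsigned long *bitmap;  /* one bit per descriptor slot */
            int num_desc;
    };

    /* Claim 'count' contiguous free slots; returns the first slot index,
     * or -ENOSPC when the pool is exhausted. */
    static int desc_slots_alloc(struct desc_bitmap_pool *p, int count)
    {
            unsigned long flags;
            int index;

            spin_lock_irqsave(&p->lock, flags);
            index = bitmap_find_next_zero_area(p->bitmap, p->num_desc, 0,
                                               count, 0);
            if (index < p->num_desc)
                    bitmap_set(p->bitmap, index, count);
            else
                    index = -ENOSPC;
            spin_unlock_irqrestore(&p->lock, flags);

            return index;
    }

    static void desc_slots_free(struct desc_bitmap_pool *p, int index, int count)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            bitmap_clear(p->bitmap, index, count);
            spin_unlock_irqrestore(&p->lock, flags);
    }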
H A Dcpts.c74 if (list_empty(&cpts->pool)) { cpts_fifo_read()
75 pr_err("cpts: event pool is empty\n"); cpts_fifo_read()
78 event = list_first_entry(&cpts->pool, struct cpts_event, list); cpts_fifo_read()
119 list_add(&event->list, &cpts->pool); cpts_systim_read()
305 list_add(&event->list, &cpts->pool); cpts_find_ts()
314 list_add(&event->list, &cpts->pool); cpts_find_ts()
378 INIT_LIST_HEAD(&cpts->pool); cpts_register()
380 list_add(&cpts->pool_data[i].list, &cpts->pool); cpts_register()
/linux-4.1.27/drivers/xen/
H A Dtmem.c171 static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key, tmem_cleancache_put_page() argument
178 if (pool < 0) tmem_cleancache_put_page()
183 (void)xen_tmem_put_page((u32)pool, oid, ind, pfn); tmem_cleancache_put_page()
186 static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key, tmem_cleancache_get_page() argument
195 if (pool < 0) tmem_cleancache_get_page()
199 ret = xen_tmem_get_page((u32)pool, oid, ind, pfn); tmem_cleancache_get_page()
206 static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key, tmem_cleancache_flush_page() argument
212 if (pool < 0) tmem_cleancache_flush_page()
216 (void)xen_tmem_flush_page((u32)pool, oid, ind); tmem_cleancache_flush_page()
219 static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key) tmem_cleancache_flush_inode() argument
223 if (pool < 0) tmem_cleancache_flush_inode()
225 (void)xen_tmem_flush_object((u32)pool, oid); tmem_cleancache_flush_inode()
228 static void tmem_cleancache_flush_fs(int pool) tmem_cleancache_flush_fs() argument
230 if (pool < 0) tmem_cleancache_flush_fs()
232 (void)xen_tmem_destroy_pool((u32)pool); tmem_cleancache_flush_fs()
291 int pool = tmem_frontswap_poolid; tmem_frontswap_store() local
294 if (pool < 0) tmem_frontswap_store()
299 ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn); tmem_frontswap_store()
317 int pool = tmem_frontswap_poolid; tmem_frontswap_load() local
320 if (pool < 0) tmem_frontswap_load()
324 ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn); tmem_frontswap_load()
337 int pool = tmem_frontswap_poolid; tmem_frontswap_flush_page() local
339 if (pool < 0) tmem_frontswap_flush_page()
343 (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind)); tmem_frontswap_flush_page()
349 int pool = tmem_frontswap_poolid; tmem_frontswap_flush_area() local
352 if (pool < 0) tmem_frontswap_flush_area()
355 (void)xen_tmem_flush_object(pool, oswiz(type, ind)); tmem_frontswap_flush_area()
/linux-4.1.27/drivers/soc/ti/
H A Dknav_qmss_queue.c670 static void kdesc_fill_pool(struct knav_pool *pool) kdesc_fill_pool() argument
675 region = pool->region; kdesc_fill_pool()
676 pool->desc_size = region->desc_size; kdesc_fill_pool()
677 for (i = 0; i < pool->num_desc; i++) { kdesc_fill_pool()
678 int index = pool->region_offset + i; kdesc_fill_pool()
682 dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES); kdesc_fill_pool()
683 dma_sync_single_for_device(pool->dev, dma_addr, dma_size, kdesc_fill_pool()
685 knav_queue_push(pool->queue, dma_addr, dma_size, 0); kdesc_fill_pool()
690 static void kdesc_empty_pool(struct knav_pool *pool) kdesc_empty_pool() argument
697 if (!pool->queue) kdesc_empty_pool()
701 dma = knav_queue_pop(pool->queue, &size); kdesc_empty_pool()
704 desc = knav_pool_desc_dma_to_virt(pool, dma); kdesc_empty_pool()
706 dev_dbg(pool->kdev->dev, kdesc_empty_pool()
711 WARN_ON(i != pool->num_desc); kdesc_empty_pool()
712 knav_queue_close(pool->queue); kdesc_empty_pool()
719 struct knav_pool *pool = ph; knav_pool_desc_virt_to_dma() local
720 return pool->region->dma_start + (virt - pool->region->virt_start); knav_pool_desc_virt_to_dma()
726 struct knav_pool *pool = ph; knav_pool_desc_dma_to_virt() local
727 return pool->region->virt_start + (dma - pool->region->dma_start); knav_pool_desc_dma_to_virt()
732 * knav_pool_create() - Create a pool of descriptors
733 * @name - name to give the pool handle
734 * @num_desc - numbers of descriptors in the pool
738 * Returns a pool handle on success.
745 struct knav_pool *pool, *pi; knav_pool_create() local
754 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); knav_pool_create()
755 if (!pool) { knav_pool_create()
756 dev_err(kdev->dev, "out of memory allocating pool\n"); knav_pool_create()
773 pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
774 if (IS_ERR_OR_NULL(pool->queue)) {
776 "failed to open queue for pool(%s), error %ld\n",
777 name, PTR_ERR(pool->queue));
778 ret = PTR_ERR(pool->queue);
782 pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
783 pool->kdev = kdev;
784 pool->dev = kdev->dev;
789 dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
812 pool->region = region;
813 pool->num_desc = num_desc;
814 pool->region_offset = last_offset;
816 list_add_tail(&pool->list, &kdev->pools);
817 list_add_tail(&pool->region_inst, node);
819 dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
826 kdesc_fill_pool(pool);
827 return pool;
832 kfree(pool->name);
833 devm_kfree(kdev->dev, pool);
839 * knav_pool_destroy() - Free a pool of descriptors
840 * @pool - pool handle
844 struct knav_pool *pool = ph; knav_pool_destroy() local
846 if (!pool) knav_pool_destroy()
849 if (!pool->region) knav_pool_destroy()
852 kdesc_empty_pool(pool); knav_pool_destroy()
855 pool->region->used_desc -= pool->num_desc; knav_pool_destroy()
856 list_del(&pool->region_inst); knav_pool_destroy()
857 list_del(&pool->list); knav_pool_destroy()
860 kfree(pool->name); knav_pool_destroy()
861 devm_kfree(kdev->dev, pool); knav_pool_destroy()
867 * knav_pool_desc_get() - Get a descriptor from the pool
868 * @pool - pool handle
870 * Returns descriptor from the pool.
874 struct knav_pool *pool = ph; knav_pool_desc_get() local
879 dma = knav_queue_pop(pool->queue, &size); knav_pool_desc_get()
882 data = knav_pool_desc_dma_to_virt(pool, dma); knav_pool_desc_get()
888 * knav_pool_desc_put() - return a descriptor to the pool
889 * @pool - pool handle
893 struct knav_pool *pool = ph; knav_pool_desc_put() local
895 dma = knav_pool_desc_virt_to_dma(pool, desc); knav_pool_desc_put()
896 knav_queue_push(pool->queue, dma, pool->region->desc_size, 0); knav_pool_desc_put()
902 * @pool - pool handle
913 struct knav_pool *pool = ph; knav_pool_desc_map() local
914 *dma = knav_pool_desc_virt_to_dma(pool, desc); knav_pool_desc_map()
915 size = min(size, pool->region->desc_size); knav_pool_desc_map()
918 dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE); knav_pool_desc_map()
929 * @pool - pool handle
938 struct knav_pool *pool = ph; knav_pool_desc_unmap() local
942 desc_sz = min(dma_sz, pool->region->desc_size); knav_pool_desc_unmap()
943 desc = knav_pool_desc_dma_to_virt(pool, dma); knav_pool_desc_unmap()
944 dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE); knav_pool_desc_unmap()
951 * knav_pool_count() - Get the number of descriptors in pool.
952 * @pool - pool handle
953 * Returns number of elements in the pool.
957 struct knav_pool *pool = ph; knav_pool_count() local
958 return knav_queue_get_count(pool->queue); knav_pool_count()
968 struct knav_pool *pool; knav_queue_setup_region() local
1010 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); knav_queue_setup_region()
1011 if (!pool) { knav_queue_setup_region()
1012 dev_err(kdev->dev, "out of memory allocating dummy pool\n"); knav_queue_setup_region()
1015 pool->num_desc = 0; knav_queue_setup_region()
1016 pool->region_offset = region->num_desc; knav_queue_setup_region()
1017 list_add(&pool->region_inst, &region->pools); knav_queue_setup_region()
1318 struct knav_pool *pool, *tmp; knav_queue_free_regions() local
1325 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) knav_queue_free_regions()
1326 knav_pool_destroy(pool); knav_queue_free_regions()
H A Dknav_qmss.h221 * @name: pool name
223 * @region_inst: instance in the region's pool list
361 #define for_each_pool(kdev, pool) \
362 list_for_each_entry(pool, &kdev->pools, list)
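Per the kernel-doc in knav_qmss_queue.c above, knav_pool_create() returns an opaque handle for a pool of hardware descriptors that are then popped and pushed with knav_pool_desc_get()/knav_pool_desc_put(). A rough usage sketch based on those signatures; the pool name, descriptor count, region index and header location are assumptions:

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/soc/ti/knav_qmss.h>     /* assumed location of the pool API */

    static void example_knav_pool_usage(void)
    {
            void *pool, *desc;

            /* 128 descriptors carved out of descriptor region 0 */
            pool = knav_pool_create("example-rx-pool", 128, 0);
            if (IS_ERR_OR_NULL(pool))
                    return;

            desc = knav_pool_desc_get(pool);        /* pop one descriptor */
            if (!IS_ERR_OR_NULL(desc))
                    knav_pool_desc_put(pool, desc); /* push it back */

            pr_info("descriptors in pool: %d\n", knav_pool_count(pool));

            knav_pool_destroy(pool);
    }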
/linux-4.1.27/arch/ia64/kernel/
H A Duncached.c12 * pool of pages per node.
35 struct gen_pool *pool; member in struct:uncached_pool
37 int nchunks_added; /* #of converted chunks added to pool */
70 * Add a new chunk of uncached memory pages to the specified pool.
72 * @pool: pool to add new chunk of uncached memory to
154 * can add it to the pool. uncached_add_chunk()
156 status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid); uncached_add_chunk()
202 if (uc_pool->pool == NULL) uncached_alloc_page()
205 uc_addr = gen_pool_alloc(uc_pool->pool, uncached_alloc_page()
229 struct gen_pool *pool = uncached_pools[nid].pool; uncached_free_page() local
231 if (unlikely(pool == NULL)) uncached_free_page()
237 gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE); uncached_free_page()
255 struct gen_pool *pool = uncached_pools[nid].pool; uncached_build_memmap() local
260 if (pool != NULL) { uncached_build_memmap()
262 (void) gen_pool_add(pool, uc_start, size, nid); uncached_build_memmap()
273 uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid); for_each_node_state()
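The ia64 uncached allocator above manages converted memory granules with the generic pool allocator (genalloc): regions are donated with gen_pool_add() and later carved up with gen_pool_alloc()/gen_pool_free(); the SRAM pool in arch/sh/mm/sram.c earlier in this listing uses the same interface. A minimal sketch with arbitrary granularity and no NUMA node affinity:

    #include <linux/errno.h>
    #include <linux/genalloc.h>

    static struct gen_pool *example_pool;

    static int example_genpool_init(unsigned long base, size_t size)
    {
            /* minimum allocation order 5 => 32-byte granularity, any node */
            example_pool = gen_pool_create(5, -1);
            if (!example_pool)
                    return -ENOMEM;

            /* donate the whole region [base, base + size) to the pool */
            return gen_pool_add(example_pool, base, size, -1);
    }

    static unsigned long example_genpool_get(size_t len)
    {
            return gen_pool_alloc(example_pool, len);       /* 0 on failure */
    }

    static void example_genpool_put(unsigned long addr, size_t len)
    {
            gen_pool_free(example_pool, addr, len);
    }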
H A Dmca_drv.c69 * This pool keeps pointers to the section part of SAL error record
72 slidx_list_t *buffer; /* section pointer list pool */
73 int cur_idx; /* Current index of section pointer list pool */
74 int max_idx; /* Maximum index of section pointer list pool */
305 * init_record_index_pools - Initialize pool of lists for SAL record index
337 * 3. Allocate the pool large enough to hold 2 SAL records init_record_index_pools()
/linux-4.1.27/arch/arm/common/
H A Ddmabounce.c56 struct dmabounce_pool *pool; member in struct:safe_buffer
63 struct dma_pool *pool; member in struct:dmabounce_pool
111 struct dmabounce_pool *pool; alloc_safe_buffer() local
119 pool = &device_info->small; alloc_safe_buffer()
121 pool = &device_info->large; alloc_safe_buffer()
123 pool = NULL; alloc_safe_buffer()
135 buf->pool = pool; alloc_safe_buffer()
137 if (pool) { alloc_safe_buffer()
138 buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC, alloc_safe_buffer()
154 if (pool) alloc_safe_buffer()
155 pool->allocs++; alloc_safe_buffer()
166 /* determine if a buffer is from our "safe" pool */
199 if (buf->pool) free_safe_buffer()
200 dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr); free_safe_buffer()
467 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, dmabounce_init_pool() argument
470 pool->size = size; dmabounce_init_pool()
471 DO_STATS(pool->allocs = 0); dmabounce_init_pool()
472 pool->pool = dma_pool_create(name, dev, size, dmabounce_init_pool()
476 return pool->pool ? 0 : -ENOMEM; dmabounce_init_pool()
497 "dmabounce: could not allocate DMA pool for %ld byte objects\n", dmabounce_register_dev()
508 "dmabounce: could not allocate DMA pool for %ld byte objects\n", dmabounce_register_dev()
534 dma_pool_destroy(device_info->small.pool); dmabounce_register_dev()
561 if (device_info->small.pool) dmabounce_unregister_dev()
562 dma_pool_destroy(device_info->small.pool); dmabounce_unregister_dev()
563 if (device_info->large.pool) dmabounce_unregister_dev()
564 dma_pool_destroy(device_info->large.pool); dmabounce_unregister_dev()
/linux-4.1.27/drivers/s390/scsi/
H A Dzfcp_aux.c204 adapter->pool.erp_req = zfcp_allocate_low_mem_buffers()
206 if (!adapter->pool.erp_req) zfcp_allocate_low_mem_buffers()
209 adapter->pool.gid_pn_req = zfcp_allocate_low_mem_buffers()
211 if (!adapter->pool.gid_pn_req) zfcp_allocate_low_mem_buffers()
214 adapter->pool.scsi_req = zfcp_allocate_low_mem_buffers()
216 if (!adapter->pool.scsi_req) zfcp_allocate_low_mem_buffers()
219 adapter->pool.scsi_abort = zfcp_allocate_low_mem_buffers()
221 if (!adapter->pool.scsi_abort) zfcp_allocate_low_mem_buffers()
224 adapter->pool.status_read_req = zfcp_allocate_low_mem_buffers()
227 if (!adapter->pool.status_read_req) zfcp_allocate_low_mem_buffers()
230 adapter->pool.qtcb_pool = zfcp_allocate_low_mem_buffers()
232 if (!adapter->pool.qtcb_pool) zfcp_allocate_low_mem_buffers()
236 adapter->pool.sr_data = zfcp_allocate_low_mem_buffers()
238 if (!adapter->pool.sr_data) zfcp_allocate_low_mem_buffers()
241 adapter->pool.gid_pn = zfcp_allocate_low_mem_buffers()
243 if (!adapter->pool.gid_pn) zfcp_allocate_low_mem_buffers()
251 if (adapter->pool.erp_req) zfcp_free_low_mem_buffers()
252 mempool_destroy(adapter->pool.erp_req); zfcp_free_low_mem_buffers()
253 if (adapter->pool.scsi_req) zfcp_free_low_mem_buffers()
254 mempool_destroy(adapter->pool.scsi_req); zfcp_free_low_mem_buffers()
255 if (adapter->pool.scsi_abort) zfcp_free_low_mem_buffers()
256 mempool_destroy(adapter->pool.scsi_abort); zfcp_free_low_mem_buffers()
257 if (adapter->pool.qtcb_pool) zfcp_free_low_mem_buffers()
258 mempool_destroy(adapter->pool.qtcb_pool); zfcp_free_low_mem_buffers()
259 if (adapter->pool.status_read_req) zfcp_free_low_mem_buffers()
260 mempool_destroy(adapter->pool.status_read_req); zfcp_free_low_mem_buffers()
261 if (adapter->pool.sr_data) zfcp_free_low_mem_buffers()
262 mempool_destroy(adapter->pool.sr_data); zfcp_free_low_mem_buffers()
263 if (adapter->pool.gid_pn) zfcp_free_low_mem_buffers()
264 mempool_destroy(adapter->pool.gid_pn); zfcp_free_low_mem_buffers()
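zfcp_allocate_low_mem_buffers() above builds one mempool per request type so that error-recovery and I/O structures can still be allocated under memory pressure: each mempool keeps a minimum number of preallocated elements and falls back to that reserve when normal allocation fails. A minimal sketch of the mempool API with an arbitrary element size and reserve count:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mempool.h>

    static mempool_t *req_pool;

    static int example_mempool_init(void)
    {
            /* keep at least 4 preallocated 256-byte elements in reserve */
            req_pool = mempool_create_kmalloc_pool(4, 256);
            return req_pool ? 0 : -ENOMEM;
    }

    static void example_mempool_use(void)
    {
            /* GFP_ATOMIC allocations may dip into the reserve; they can
             * still return NULL once the reserve itself is exhausted. */
            void *req = mempool_alloc(req_pool, GFP_ATOMIC);

            if (req)
                    mempool_free(req, req_pool);
    }

    static void example_mempool_exit(void)
    {
            mempool_destroy(req_pool);
    }

The slab-backed variant used in the i2o hits further down (mempool_create_slab_pool()) works the same way but draws its elements from an existing kmem_cache.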
H A Dzfcp_fsf.c80 if (likely(req->pool)) { zfcp_fsf_req_free()
82 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); zfcp_fsf_req_free()
83 mempool_free(req, req->pool); zfcp_fsf_req_free()
217 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_status_read_handler()
265 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_status_read_handler()
647 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) zfcp_fsf_alloc() argument
651 if (likely(pool)) zfcp_fsf_alloc()
652 req = mempool_alloc(pool, GFP_ATOMIC); zfcp_fsf_alloc()
660 req->pool = pool; zfcp_fsf_alloc()
664 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) zfcp_qtcb_alloc() argument
668 if (likely(pool)) zfcp_qtcb_alloc()
669 qtcb = mempool_alloc(pool, GFP_ATOMIC); zfcp_qtcb_alloc()
682 mempool_t *pool) zfcp_fsf_req_create()
685 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); zfcp_fsf_req_create()
702 if (likely(pool)) zfcp_fsf_req_create()
703 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool); zfcp_fsf_req_create()
775 adapter->pool.status_read_req); zfcp_fsf_status_read()
781 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC); zfcp_fsf_status_read()
801 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_status_read()
886 qdio->adapter->pool.scsi_abort); zfcp_fsf_abort_fcp_cmnd()
1047 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1050 struct zfcp_fsf_ct_els *ct, mempool_t *pool, zfcp_fsf_send_ct()
1062 SBAL_SFLAGS0_TYPE_WRITE_READ, pool); zfcp_fsf_send_ct()
1201 qdio->adapter->pool.erp_req); zfcp_fsf_exchange_config_data()
1291 qdio->adapter->pool.erp_req); zfcp_fsf_exchange_port_data()
1448 qdio->adapter->pool.erp_req); zfcp_fsf_open_port()
1514 qdio->adapter->pool.erp_req); zfcp_fsf_close_port()
1587 qdio->adapter->pool.erp_req); zfcp_fsf_open_wka_port()
1640 qdio->adapter->pool.erp_req); zfcp_fsf_close_wka_port()
1729 qdio->adapter->pool.erp_req); zfcp_fsf_close_physical_port()
1848 adapter->pool.erp_req); zfcp_fsf_open_lun()
1939 qdio->adapter->pool.erp_req); zfcp_fsf_close_lun()
2220 sbtype, adapter->pool.scsi_req); zfcp_fsf_fcp_cmnd()
2322 qdio->adapter->pool.scsi_req); zfcp_fsf_fcp_task_mgmt()
680 zfcp_fsf_req_create(struct zfcp_qdio *qdio, u32 fsf_cmd, u8 sbtype, mempool_t *pool) zfcp_fsf_req_create() argument
1049 zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, struct zfcp_fsf_ct_els *ct, mempool_t *pool, unsigned int timeout) zfcp_fsf_send_ct() argument
H A Dzfcp_def.h185 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ member in struct:zfcp_adapter
295 * @pool: reference to memory pool if used for this request
312 mempool_t *pool; member in struct:zfcp_fsf_req
/linux-4.1.27/drivers/staging/fsl-mc/include/
H A Dmc-private.h57 * @type: type of resources in the pool
58 * @max_count: maximum number of resources in the pool
59 * @free_count: number of free resources in the pool
60 * @mutex: mutex to serialize access to the pool's free list
61 * @free_list: anchor node of list of free resources in the pool
62 * @mc_bus: pointer to the MC bus that owns this resource pool
76 * @resource_pools: array of resource pools (one pool per resource type)
H A Dmc.h80 * NOTE: New resource pool types must be added before this entry
91 * @parent_pool: pointer to the parent resource pool from which this
93 * @node: Node in the free list of the corresponding resource pool
142 * corresponding resource pool in the object's parent DPRC, using the
H A Ddprc.h44 * allocated by the DPRC from the pool of ICIDs.
51 * user and should be allocated by the DPRC from the pool of portal ids.
568 * dprc_get_pool() - Get the type (string) of a certain dprc's pool
571 * @pool_index: Index of the pool to be queried (< pool_count)
572 * @type: The type of the pool
574 * The pool types are retrieved one by one by incrementing
646 * to this container, by pool type
649 * @type: pool type
691 * @type: pool type
H A Dmc-sys.h54 * the MC portal came from a resource pool, or NULL if the MC portal
/linux-4.1.27/drivers/atm/
H A Dzatm.h45 int pool; /* free buffer pool */ member in struct:zatm_vcc
64 int pool_ref[NR_POOLS]; /* free buffer pool usage counters */
66 /* last entry in respective pool */
67 struct sk_buff_head pool[NR_POOLS];/* free buffer pools */ member in struct:zatm_dev
68 struct zatm_pool_info pool_info[NR_POOLS]; /* pool information */
78 u32 pool_base; /* Free buffer pool dsc (word addr) */
H A Dambassador.c162 Note on RX pool sizes:
164 Each pool should have enough buffers to handle a back-to-back stream
178 In fact, each pool should have enough buffers to support the
194 subject to the limit given by the pool size.
208 by another command on the same VC, 3. the changes to RX pool size
687 static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) { rx_give() argument
688 amb_rxq * rxq = &dev->rxq[pool]; rx_give()
691 PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool); rx_give()
702 wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr)); rx_give()
712 static int rx_take (amb_dev * dev, unsigned char pool) { rx_take() argument
713 amb_rxq * rxq = &dev->rxq[pool]; rx_take()
716 PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool); rx_take()
745 static void drain_rx_pool (amb_dev * dev, unsigned char pool) { drain_rx_pool() argument
746 amb_rxq * rxq = &dev->rxq[pool]; drain_rx_pool()
748 PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool); drain_rx_pool()
753 /* we are not quite like the fill pool routines as we cannot just drain_rx_pool()
759 cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT); drain_rx_pool()
762 /* the pool may also be emptied via the interrupt handler */ drain_rx_pool()
764 if (rx_take (dev, pool)) drain_rx_pool()
772 unsigned char pool; drain_rx_pools() local
776 for (pool = 0; pool < NUM_RX_POOLS; ++pool) drain_rx_pools()
777 drain_rx_pool (dev, pool); drain_rx_pools()
780 static void fill_rx_pool (amb_dev * dev, unsigned char pool, fill_rx_pool() argument
786 PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority); fill_rx_pool()
791 rxq = &dev->rxq[pool]; fill_rx_pool()
796 PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool); fill_rx_pool()
808 if (rx_give (dev, &rx, pool)) fill_rx_pool()
818 unsigned char pool; fill_rx_pools() local
822 for (pool = 0; pool < NUM_RX_POOLS; ++pool) fill_rx_pools()
823 fill_rx_pool (dev, pool, GFP_ATOMIC); fill_rx_pools()
867 unsigned char pool; interrupt_handler() local
868 for (pool = 0; pool < NUM_RX_POOLS; ++pool) interrupt_handler()
869 while (!rx_take (dev, pool)) interrupt_handler()
1026 unsigned char pool = -1; // hush gcc amb_open() local
1105 // choose an RX pool (arranged in increasing size) amb_open()
1106 for (pool = 0; pool < NUM_RX_POOLS; ++pool) amb_open()
1107 if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) { amb_open()
1108 PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)", amb_open()
1109 pool, rxtp->max_sdu, dev->rxq[pool].buffer_size); amb_open()
1112 if (pool == NUM_RX_POOLS) { amb_open()
1114 "no pool suitable for VC (RX max_sdu %d is too large)", amb_open()
1165 // ... and TX flags, preserving the RX pool amb_open()
1169 ( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT) amb_open()
1174 // no RXer on the channel, just open (with pool zero) amb_open()
1189 vcc->rx_info.pool = pool; amb_open()
1192 /* grow RX buffer pool */ amb_open()
1193 if (!dev->rxq[pool].buffers_wanted) amb_open()
1194 dev->rxq[pool].buffers_wanted = rx_lats; amb_open()
1195 dev->rxq[pool].buffers_wanted += 1; amb_open()
1196 fill_rx_pool (dev, pool, GFP_KERNEL); amb_open()
1200 // switch (from pool zero) to this pool, preserving the TX bits amb_open()
1204 ( (pool << SRB_POOL_SHIFT) amb_open()
1210 cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT); amb_open()
1248 // ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool amb_close()
1265 unsigned char pool = vcc->rx_info.pool; amb_close() local
1269 // TXer still on the channel, just go to pool zero XXX not really needed amb_close()
1288 /* shrink RX buffer pool */ amb_close()
1289 dev->rxq[pool].buffers_wanted -= 1; amb_close()
1290 if (dev->rxq[pool].buffers_wanted == rx_lats) { amb_close()
1291 dev->rxq[pool].buffers_wanted = 0; amb_close()
1292 drain_rx_pool (dev, pool); amb_close()
1391 unsigned char pool = vcc->rx_info.pool;
1409 if (!rx_give (dev, &rx, pool)) {
1411 PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
1427 unsigned char pool; amb_proc_read() local
1457 for (pool = 0; pool < NUM_RX_POOLS; ++pool) { amb_proc_read()
1458 amb_rxq * r = &dev->rxq[pool]; amb_proc_read()
1468 for (pool = 0; pool < NUM_RX_POOLS; ++pool) { amb_proc_read()
1469 amb_rxq * r = &dev->rxq[pool]; amb_proc_read()
1513 unsigned char pool; create_queues() local
1524 for (pool = 0; pool < NUM_RX_POOLS; ++pool) create_queues()
1525 total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out)); create_queues()
1587 for (pool = 0; pool < NUM_RX_POOLS; ++pool) { create_queues()
1590 amb_rxq * rxq = &dev->rxq[pool]; create_queues()
1592 rxq->buffer_size = rx_buffer_sizes[pool]; create_queues()
1596 rxq->low = rxs[pool] - 1; create_queues()
1598 rxq->maximum = rxs[pool] - 1; create_queues()
1602 rxq->in.limit = in + rxs[pool]; create_queues()
1609 rxq->out.limit = out + rxs[pool]; create_queues()
1989 unsigned char pool; amb_talk() local
2001 for (pool = 0; pool < NUM_RX_POOLS; ++pool) { amb_talk()
2003 a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start); amb_talk()
2004 a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit); amb_talk()
2005 a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start); amb_talk()
2006 a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit); amb_talk()
2007 a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size); amb_talk()
2141 unsigned char pool; setup_dev() local
2169 for (pool = 0; pool < NUM_RX_POOLS; ++pool) setup_dev()
2170 spin_lock_init (&dev->rxq[pool].lock); setup_dev()
2316 unsigned char pool; amb_check_args() local
2334 for (pool = 0; pool < NUM_RX_POOLS; ++pool) amb_check_args()
2335 if (rxs[pool] < MIN_QUEUE_SIZE) amb_check_args()
2337 pool, rxs[pool] = MIN_QUEUE_SIZE); amb_check_args()
2341 for (pool = 0; pool < NUM_RX_POOLS; ++pool) amb_check_args()
2342 if (rxs_bs[pool] <= max_rx_size) amb_check_args()
2343 PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)", amb_check_args()
2344 pool, rxs_bs[pool]); amb_check_args()
2346 max_rx_size = rxs_bs[pool]; amb_check_args()
H A Dzatm.c178 static void refill_pool(struct atm_dev *dev,int pool) refill_pool() argument
188 size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 : refill_pool()
189 pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head); refill_pool()
196 offset = zatm_dev->pool_info[pool].offset+ refill_pool()
201 free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) & refill_pool()
204 if (free >= zatm_dev->pool_info[pool].low_water) return; refill_pool()
206 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), refill_pool()
207 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); refill_pool()
211 while (free < zatm_dev->pool_info[pool].high_water) { refill_pool()
233 if (zatm_dev->last_free[pool]) refill_pool()
234 ((struct rx_buffer_head *) (zatm_dev->last_free[pool]-> refill_pool()
236 zatm_dev->last_free[pool] = skb; refill_pool()
237 skb_queue_tail(&zatm_dev->pool[pool],skb); refill_pool()
245 zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, refill_pool()
249 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), refill_pool()
250 zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); refill_pool()
256 static void drain_free(struct atm_dev *dev,int pool) drain_free() argument
258 skb_queue_purge(&ZATM_DEV(dev)->pool[pool]); drain_free()
278 static void use_pool(struct atm_dev *dev,int pool) use_pool() argument
285 if (!(zatm_dev->pool_info[pool].ref_count++)) { use_pool()
286 skb_queue_head_init(&zatm_dev->pool[pool]); use_pool()
287 size = pool-ZATM_AAL5_POOL_BASE; use_pool()
291 zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) << use_pool()
295 zatm_dev->pool_base+pool*2); use_pool()
297 pool*2+1); use_pool()
299 zatm_dev->last_free[pool] = NULL; use_pool()
300 refill_pool(dev,pool); use_pool()
302 DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count); use_pool()
306 static void unuse_pool(struct atm_dev *dev,int pool) unuse_pool() argument
308 if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count)) unuse_pool()
309 drain_free(dev,pool); unuse_pool()
421 pos = ZATM_VCC(vcc)->pool; poll_rx()
424 skb_unlink(skb, zatm_dev->pool + pos); poll_rx()
476 refill_pool(dev,zatm_vcc->pool); poll_rx()
502 zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); open_rx_first()
506 zatm_vcc->pool = ZATM_AAL0_POOL; open_rx_first()
508 if (zatm_vcc->pool < 0) return -EMSGSIZE; open_rx_first()
518 use_pool(vcc->dev,zatm_vcc->pool); open_rx_first()
519 DPRINTK("pool %d\n",zatm_vcc->pool); open_rx_first()
522 zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT, open_rx_first()
594 unuse_pool(vcc->dev,zatm_vcc->pool); close_rx()
1290 DPRINTK("RX pool 0x%08lx\n",curr); zatm_start()
1291 zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */ zatm_start()
1298 zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */ zatm_start()
1452 int pool; zatm_ioctl() local
1454 if (get_user(pool, zatm_ioctl()
1457 if (pool < 0 || pool > ZATM_LAST_POOL) zatm_ioctl()
1460 info = zatm_dev->pool_info[pool]; zatm_ioctl()
1462 zatm_dev->pool_info[pool].rqa_count = 0; zatm_ioctl()
1463 zatm_dev->pool_info[pool].rqu_count = 0; zatm_ioctl()
1473 int pool; zatm_ioctl() local
1476 if (get_user(pool, zatm_ioctl()
1479 if (pool < 0 || pool > ZATM_LAST_POOL) zatm_ioctl()
1486 pool_info[pool].low_water; zatm_ioctl()
1489 pool_info[pool].high_water; zatm_ioctl()
1492 pool_info[pool].next_thres; zatm_ioctl()
1497 zatm_dev->pool_info[pool].low_water = zatm_ioctl()
1499 zatm_dev->pool_info[pool].high_water = zatm_ioctl()
1501 zatm_dev->pool_info[pool].next_thres = zatm_ioctl()
H A DuPD98401.h28 #define uPD98401_POOL 0x000f0000 /* pool number */
77 #define uPD98401_AAL5_POOL 0x0000001f /* Free buffer pool number */
257 #define uPD98401_RXFP_REMAIN 0x0000ffff /* remaining batches in pool */
266 #define uPD98401_RXVC_POOL 0x001f0000 /* free buffer pool number */
/linux-4.1.27/include/rdma/
H A Dib_fmr_pool.h42 * struct ib_fmr_pool_param - Parameters for creating FMR pool
45 * @access:Access flags for FMRs in pool.
46 * @pool_size:Number of FMRs to allocate for pool.
61 void (*flush_function)(struct ib_fmr_pool *pool,
69 struct ib_fmr_pool *pool; member in struct:ib_pool_fmr
82 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
84 int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
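ib_fmr_pool.h above documents the creation parameters for a pool of fast memory regions tied to a protection domain; consumers map and unmap FMRs from the pool per I/O rather than registering memory each time. A rough sketch of pool creation based on the parameters listed above, with placeholder sizes; the access flags, watermark and caching choice are all illustrative:

    #include <rdma/ib_fmr_pool.h>

    static struct ib_fmr_pool *example_create_fmr_pool(struct ib_pd *pd)
    {
            struct ib_fmr_pool_param params = {
                    .max_pages_per_fmr = 64,
                    .page_shift        = PAGE_SHIFT,
                    .access            = IB_ACCESS_LOCAL_WRITE |
                                         IB_ACCESS_REMOTE_READ |
                                         IB_ACCESS_REMOTE_WRITE,
                    .pool_size         = 32,  /* FMRs preallocated in the pool */
                    .dirty_watermark   = 8,   /* flush after this many unmaps */
                    .cache             = 1,   /* reuse mappings when possible */
            };

            return ib_create_fmr_pool(pd, &params);   /* ERR_PTR() on failure */
    }

    static void example_destroy_fmr_pool(struct ib_fmr_pool *pool)
    {
            ib_flush_fmr_pool(pool);        /* optional: unmap dirty FMRs now */
            ib_destroy_fmr_pool(pool);
    }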
/linux-4.1.27/drivers/staging/i2o/
H A Dmemory.c270 int i2o_pool_alloc(struct i2o_pool *pool, const char *name, i2o_pool_alloc() argument
273 pool->name = kstrdup(name, GFP_KERNEL); i2o_pool_alloc()
274 if (!pool->name) i2o_pool_alloc()
277 pool->slab = i2o_pool_alloc()
278 kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); i2o_pool_alloc()
279 if (!pool->slab) i2o_pool_alloc()
282 pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); i2o_pool_alloc()
283 if (!pool->mempool) i2o_pool_alloc()
289 kmem_cache_destroy(pool->slab); i2o_pool_alloc()
292 kfree(pool->name); i2o_pool_alloc()
306 void i2o_pool_free(struct i2o_pool *pool) i2o_pool_free() argument
308 mempool_destroy(pool->mempool); i2o_pool_free()
309 kmem_cache_destroy(pool->slab); i2o_pool_free()
310 kfree(pool->name); i2o_pool_free()
H A Di2o_block.h68 mempool_t *pool; member in struct:i2o_block_mempool
/linux-4.1.27/block/
H A Dbounce.c40 pr_info("pool size: %d pages\n", POOL_SIZE); init_emergency_pool()
72 * allocate pages in the DMA region for the ISA pool
81 * as the max address, so check if the pool has already been created.
92 pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE); init_emergency_isa_pool()
125 static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) bounce_end_io() argument
143 mempool_free(bvec->bv_page, pool); bio_for_each_segment_all()
161 static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) __bounce_end_io_read() argument
168 bounce_end_io(bio, pool, err); __bounce_end_io_read()
200 mempool_t *pool, int force) __blk_queue_bounce()
224 to->bv_page = mempool_alloc(pool, q->bounce_gfp); bio_for_each_segment_all()
243 if (pool == page_pool) {
260 mempool_t *pool; blk_queue_bounce() local
278 pool = page_pool; blk_queue_bounce()
281 pool = isa_page_pool; blk_queue_bounce()
287 __blk_queue_bounce(q, bio_orig, pool, must_bounce); blk_queue_bounce()
199 __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, mempool_t *pool, int force) __blk_queue_bounce() argument
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dehca_irq.c75 static struct ehca_comp_pool *pool; variable in typeref:struct:ehca_comp_pool
656 static int find_next_online_cpu(struct ehca_comp_pool *pool) find_next_online_cpu() argument
665 spin_lock_irqsave(&pool->last_cpu_lock, flags); find_next_online_cpu()
667 cpu = cpumask_next(pool->last_cpu, cpu_online_mask); find_next_online_cpu()
670 pool->last_cpu = cpu; find_next_online_cpu()
671 } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active); find_next_online_cpu()
672 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); find_next_online_cpu()
706 cpu_id = find_next_online_cpu(pool); queue_comp_task()
709 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); queue_comp_task()
710 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); queue_comp_task()
717 cpu_id = find_next_online_cpu(pool); queue_comp_task()
718 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); queue_comp_task()
719 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); queue_comp_task()
750 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); comp_task_park()
762 cpu = find_next_online_cpu(pool); comp_task_park()
763 target = per_cpu_ptr(pool->cpu_comp_tasks, cpu); comp_task_park()
764 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu); comp_task_park()
775 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); comp_task_stop()
786 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); comp_task_should_run()
793 struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks); comp_task()
820 pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL); ehca_create_comp_pool()
821 if (pool == NULL) ehca_create_comp_pool()
824 spin_lock_init(&pool->last_cpu_lock); ehca_create_comp_pool()
825 pool->last_cpu = cpumask_any(cpu_online_mask); ehca_create_comp_pool()
827 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task); ehca_create_comp_pool()
828 if (!pool->cpu_comp_tasks) ehca_create_comp_pool()
831 pool->cpu_comp_threads = alloc_percpu(struct task_struct *); ehca_create_comp_pool()
832 if (!pool->cpu_comp_threads) ehca_create_comp_pool()
838 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); for_each_present_cpu()
843 comp_pool_threads.store = pool->cpu_comp_threads;
852 free_percpu(pool->cpu_comp_threads);
854 free_percpu(pool->cpu_comp_tasks);
856 kfree(pool);
867 free_percpu(pool->cpu_comp_threads); ehca_destroy_comp_pool()
868 free_percpu(pool->cpu_comp_tasks); ehca_destroy_comp_pool()
869 kfree(pool); ehca_destroy_comp_pool()
/linux-4.1.27/drivers/scsi/megaraid/
H A Dmegaraid_mm.c214 * Return the kioc to free pool mraid_mm_ioctl()
506 * First we search for a pool with smallest buffer that is >= @xferlen. If
507 * that pool has no free buffer, we will try for the next bigger size. If none
509 @xferlen and attach it to the pool.
514 mm_dmapool_t *pool; mraid_mm_attach_buf() local
531 pool = &adp->dma_pool_list[i]; mraid_mm_attach_buf()
533 if (xferlen > pool->buf_size) mraid_mm_attach_buf()
539 spin_lock_irqsave(&pool->lock, flags); mraid_mm_attach_buf()
541 if (!pool->in_use) { mraid_mm_attach_buf()
543 pool->in_use = 1; mraid_mm_attach_buf()
545 kioc->buf_vaddr = pool->vaddr; mraid_mm_attach_buf()
546 kioc->buf_paddr = pool->paddr; mraid_mm_attach_buf()
548 spin_unlock_irqrestore(&pool->lock, flags); mraid_mm_attach_buf()
552 spin_unlock_irqrestore(&pool->lock, flags); mraid_mm_attach_buf()
564 * We did not get any buffer from the preallocated pool. Let us try mraid_mm_attach_buf()
567 pool = &adp->dma_pool_list[right_pool]; mraid_mm_attach_buf()
569 spin_lock_irqsave(&pool->lock, flags); mraid_mm_attach_buf()
573 kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, mraid_mm_attach_buf()
575 spin_unlock_irqrestore(&pool->lock, flags); mraid_mm_attach_buf()
588 * free kioc pool. If the kioc pool is empty, this function blocks till
633 * mraid_mm_dealloc_kioc - Return kioc to free pool
635 * @kioc : uioc_t node to be returned to free pool
640 mm_dmapool_t *pool; mraid_mm_dealloc_kioc() local
644 pool = &adp->dma_pool_list[kioc->pool_index]; mraid_mm_dealloc_kioc()
647 spin_lock_irqsave(&pool->lock, flags); mraid_mm_dealloc_kioc()
651 * required buffer from the pool, we would have allocated mraid_mm_dealloc_kioc()
657 pci_pool_free(pool->handle, kioc->buf_vaddr, mraid_mm_dealloc_kioc()
660 pool->in_use = 0; mraid_mm_dealloc_kioc()
662 spin_unlock_irqrestore(&pool->lock, flags); mraid_mm_dealloc_kioc()
665 /* Return the kioc to the free pool */ mraid_mm_dealloc_kioc()
939 adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool", mraid_mm_register_adp()
1060 * We maintain a pool of dma buffers per each adapter. Each pool has one
1062 * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We
1064 * pool.
1069 mm_dmapool_t *pool; mraid_mm_setup_dma_pools() local
1080 pool = &adp->dma_pool_list[i]; mraid_mm_setup_dma_pools()
1082 pool->buf_size = bufsize; mraid_mm_setup_dma_pools()
1083 spin_lock_init(&pool->lock); mraid_mm_setup_dma_pools()
1085 pool->handle = pci_pool_create("megaraid mm data buffer", mraid_mm_setup_dma_pools()
1088 if (!pool->handle) { mraid_mm_setup_dma_pools()
1092 pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, mraid_mm_setup_dma_pools()
1093 &pool->paddr); mraid_mm_setup_dma_pools()
1095 if (!pool->vaddr) mraid_mm_setup_dma_pools()
1184 mm_dmapool_t *pool; mraid_mm_teardown_dma_pools() local
1188 pool = &adp->dma_pool_list[i]; mraid_mm_teardown_dma_pools()
1190 if (pool->handle) { mraid_mm_teardown_dma_pools()
1192 if (pool->vaddr) mraid_mm_teardown_dma_pools()
1193 pci_pool_free(pool->handle, pool->vaddr, mraid_mm_teardown_dma_pools()
1194 pool->paddr); mraid_mm_teardown_dma_pools()
1196 pci_pool_destroy(pool->handle); mraid_mm_teardown_dma_pools()
1197 pool->handle = NULL; mraid_mm_teardown_dma_pools()
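The mraid_mm_attach_buf() comments above describe a best-fit strategy: one preallocated single-buffer DMA pool per size class, searched from the smallest size that can hold the transfer, with a fresh pci_pool_alloc() on the right-sized pool as the fallback when the preallocated buffer is busy. A condensed sketch of just the selection loop, with illustrative structure and field names:

    #define EXAMPLE_NUM_POOLS 5

    struct example_dma_buf_pool {
            unsigned int buf_size;  /* e.g. 4k, 8k, 16k, 32k, 64k */
            int in_use;             /* the single preallocated buffer is taken */
    };

    /* Pick the smallest free pool whose buffer holds 'xferlen' bytes.
     * Returns the pool index, or -1 if every suitable pool is busy
     * (the caller then allocates a fresh buffer from the right pool). */
    static int example_pick_pool(struct example_dma_buf_pool *pools,
                                 unsigned int xferlen)
    {
            int i;

            for (i = 0; i < EXAMPLE_NUM_POOLS; i++) {
                    if (xferlen > pools[i].buf_size)
                            continue;               /* too small, try next size */
                    if (!pools[i].in_use) {
                            pools[i].in_use = 1;    /* claim the buffer */
                            return i;
                    }
            }
            return -1;
    }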
H A Dmegaraid_ioctl.h107 * @list : for kioc free pool list maintenance
109 * @buf_vaddr : dma pool buffer attached to kioc for data transfer
110 * @buf_paddr : physical address of the dma pool buffer
111 * @pool_index : index of the dma pool that @buf_vaddr is taken from
226 * mm_dmapool_t : Represents one dma pool with just one buffer
231 * @handle : Handle to the dma pool
232 * @lock : lock to synchronize access to the pool
233 * @in_use : If pool already in use, attach new block
259 * @kioc_pool : pool of free kiocs
260 * @kioc_pool_lock : protection for free pool
263 * @pthru_dma_pool : DMA pool to allocate passthru packets
H A Dmegaraid_mbox.h158 * @mbox_pool : pool of mailboxes
159 * @mbox_pool_handle : handle for the mailbox pool memory
160 * @epthru_pool : a pool for extended passthru commands
161 * @epthru_pool_handle : handle to the pool above
162 * @sg_pool : pool of scatter-gather lists for this driver
163 * @sg_pool_handle : handle to the pool above
H A Dmegaraid_mm.h38 // The smallest dma pool
H A Dmegaraid_sas_fusion.c156 * megasas_get_cmd_fusion - Get a command from the free pool
159 * Returns a free command from the pool
176 printk(KERN_ERR "megasas: Command pool (fusion) empty!\n"); megasas_get_cmd_fusion()
184 * megasas_return_cmd_fusion - Return a cmd to free command pool
186 * @cmd: Command packet to be returned to free command pool
206 * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool
208 * @cmd_mfi: MFI Command packet to be returned to free command pool
209 * @cmd_mpt: MPT Command packet to be returned to free command pool
233 * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
247 printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, " megasas_teardown_frame_pool_fusion()
248 "sense pool : %p\n", fusion->sg_dma_pool, megasas_teardown_frame_pool_fusion()
254 * Return all frames to pool megasas_teardown_frame_pool_fusion()
270 * Now destroy the pool itself megasas_teardown_frame_pool_fusion()
280 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
318 /* Free the Fusion frame pool */ megasas_free_cmds_fusion()
333 * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
351 * Use DMA pool facility provided by PCI layer megasas_create_frame_pool_fusion()
354 fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion", megasas_create_frame_pool_fusion()
359 printk(KERN_DEBUG "megasas: failed to setup request pool " megasas_create_frame_pool_fusion()
363 fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion", megasas_create_frame_pool_fusion()
368 printk(KERN_DEBUG "megasas: failed to setup sense pool " megasas_create_frame_pool_fusion()
446 pci_pool_create("reply_frames pool", instance->pdev, megasas_alloc_cmds_fusion()
451 "reply_frame pool\n"); megasas_alloc_cmds_fusion()
460 "reply_frame pool\n"); megasas_alloc_cmds_fusion()
472 pci_pool_create("io_request_frames pool", instance->pdev, megasas_alloc_cmds_fusion()
477 "io_request_frame pool\n"); megasas_alloc_cmds_fusion()
528 * Add all the commands to command pool (fusion->cmd_pool) megasas_alloc_cmds_fusion()
551 * Create a frame pool and assign one frame to each cmd megasas_alloc_cmds_fusion()
554 printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); megasas_alloc_cmds_fusion()
1065 * Create a pool of commands megasas_init_adapter_fusion()
H A Dmega_common.h109 * @kscb_pool : pool of free scbs for IO
110 * @kscb_pool_lock : lock for pool of free scbs
127 * @uscb_pool : pool of SCBs for user commands
/linux-4.1.27/drivers/scsi/
H A Dscsi.c183 struct scsi_host_cmd_pool *pool = shost->cmd_pool; scsi_host_free_command() local
187 kmem_cache_free(pool->sense_slab, cmd->sense_buffer); scsi_host_free_command()
188 kmem_cache_free(pool->cmd_slab, cmd); scsi_host_free_command()
193 * @shost: SCSI host whose pool to allocate from
202 struct scsi_host_cmd_pool *pool = shost->cmd_pool; scsi_host_alloc_command() local
205 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); scsi_host_alloc_command()
209 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, scsi_host_alloc_command()
210 gfp_mask | pool->gfp_mask); scsi_host_alloc_command()
223 kmem_cache_free(pool->sense_slab, cmd->sense_buffer); scsi_host_alloc_command()
225 kmem_cache_free(pool->cmd_slab, cmd); scsi_host_alloc_command()
351 scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool) scsi_free_host_cmd_pool() argument
353 kfree(pool->sense_name); scsi_free_host_cmd_pool()
354 kfree(pool->cmd_name); scsi_free_host_cmd_pool()
355 kfree(pool); scsi_free_host_cmd_pool()
362 struct scsi_host_cmd_pool *pool; scsi_alloc_host_cmd_pool() local
364 pool = kzalloc(sizeof(*pool), GFP_KERNEL); scsi_alloc_host_cmd_pool()
365 if (!pool) scsi_alloc_host_cmd_pool()
368 pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name); scsi_alloc_host_cmd_pool()
369 pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name); scsi_alloc_host_cmd_pool()
370 if (!pool->cmd_name || !pool->sense_name) { scsi_alloc_host_cmd_pool()
371 scsi_free_host_cmd_pool(pool); scsi_alloc_host_cmd_pool()
375 pool->slab_flags = SLAB_HWCACHE_ALIGN; scsi_alloc_host_cmd_pool()
377 pool->slab_flags |= SLAB_CACHE_DMA; scsi_alloc_host_cmd_pool()
378 pool->gfp_mask = __GFP_DMA; scsi_alloc_host_cmd_pool()
382 hostt->cmd_pool = pool; scsi_alloc_host_cmd_pool()
384 return pool; scsi_alloc_host_cmd_pool()
391 struct scsi_host_cmd_pool *retval = NULL, *pool; scsi_get_host_cmd_pool() local
399 pool = scsi_find_host_cmd_pool(shost); scsi_get_host_cmd_pool()
400 if (!pool) { scsi_get_host_cmd_pool()
401 pool = scsi_alloc_host_cmd_pool(shost); scsi_get_host_cmd_pool()
402 if (!pool) scsi_get_host_cmd_pool()
406 if (!pool->users) { scsi_get_host_cmd_pool()
407 pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0, scsi_get_host_cmd_pool()
408 pool->slab_flags, NULL); scsi_get_host_cmd_pool()
409 if (!pool->cmd_slab) scsi_get_host_cmd_pool()
412 pool->sense_slab = kmem_cache_create(pool->sense_name, scsi_get_host_cmd_pool()
414 pool->slab_flags, NULL); scsi_get_host_cmd_pool()
415 if (!pool->sense_slab) scsi_get_host_cmd_pool()
419 pool->users++; scsi_get_host_cmd_pool()
420 retval = pool; scsi_get_host_cmd_pool()
426 kmem_cache_destroy(pool->cmd_slab); scsi_get_host_cmd_pool()
429 scsi_free_host_cmd_pool(pool); scsi_get_host_cmd_pool()
438 struct scsi_host_cmd_pool *pool; scsi_put_host_cmd_pool() local
441 pool = scsi_find_host_cmd_pool(shost); scsi_put_host_cmd_pool()
445 * of the command pool; the driver should be implicated in scsi_put_host_cmd_pool()
448 BUG_ON(pool->users == 0); scsi_put_host_cmd_pool()
450 if (!--pool->users) { scsi_put_host_cmd_pool()
451 kmem_cache_destroy(pool->cmd_slab); scsi_put_host_cmd_pool()
452 kmem_cache_destroy(pool->sense_slab); scsi_put_host_cmd_pool()
454 scsi_free_host_cmd_pool(pool); scsi_put_host_cmd_pool()
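scsi_get_host_cmd_pool() above lazily creates two slab caches per host template, one for commands and one for sense buffers, and flips both the slab flags and the GFP mask to DMA-capable memory when the host needs ISA-DMA-capable allocations. A minimal sketch of that flag-selection pattern with placeholder cache names and sizes:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/slab.h>

    struct example_cmd_pool {
            struct kmem_cache *cmd_slab;
            gfp_t gfp_mask;            /* extra GFP bits for allocations */
            unsigned long slab_flags;  /* extra slab flags for the cache */
    };

    static int example_cmd_pool_init(struct example_cmd_pool *pool,
                                     size_t cmd_size, bool needs_isa_dma)
    {
            pool->slab_flags = SLAB_HWCACHE_ALIGN;
            pool->gfp_mask = 0;
            if (needs_isa_dma) {
                    pool->slab_flags |= SLAB_CACHE_DMA; /* slab pages from ZONE_DMA */
                    pool->gfp_mask |= __GFP_DMA;
            }

            pool->cmd_slab = kmem_cache_create("example_cmd", cmd_size, 0,
                                               pool->slab_flags, NULL);
            return pool->cmd_slab ? 0 : -ENOMEM;
    }

    static void *example_cmd_alloc(struct example_cmd_pool *pool, gfp_t gfp)
    {
            return kmem_cache_zalloc(pool->cmd_slab, gfp | pool->gfp_mask);
    }

    static void example_cmd_free(struct example_cmd_pool *pool, void *cmd)
    {
            kmem_cache_free(pool->cmd_slab, cmd);
    }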
/linux-4.1.27/net/sunrpc/
H A Dsvc.c44 SVC_POOL_GLOBAL, /* no mapping, just a single global pool
46 SVC_POOL_PERCPU, /* one pool per cpu */
47 SVC_POOL_PERNODE /* one pool per numa node */
61 unsigned int *pool_to; /* maps pool id to cpu or node */
62 unsigned int *to_pool; /* maps cpu or node to pool id */
123 * Detect best pool mapping mode heuristically,
150 /* default: one global pool */ svc_pool_map_choose_mode()
178 * Initialise the pool map for SVC_POOL_PERCPU mode.
206 * Initialise the pool map for SVC_POOL_PERNODE mode.
279 * freed; this allows the sysadmin to change the pool
316 * will only run on cpus in the given pool.
347 * Use the mapping mode to choose a pool for a given CPU.
349 * a non-NULL pool pointer.
473 struct svc_pool *pool = &serv->sv_pools[i]; __svc_create() local
475 dprintk("svc: initialising pool %u for %s\n", __svc_create()
478 pool->sp_id = i; __svc_create()
479 INIT_LIST_HEAD(&pool->sp_sockets); __svc_create()
480 INIT_LIST_HEAD(&pool->sp_all_threads); __svc_create()
481 spin_lock_init(&pool->sp_lock); __svc_create()
607 svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) svc_prepare_thread() argument
619 rqstp->rq_pool = pool; svc_prepare_thread()
620 spin_lock_bh(&pool->sp_lock); svc_prepare_thread()
621 pool->sp_nrthreads++; svc_prepare_thread()
622 list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads); svc_prepare_thread()
623 spin_unlock_bh(&pool->sp_lock); svc_prepare_thread()
645 * Choose a pool in which to create a new thread, for svc_set_num_threads
648 choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) choose_pool() argument
650 if (pool != NULL) choose_pool()
651 return pool; choose_pool()
660 choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) choose_victim() argument
665 if (pool != NULL) { choose_victim()
666 spin_lock_bh(&pool->sp_lock); choose_victim()
668 /* choose a pool in round-robin fashion */ choose_victim()
670 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; choose_victim()
671 spin_lock_bh(&pool->sp_lock); choose_victim()
672 if (!list_empty(&pool->sp_all_threads)) choose_victim()
674 spin_unlock_bh(&pool->sp_lock); choose_victim()
680 if (!list_empty(&pool->sp_all_threads)) { choose_victim()
684 * Remove from the pool->sp_all_threads list choose_victim()
687 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all); choose_victim()
692 spin_unlock_bh(&pool->sp_lock); choose_victim()
699 * of threads the given number. If `pool' is non-NULL, applies
700 * only to threads in that pool, otherwise round-robins between
709 * to be pool-aware.
712 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) svc_set_num_threads() argument
721 if (pool == NULL) { svc_set_num_threads()
725 spin_lock_bh(&pool->sp_lock); svc_set_num_threads()
726 nrservs -= pool->sp_nrthreads; svc_set_num_threads()
727 spin_unlock_bh(&pool->sp_lock); svc_set_num_threads()
733 chosen_pool = choose_pool(serv, pool, &state); svc_set_num_threads()
761 (task = choose_victim(serv, pool, &state)) != NULL) { svc_set_num_threads()
778 struct svc_pool *pool = rqstp->rq_pool; svc_exit_thread() local
785 spin_lock_bh(&pool->sp_lock); svc_exit_thread()
786 pool->sp_nrthreads--; svc_exit_thread()
789 spin_unlock_bh(&pool->sp_lock); svc_exit_thread()
H A Dsvc_xprt.c42 * svc_pool->sp_lock protects most of the fields of that pool.
325 struct svc_pool *pool; svc_xprt_do_enqueue() local
345 pool = svc_pool_for_cpu(xprt->xpt_server, cpu); svc_xprt_do_enqueue()
347 atomic_long_inc(&pool->sp_stats.packets); svc_xprt_do_enqueue()
352 list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) { svc_xprt_do_enqueue()
378 atomic_long_inc(&pool->sp_stats.threads_woken); svc_xprt_do_enqueue()
394 spin_lock_bh(&pool->sp_lock); svc_xprt_do_enqueue()
395 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); svc_xprt_do_enqueue()
396 pool->sp_stats.sockets_queued++; svc_xprt_do_enqueue()
397 spin_unlock_bh(&pool->sp_lock); svc_xprt_do_enqueue()
422 static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) svc_xprt_dequeue() argument
426 if (list_empty(&pool->sp_sockets)) svc_xprt_dequeue()
429 spin_lock_bh(&pool->sp_lock); svc_xprt_dequeue()
430 if (likely(!list_empty(&pool->sp_sockets))) { svc_xprt_dequeue()
431 xprt = list_first_entry(&pool->sp_sockets, svc_xprt_dequeue()
439 spin_unlock_bh(&pool->sp_lock); svc_xprt_dequeue()
505 * bother with pool 0 as we don't need to wake up more than one thread for
511 struct svc_pool *pool; svc_wake_up() local
513 pool = &serv->sv_pools[0]; svc_wake_up()
516 list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) { svc_wake_up()
529 set_bit(SP_TASK_PENDING, &pool->sp_flags); svc_wake_up()
645 struct svc_pool *pool = rqstp->rq_pool; rqst_should_sleep() local
648 if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags)) rqst_should_sleep()
652 if (!list_empty(&pool->sp_sockets)) rqst_should_sleep()
669 struct svc_pool *pool = rqstp->rq_pool; svc_get_next_xprt() local
680 xprt = svc_xprt_dequeue(pool); svc_get_next_xprt()
689 clear_bit(SP_TASK_PENDING, &pool->sp_flags); svc_get_next_xprt()
717 atomic_long_inc(&pool->sp_stats.threads_timedout); svc_get_next_xprt()
767 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", svc_handle_xprt()
1018 struct svc_pool *pool; svc_dequeue_net() local
1024 pool = &serv->sv_pools[i]; svc_dequeue_net()
1026 spin_lock_bh(&pool->sp_lock); svc_dequeue_net()
1027 list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) { svc_dequeue_net()
1031 spin_unlock_bh(&pool->sp_lock); svc_dequeue_net()
1034 spin_unlock_bh(&pool->sp_lock); svc_dequeue_net()
1312 struct svc_pool *pool = p; svc_pool_stats_next() local
1318 pool = &serv->sv_pools[0]; svc_pool_stats_next()
1320 unsigned int pidx = (pool - &serv->sv_pools[0]); svc_pool_stats_next()
1322 pool = &serv->sv_pools[pidx+1]; svc_pool_stats_next()
1324 pool = NULL; svc_pool_stats_next()
1327 return pool; svc_pool_stats_next()
1336 struct svc_pool *pool = p; svc_pool_stats_show() local
1339 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); svc_pool_stats_show()
1344 pool->sp_id, svc_pool_stats_show()
1345 (unsigned long)atomic_long_read(&pool->sp_stats.packets), svc_pool_stats_show()
1346 pool->sp_stats.sockets_queued, svc_pool_stats_show()
1347 (unsigned long)atomic_long_read(&pool->sp_stats.threads_woken), svc_pool_stats_show()
1348 (unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout)); svc_pool_stats_show()
/linux-4.1.27/tools/hv/
H A Dhv_kvp_daemon.c123 static void kvp_acquire_lock(int pool) kvp_acquire_lock() argument
128 if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { kvp_acquire_lock()
129 syslog(LOG_ERR, "Failed to acquire the lock pool: %d; error: %d %s", pool, kvp_acquire_lock()
135 static void kvp_release_lock(int pool) kvp_release_lock() argument
140 if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { kvp_release_lock()
141 syslog(LOG_ERR, "Failed to release the lock pool: %d; error: %d %s", pool, kvp_release_lock()
147 static void kvp_update_file(int pool) kvp_update_file() argument
155 kvp_acquire_lock(pool); kvp_update_file()
157 filep = fopen(kvp_file_info[pool].fname, "we"); kvp_update_file()
159 syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool, kvp_update_file()
161 kvp_release_lock(pool); kvp_update_file()
165 fwrite(kvp_file_info[pool].records, sizeof(struct kvp_record), kvp_update_file()
166 kvp_file_info[pool].num_records, filep); kvp_update_file()
169 kvp_release_lock(pool); kvp_update_file()
170 syslog(LOG_ERR, "Failed to write file, pool: %d", pool); kvp_update_file()
174 kvp_release_lock(pool); kvp_update_file()
177 static void kvp_update_mem_state(int pool) kvp_update_mem_state() argument
181 struct kvp_record *record = kvp_file_info[pool].records; kvp_update_mem_state()
183 int num_blocks = kvp_file_info[pool].num_blocks; kvp_update_mem_state()
186 kvp_acquire_lock(pool); kvp_update_mem_state()
188 filep = fopen(kvp_file_info[pool].fname, "re"); kvp_update_mem_state()
190 syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool, kvp_update_mem_state()
192 kvp_release_lock(pool); kvp_update_mem_state()
202 syslog(LOG_ERR, "Failed to read file, pool: %d", pool); kvp_update_mem_state()
222 kvp_file_info[pool].num_blocks = num_blocks; kvp_update_mem_state()
223 kvp_file_info[pool].records = record; kvp_update_mem_state()
224 kvp_file_info[pool].num_records = records_read; kvp_update_mem_state()
227 kvp_release_lock(pool); kvp_update_mem_state()
279 syslog(LOG_ERR, "Failed to read file, pool: %d", kvp_file_init()
311 static int kvp_key_delete(int pool, const __u8 *key, int key_size) kvp_key_delete() argument
321 kvp_update_mem_state(pool); kvp_key_delete()
323 num_records = kvp_file_info[pool].num_records; kvp_key_delete()
324 record = kvp_file_info[pool].records; kvp_key_delete()
334 kvp_file_info[pool].num_records--; kvp_key_delete()
335 kvp_update_file(pool); kvp_key_delete()
347 kvp_file_info[pool].num_records--; kvp_key_delete()
348 kvp_update_file(pool); kvp_key_delete()
354 static int kvp_key_add_or_modify(int pool, const __u8 *key, int key_size, kvp_key_add_or_modify() argument
369 kvp_update_mem_state(pool); kvp_key_add_or_modify()
371 num_records = kvp_file_info[pool].num_records; kvp_key_add_or_modify()
372 record = kvp_file_info[pool].records; kvp_key_add_or_modify()
373 num_blocks = kvp_file_info[pool].num_blocks; kvp_key_add_or_modify()
383 kvp_update_file(pool); kvp_key_add_or_modify()
397 kvp_file_info[pool].num_blocks++; kvp_key_add_or_modify()
402 kvp_file_info[pool].records = record; kvp_key_add_or_modify()
403 kvp_file_info[pool].num_records++; kvp_key_add_or_modify()
404 kvp_update_file(pool); kvp_key_add_or_modify()
408 static int kvp_get_value(int pool, const __u8 *key, int key_size, __u8 *value, kvp_get_value() argument
422 kvp_update_mem_state(pool); kvp_get_value()
424 num_records = kvp_file_info[pool].num_records; kvp_get_value()
425 record = kvp_file_info[pool].records; kvp_get_value()
440 static int kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, kvp_pool_enumerate() argument
448 kvp_update_mem_state(pool); kvp_pool_enumerate()
449 record = kvp_file_info[pool].records; kvp_pool_enumerate()
451 if (index >= kvp_file_info[pool].num_records) { kvp_pool_enumerate()
1439 int pool; main() local
1592 pool = hv_msg->kvp_hdr.pool; main()
1658 if (kvp_key_add_or_modify(pool, main()
1667 if (kvp_get_value(pool, main()
1676 if (kvp_key_delete(pool, main()
1690 * If the pool is KVP_POOL_AUTO, dynamically generate main()
1692 * appropriate pool. main()
1694 if (pool != KVP_POOL_AUTO) { main()
1695 if (kvp_pool_enumerate(pool, main()
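The kvp_acquire_lock()/kvp_release_lock() fragments above serialize access to each pool file with POSIX advisory locks: F_SETLKW blocks until the whole-file lock is granted, F_SETLK drops it. A standalone userspace sketch of that pattern (the file name is illustrative, not the daemon's real pool path):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int lock_pool_file(int fd, short type)	/* F_WRLCK to take, F_UNLCK to drop */
	{
		struct flock fl;

		memset(&fl, 0, sizeof(fl));
		fl.l_type = type;
		fl.l_whence = SEEK_SET;		/* start 0, len 0: lock the whole file */
		return fcntl(fd, type == F_UNLCK ? F_SETLK : F_SETLKW, &fl);
	}

	int main(void)
	{
		int fd = open("/tmp/example.pool", O_RDWR | O_CREAT, 0600);

		if (fd < 0 || lock_pool_file(fd, F_WRLCK) < 0)
			return 1;
		/* ... read or rewrite the pool records while the lock is held ... */
		lock_pool_file(fd, F_UNLCK);
		close(fd);
		return 0;
	}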
/linux-4.1.27/drivers/char/
H A Drandom.c70 * added to an "entropy pool", which is mixed using a CRC-like function.
74 * As random bytes are mixed into the entropy pool, the routines keep
79 * hash of the contents of the "entropy pool". The SHA hash avoids
80 * exposing the internal state of the entropy pool. It is believed to
85 * the pool, the output data is totally unpredictable. For this
87 * bits of "true randomness" are contained in the entropy pool as it
114 * contained in the entropy pool.
118 * requested without giving time for the entropy pool to recharge,
134 * add_device_randomness() is for adding data to the random pool that
138 * pool, but it initializes the pool to different values for devices
146 * inputs to the entropy pool. Using the cycle counters and the irq source
151 * entropy pool. Note that high-speed solid state drives with very low
166 * entropy pool below the value in entropy_count. In order to
168 * entropy pool across shut-downs and start-ups. To do this, put the
175 * # Load and then save the whole entropy pool
188 * # Save the whole entropy pool
200 * Effectively, these commands cause the contents of the entropy pool
201 * to be saved at shut-down time and reloaded into the entropy pool at
206 * of the entropy pool requires knowledge of the previous history of
227 * pool, taken from PGPfone. Dale Worley has also contributed many
313 * The minimum number of seconds between urandom pool reseeding. We
315 * input pool even if there are heavy demands on /dev/urandom.
339 * the pool state differs for different inputs, we have preserved the
342 * alterations to the pool's state is not important because we don't
345 * increase his/her knowledge of the pool's state. Since all
415 * storing entropy in an entropy pool.
423 __u32 *pool; member in struct:entropy_store
451 .pool = input_pool_data
460 .pool = blocking_pool_data,
470 .pool = nonblocking_pool_data,
480 * This function adds bytes into the entropy "pool". It does not
484 * The pool is stirred with a primitive polynomial of the appropriate
513 w ^= r->pool[i]; _mix_pool_bytes()
514 w ^= r->pool[(i + tap1) & wordmask]; _mix_pool_bytes()
515 w ^= r->pool[(i + tap2) & wordmask]; _mix_pool_bytes()
516 w ^= r->pool[(i + tap3) & wordmask]; _mix_pool_bytes()
517 w ^= r->pool[(i + tap4) & wordmask]; _mix_pool_bytes()
518 w ^= r->pool[(i + tap5) & wordmask]; _mix_pool_bytes()
521 r->pool[i] = (w >> 3) ^ twist_table[w & 7]; _mix_pool_bytes()
524 * Normally, we add 7 bits of rotation to the pool. _mix_pool_bytes()
525 * At the beginning of the pool, add an extra 7 bits _mix_pool_bytes()
527 * input bits across the pool evenly. _mix_pool_bytes()
555 __u32 pool[4]; member in struct:fast_pool
563 * collector. It's hardcoded for an 128 bit pool and assumes that any
568 __u32 a = f->pool[0], b = f->pool[1]; fast_mix()
569 __u32 c = f->pool[2], d = f->pool[3]; fast_mix()
587 f->pool[0] = a; f->pool[1] = b; fast_mix()
588 f->pool[2] = c; f->pool[3] = d; fast_mix()
648 pr_warn("random: negative entropy/overflow: pool %s count %d\n", credit_entropy_bits()
664 pr_notice("random: %s pool is initialized\n", r->name); credit_entropy_bits()
680 /* If the input pool is getting full, send some credit_entropy_bits()
736 * problem of the nonblocking pool having similar initial state
760 * This function adds entropy to the entropy "pool" by using timing
762 * of how many bits of entropy this call has added to the pool.
764 * The number "num" is also added to the pool - it should somehow describe
890 fast_pool->pool[0] ^= cycles ^ j_high ^ irq; add_interrupt_randomness()
891 fast_pool->pool[1] ^= now ^ c_high; add_interrupt_randomness()
893 fast_pool->pool[2] ^= ip; add_interrupt_randomness()
894 fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : add_interrupt_randomness()
909 __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool)); add_interrupt_randomness()
913 * add it to the pool. For the sake of paranoia don't let the add_interrupt_randomness()
925 /* award one bit for the contents of the fast pool */ add_interrupt_randomness()
952 * from the primary pool to the secondary extraction pool. We make
979 /* For /dev/random's pool, always leave two wakeups' worth */ _xfer_secondary_pool()
997 * Used as a workqueue function so that when the input pool is getting
1014 * given pool, and also debits the entropy count accordingly.
1040 pr_warn("random: negative entropy count: pool %s count %d\n", account()
1092 /* Generate a hash across the pool, 16 words (512 bits) at a time */ extract_buf()
1095 sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); extract_buf()
1098 * We mix the hash back into the pool to prevent backtracking extract_buf()
1099 * attacks (where the attacker knows the state of the pool extract_buf()
1125 * This function extracts randomness from the "entropy pool", and
1131 * pool after each pull to avoid starving other readers.
1184 * This function extracts randomness from the "entropy pool", and
1281 * init_std_data - initialize pool with system data
1283 * @r: pool to initialize
1285 * This function clears the pool's entropy count and mixes some system
1286 * data into the pool to prepare it for use. The pool is not cleared
1287 * as that can only decrease the entropy in the pool.
1487 * Clear the entropy pool counters. We no longer clear random_ioctl()
1488 * the entropy pool, as that's silly. random_ioctl()
1719 * with the goal of minimal entropy pool depletion. As a result, the random
1764 * when our pool is full.
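The random.c comments above describe stirring input bytes into a fixed-size word pool with taps, rotation and a twist. A toy userspace illustration of that general idea follows; the taps, rotation schedule and twist here are arbitrary stand-ins and deliberately do not reproduce the kernel's primitive polynomial or twist table:

	#include <stdint.h>
	#include <stddef.h>

	#define POOL_WORDS 32			/* toy pool: 32 x 32-bit words */

	static uint32_t pool[POOL_WORDS];

	static uint32_t rol32(uint32_t w, unsigned r)
	{
		return r ? (w << r) | (w >> (32 - r)) : w;
	}

	static void toy_mix_pool_bytes(const uint8_t *in, size_t len)
	{
		static unsigned i;		/* current add position, wraps around */
		unsigned rot = 0;

		while (len--) {
			uint32_t w = rol32(*in++, rot);

			w ^= pool[i];				/* fold in the current word */
			w ^= pool[(i + 7) % POOL_WORDS];	/* plus a couple of taps */
			w ^= pool[(i + 13) % POOL_WORDS];
			pool[i] = (w >> 3) ^ (w << 29);		/* cheap stand-in for the twist */
			i = (i + 1) % POOL_WORDS;
			rot = (rot + 7) & 31;			/* spread input bits across the pool */
		}
	}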
/linux-4.1.27/arch/m68k/atari/
H A Dstram.c36 * The ST-RAM allocator allocates memory from a pool of reserved ST-RAM of
38 * As long as this pool is not exhausted, allocation of real ST-RAM can be
97 pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n"); atari_stram_reserve_pages()
102 pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n", atari_stram_reserve_pages()
104 pr_debug("atari_stram pool: stram_virt_offset = %lx\n", atari_stram_reserve_pages()
120 pr_debug("atari_stram pool: kernel not in ST-RAM, using ioremap!\n"); atari_stram_map_pages()
126 pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n", atari_stram_map_pages()
128 pr_debug("atari_stram pool: stram_virt_offset = %lx\n", atari_stram_map_pages()
/linux-4.1.27/tools/usb/usbip/libsrc/
H A Dnames.c160 struct pool { struct
161 struct pool *next;
165 static struct pool *pool_head;
169 struct pool *p; my_malloc()
171 p = calloc(1, sizeof(struct pool)); my_malloc()
189 struct pool *pool; names_free() local
194 for (pool = pool_head; pool != NULL; ) { names_free()
195 struct pool *tmp; names_free()
197 if (pool->mem) names_free()
198 free(pool->mem); names_free()
200 tmp = pool; names_free()
201 pool = pool->next; names_free()
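The names.c fragments above use "pool" in a simpler sense: every allocation is recorded on a singly linked chain so one call can release everything at exit. A standalone sketch of that scheme (names are illustrative):

	#include <stdlib.h>

	struct pool {
		struct pool *next;
		void *mem;
	};

	static struct pool *pool_head;

	static void *pool_malloc(size_t size)
	{
		struct pool *p = calloc(1, sizeof(struct pool));

		if (!p)
			return NULL;
		p->mem = calloc(1, size);
		if (!p->mem) {
			free(p);
			return NULL;
		}
		p->next = pool_head;		/* push onto the global chain */
		pool_head = p;
		return p->mem;
	}

	static void pool_free_all(void)
	{
		struct pool *p = pool_head;

		while (p) {
			struct pool *tmp = p;

			p = p->next;
			free(tmp->mem);
			free(tmp);
		}
		pool_head = NULL;
	}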
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
H A Do2iblnd.c1244 kib_pool_t *pool = &tpo->tpo_pool; kiblnd_map_tx_pool() local
1245 kib_net_t *net = pool->po_owner->ps_net; kiblnd_map_tx_pool()
1265 for (ipage = page_offset = i = 0; i < pool->po_size; i++) { kiblnd_map_tx_pool()
1279 list_add(&tx->tx_list, &pool->po_free_list); kiblnd_map_tx_pool()
1343 static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) kiblnd_destroy_fmr_pool() argument
1345 LASSERT(pool->fpo_map_count == 0); kiblnd_destroy_fmr_pool()
1347 if (pool->fpo_fmr_pool != NULL) kiblnd_destroy_fmr_pool()
1348 ib_destroy_fmr_pool(pool->fpo_fmr_pool); kiblnd_destroy_fmr_pool()
1350 if (pool->fpo_hdev != NULL) kiblnd_destroy_fmr_pool()
1351 kiblnd_hdev_decref(pool->fpo_hdev); kiblnd_destroy_fmr_pool()
1353 LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t)); kiblnd_destroy_fmr_pool()
1358 kib_fmr_pool_t *pool; kiblnd_destroy_fmr_pool_list() local
1361 pool = list_entry(head->next, kib_fmr_pool_t, fpo_list); kiblnd_destroy_fmr_pool_list()
1362 list_del(&pool->fpo_list); kiblnd_destroy_fmr_pool_list()
1363 kiblnd_destroy_fmr_pool(pool); kiblnd_destroy_fmr_pool_list()
1384 /* FMR pool for RDMA */ kiblnd_create_fmr_pool()
1408 CERROR("Failed to create FMR pool: %d\n", rc); kiblnd_create_fmr_pool()
1506 fpo->fpo_map_count--; /* decref the pool */ kiblnd_fmr_pool_unmap()
1509 /* the first pool is persistent */ kiblnd_fmr_pool_unmap()
1565 "Another thread is allocating new FMR pool, waiting for her to complete\n"); kiblnd_fmr_pool_map()
1580 CDEBUG(D_NET, "Allocate new FMR pool\n"); kiblnd_fmr_pool_map()
1595 static void kiblnd_fini_pool(kib_pool_t *pool) kiblnd_fini_pool() argument
1597 LASSERT(list_empty(&pool->po_free_list)); kiblnd_fini_pool()
1598 LASSERT(pool->po_allocated == 0); kiblnd_fini_pool()
1600 CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name); kiblnd_fini_pool()
1603 static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) kiblnd_init_pool() argument
1605 CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); kiblnd_init_pool()
1607 memset(pool, 0, sizeof(kib_pool_t)); kiblnd_init_pool()
1608 INIT_LIST_HEAD(&pool->po_free_list); kiblnd_init_pool()
1609 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); kiblnd_init_pool()
1610 pool->po_owner = ps; kiblnd_init_pool()
1611 pool->po_size = size; kiblnd_init_pool()
1616 kib_pool_t *pool; kiblnd_destroy_pool_list() local
1619 pool = list_entry(head->next, kib_pool_t, po_list); kiblnd_destroy_pool_list()
1620 list_del(&pool->po_list); kiblnd_destroy_pool_list()
1622 LASSERT(pool->po_owner != NULL); kiblnd_destroy_pool_list()
1623 pool->po_owner->ps_pool_destroy(pool); kiblnd_destroy_pool_list()
1661 kib_pool_t *pool; kiblnd_init_poolset() local
1680 rc = ps->ps_pool_create(ps, size, &pool); kiblnd_init_poolset()
1682 list_add(&pool->po_list, &ps->ps_pool_list); kiblnd_init_poolset()
1684 CERROR("Failed to create the first pool for %s\n", ps->ps_name); kiblnd_init_poolset()
1689 static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) kiblnd_pool_is_idle() argument
1691 if (pool->po_allocated != 0) /* still in use */ kiblnd_pool_is_idle()
1693 if (pool->po_failed) kiblnd_pool_is_idle()
1695 return cfs_time_aftereq(now, pool->po_deadline); kiblnd_pool_is_idle()
1698 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) kiblnd_pool_free_node() argument
1701 kib_poolset_t *ps = pool->po_owner; kiblnd_pool_free_node()
1708 ps->ps_node_fini(pool, node); kiblnd_pool_free_node()
1710 LASSERT(pool->po_allocated > 0); kiblnd_pool_free_node()
1711 list_add(node, &pool->po_free_list); kiblnd_pool_free_node()
1712 pool->po_allocated--; kiblnd_pool_free_node()
1714 list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) { kiblnd_pool_free_node()
1715 /* the first pool is persistent */ kiblnd_pool_free_node()
1716 if (ps->ps_pool_list.next == &pool->po_list) kiblnd_pool_free_node()
1719 if (kiblnd_pool_is_idle(pool, now)) kiblnd_pool_free_node()
1720 list_move(&pool->po_list, &zombies); kiblnd_pool_free_node()
1731 kib_pool_t *pool; kiblnd_pool_alloc_node() local
1736 list_for_each_entry(pool, &ps->ps_pool_list, po_list) { kiblnd_pool_alloc_node()
1737 if (list_empty(&pool->po_free_list)) kiblnd_pool_alloc_node()
1740 pool->po_allocated++; kiblnd_pool_alloc_node()
1741 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); kiblnd_pool_alloc_node()
1742 node = pool->po_free_list.next; kiblnd_pool_alloc_node()
1747 ps->ps_node_init(pool, node); kiblnd_pool_alloc_node()
1753 /* no available tx pool and ... */ kiblnd_pool_alloc_node()
1755 /* another thread is allocating a new pool */ kiblnd_pool_alloc_node()
1757 CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n", kiblnd_pool_alloc_node()
1772 CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); kiblnd_pool_alloc_node()
1774 rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); kiblnd_pool_alloc_node()
1779 list_add_tail(&pool->po_list, &ps->ps_pool_list); kiblnd_pool_alloc_node()
1782 CERROR("Can't allocate new %s pool because out of memory\n", kiblnd_pool_alloc_node()
1846 static void kiblnd_destroy_pmr_pool(kib_pool_t *pool) kiblnd_destroy_pmr_pool() argument
1848 kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool); kiblnd_destroy_pmr_pool()
1852 LASSERT(pool->po_allocated == 0); kiblnd_destroy_pmr_pool()
1854 list_for_each_entry_safe(pmr, tmp, &pool->po_free_list, pmr_list) { kiblnd_destroy_pmr_pool()
1867 kiblnd_fini_pool(pool); kiblnd_destroy_pmr_pool()
1885 struct kib_pool *pool; kiblnd_create_pmr_pool() local
1892 CERROR("Failed to allocate PMR pool\n"); kiblnd_create_pmr_pool()
1896 pool = &ppo->ppo_pool; kiblnd_create_pmr_pool()
1897 kiblnd_init_pool(ps, pool, size); kiblnd_create_pmr_pool()
1911 list_add(&pmr->pmr_list, &pool->po_free_list); kiblnd_create_pmr_pool()
1915 ps->ps_pool_destroy(pool); kiblnd_create_pmr_pool()
1920 *pp_po = pool; kiblnd_create_pmr_pool()
1924 static void kiblnd_destroy_tx_pool(kib_pool_t *pool) kiblnd_destroy_tx_pool() argument
1926 kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); kiblnd_destroy_tx_pool()
1929 LASSERT(pool->po_allocated == 0); kiblnd_destroy_tx_pool()
1939 for (i = 0; i < pool->po_size; i++) { kiblnd_destroy_tx_pool()
1966 pool->po_size * sizeof(kib_tx_t)); kiblnd_destroy_tx_pool()
1968 kiblnd_fini_pool(pool); kiblnd_destroy_tx_pool()
1984 kib_pool_t *pool; kiblnd_create_tx_pool() local
1989 CERROR("Failed to allocate TX pool\n"); kiblnd_create_tx_pool()
1993 pool = &tpo->tpo_pool; kiblnd_create_tx_pool()
1994 kiblnd_init_pool(ps, pool, size); kiblnd_create_tx_pool()
2009 ps->ps_pool_destroy(pool); kiblnd_create_tx_pool()
2055 *pp_po = pool; kiblnd_create_tx_pool()
2059 ps->ps_pool_destroy(pool); kiblnd_create_tx_pool()
2063 static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) kiblnd_tx_init() argument
2065 kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, kiblnd_tx_init()
2132 CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n", kiblnd_net_init_pools()
2139 /* TX pool must be created later than FMR/PMR, see LU-2268 kiblnd_net_init_pools()
2144 * FMR/PMR pool and map-on-demand if premapping failed */ kiblnd_net_init_pools()
2149 CERROR("Failed to allocate FMR pool array\n"); kiblnd_net_init_pools()
2160 break; /* create PMR pool */ kiblnd_net_init_pools()
2163 CERROR("Can't initialize FMR pool for CPT %d: %d\n", kiblnd_net_init_pools()
2181 CERROR("Can't set pmr pool size (%d) < ntx / 4(%d)\n", kiblnd_net_init_pools()
2191 CERROR("Failed to allocate PMR pool array\n"); kiblnd_net_init_pools()
2204 CERROR("Can't initialize PMR pool for CPT %d: %d\n", kiblnd_net_init_pools()
2214 CERROR("Failed to allocate tx pool array\n"); kiblnd_net_init_pools()
2228 CERROR("Can't initialize TX pool for CPT %d: %d\n", kiblnd_net_init_pools()
H A Do2iblnd.h103 int *kib_pmr_pool_size; /* # physical MR in pool */
104 int *kib_fmr_pool_size; /* # FMRs in pool */
106 int *kib_fmr_cache; /* enable FMR pool cache? */
219 /** # of seconds to keep pool alive */
256 char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
258 struct list_head ps_failed_pool_list; /* failed pool list */
260 int ps_increasing; /* is allocating new pool */
261 int ps_pool_size; /* new pool size */
264 kib_ps_pool_create_t ps_pool_create; /* create a new pool */
265 kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
271 struct list_head po_list; /* chain on pool list */
273 kib_poolset_t *po_owner; /* pool_set of this pool */
274 unsigned long po_deadline; /* deadline of this pool */
276 int po_failed; /* pool is created on failed HCA */
281 kib_poolset_t tps_poolset; /* pool-set */
286 kib_pool_t tpo_pool; /* pool */
287 struct kib_hca_dev *tpo_hdev; /* device for this pool */
293 kib_poolset_t pps_poolset; /* pool-set */
297 struct kib_hca_dev *ppo_hdev; /* device for this pool */
298 kib_pool_t ppo_pool; /* pool */
304 struct list_head fps_pool_list; /* FMR pool list */
305 struct list_head fps_failed_pool_list; /* FMR pool list */
310 /* is allocating new pool */
317 struct list_head fpo_list; /* chain on pool list */
318 struct kib_hca_dev *fpo_hdev; /* device for this pool */
319 kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
320 struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
321 unsigned long fpo_deadline; /* deadline of this pool */
322 int fpo_failed; /* fmr pool is failed */
327 struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
328 kib_fmr_pool_t *fmr_pool; /* pool of FMR */
340 kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
341 kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
342 kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */
532 kib_tx_pool_t *tx_pool; /* pool I'm from */
957 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
H A Do2iblnd_modparams.c55 /* Number of threads in each scheduler pool which is percpt,
59 MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
64 MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
118 MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
123 MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
132 MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT");
/linux-4.1.27/arch/powerpc/kernel/
H A Diommu.c75 * with 4 pools all primary threads would map to the same pool.
191 struct iommu_pool *pool; iommu_range_alloc() local
209 * safely use any IOMMU pool. iommu_range_alloc()
214 pool = &(tbl->large_pool); iommu_range_alloc()
216 pool = &(tbl->pools[pool_nr]); iommu_range_alloc()
218 spin_lock_irqsave(&(pool->lock), flags); iommu_range_alloc()
222 (*handle >= pool->start) && (*handle < pool->end)) iommu_range_alloc()
225 start = pool->hint; iommu_range_alloc()
227 limit = pool->end; iommu_range_alloc()
234 start = pool->start; iommu_range_alloc()
240 * but on second pass, start at 0 in pool 0. iommu_range_alloc()
243 spin_unlock(&(pool->lock)); iommu_range_alloc()
244 pool = &(tbl->pools[0]); iommu_range_alloc()
245 spin_lock(&(pool->lock)); iommu_range_alloc()
246 start = pool->start; iommu_range_alloc()
263 /* First try the pool from the start */ iommu_range_alloc()
264 pool->hint = pool->start; iommu_range_alloc()
270 spin_unlock(&(pool->lock)); iommu_range_alloc()
272 pool = &tbl->pools[pool_nr]; iommu_range_alloc()
273 spin_lock(&(pool->lock)); iommu_range_alloc()
274 pool->hint = pool->start; iommu_range_alloc()
280 spin_unlock_irqrestore(&(pool->lock), flags); iommu_range_alloc()
290 pool->hint = end; iommu_range_alloc()
293 pool->hint = (end + tbl->it_blocksize - 1) & iommu_range_alloc()
301 spin_unlock_irqrestore(&(pool->lock), flags); iommu_range_alloc()
383 /* The large pool is the last pool at the top of the table */ get_pool()
401 struct iommu_pool *pool; __iommu_free() local
406 pool = get_pool(tbl, free_entry); __iommu_free()
413 spin_lock_irqsave(&(pool->lock), flags); __iommu_free()
415 spin_unlock_irqrestore(&(pool->lock), flags); __iommu_free()
978 struct iommu_pool *pool = get_pool(tbl, entry); iommu_clear_tce() local
980 spin_lock(&(pool->lock)); iommu_clear_tce()
988 spin_unlock(&(pool->lock)); iommu_clear_tce()
1027 struct iommu_pool *pool = get_pool(tbl, entry); iommu_tce_build() local
1029 spin_lock(&(pool->lock)); iommu_tce_build()
1036 spin_unlock(&(pool->lock)); iommu_tce_build()
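The iommu_range_alloc() fragments above split one table into several pools, each with its own lock and search hint, and fall over to the next pool when one is exhausted. A simplified single-threaded userspace sketch of the hint-plus-retry loop (sizes and pool count are illustrative, and the real code also handles a separate large pool and locking):

	#include <stdbool.h>
	#include <stddef.h>

	#define NBITS 1024
	#define NPOOLS 4

	static bool bitmap[NBITS];

	struct small_pool { size_t start, end, hint; };

	static struct small_pool pools[NPOOLS] = {
		{ 0, 256, 0 }, { 256, 512, 256 }, { 512, 768, 512 }, { 768, 1024, 768 },
	};

	static long scan_range(struct small_pool *pool, size_t from, size_t to)
	{
		for (size_t i = from; i < to; i++) {
			if (!bitmap[i]) {
				bitmap[i] = true;
				pool->hint = i + 1;	/* next search starts after the hit */
				return (long)i;
			}
		}
		return -1;
	}

	static long pool_alloc_one(unsigned first_pool)
	{
		for (unsigned tries = 0; tries < NPOOLS; tries++) {
			struct small_pool *pool = &pools[(first_pool + tries) % NPOOLS];
			long n = scan_range(pool, pool->hint, pool->end);

			if (n < 0)			/* wrap: retry from the pool's start */
				n = scan_range(pool, pool->start, pool->end);
			if (n >= 0)
				return n;
			pool->hint = pool->start;	/* pool full, move on to the next one */
		}
		return -1;
	}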
H A Dvio.c48 * vio_cmo_pool - A pool of IO memory for CMO use
50 * @size: The size of the pool in bytes
51 * @free: The amount of free memory in the pool
82 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
83 * @excess: pool of excess entitlement not needed for device reserves or spare
139 * IO memory available to all devices. The spare pool used to service
140 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
159 /* If spare is not fulfilled, the excess pool can not be used. */ vio_cmo_alloc()
184 * The spare pool is replenished first from either memory pool, then
185 * the reserve pool is used to reduce device entitlement, the excess
186 * pool is used to increase the reserve pool toward the desired entitlement
202 /* Amount of memory freed from the excess pool */ vio_cmo_dealloc()
212 /* Spare is a subset of the reserve pool, replenish it first. */ vio_cmo_dealloc()
216 * Replenish the spare in the reserve pool from the excess pool. vio_cmo_dealloc()
217 * This moves entitlement into the reserve pool. vio_cmo_dealloc()
230 * Replenish the spare in the reserve pool from the reserve pool. vio_cmo_dealloc()
232 * if needed, and gives it to the spare pool. The amount of used vio_cmo_dealloc()
233 * memory in this pool does not change. vio_cmo_dealloc()
246 * Increase the reserve pool until the desired allocation is met. vio_cmo_dealloc()
247 * Move an allocation freed from the excess pool into the reserve vio_cmo_dealloc()
248 * pool and schedule a balance operation. vio_cmo_dealloc()
259 /* Return memory from the excess pool to that pool */ vio_cmo_dealloc()
274 * and the rest is given to the excess pool. Decreases, if they are
275 * possible, come from the excess pool and from unused device entitlement
300 /* Remaining new allocation goes to the excess pool */ vio_cmo_entitlement_update()
331 /* Take entitlement from the excess pool first */ vio_cmo_entitlement_update()
449 * from the available pool being portioned out. vio_cmo_balance()
461 /* Calculate new reserve and excess pool sizes */ vio_cmo_balance()
679 * any reserve memory in the change region to the excess pool. vio_cmo_set_dev_desired()
685 * If entitlement moving from the reserve pool to the vio_cmo_set_dev_desired()
686 * excess pool is currently unused, add to the excess vio_cmo_set_dev_desired()
774 * the reserve pool. vio_cmo_bus_probe()
795 /* Use excess pool first to fulfill request */ vio_cmo_bus_probe()
801 /* Use spare if excess pool was insufficient */ vio_cmo_bus_probe()
867 /* Replenish spare from freed reserve pool */ vio_cmo_bus_remove()
875 /* Remaining reserve goes to excess pool */ vio_cmo_bus_remove()
903 * require entitlement in the reserve pool.
/linux-4.1.27/drivers/scsi/libfc/
H A Dfc_exch.c59 * struct fc_exch_pool - Per cpu exchange pool
62 * @lock: Exch pool lock
67 * assigned range of exchanges to per cpu pool.
87 * @pool_max_index: Max exch array index in exch pool
88 * @pool: Per cpu exch pool
95 struct fc_exch_pool __percpu *pool; member in struct:fc_exch_mgr
213 * - If the EM pool lock and ex_lock must be taken at the same time, then the
214 * EM pool lock must be taken before the ex_lock.
410 * fc_exch_ptr_get() - Return an exchange from an exchange pool
411 * @pool: Exchange Pool to get an exchange from
412 * @index: Index of the exchange within the pool
414 * Use the index to get an exchange from within an exchange pool. exches
418 static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool, fc_exch_ptr_get() argument
421 struct fc_exch **exches = (struct fc_exch **)(pool + 1); fc_exch_ptr_get()
426 * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
427 * @pool: The pool to assign the exchange to
428 * @index: The index in the pool where the exchange will be assigned
429 * @ep: The exchange to assign to the pool
431 static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, fc_exch_ptr_set() argument
434 ((struct fc_exch **)(pool + 1))[index] = ep; fc_exch_ptr_set()
443 struct fc_exch_pool *pool; fc_exch_delete() local
446 pool = ep->pool; fc_exch_delete()
447 spin_lock_bh(&pool->lock); fc_exch_delete()
448 WARN_ON(pool->total_exches <= 0); fc_exch_delete()
449 pool->total_exches--; fc_exch_delete()
453 if (pool->left == FC_XID_UNKNOWN) fc_exch_delete()
454 pool->left = index; fc_exch_delete()
455 else if (pool->right == FC_XID_UNKNOWN) fc_exch_delete()
456 pool->right = index; fc_exch_delete()
458 pool->next_index = index; fc_exch_delete()
460 fc_exch_ptr_set(pool, index, NULL); fc_exch_delete()
462 spin_unlock_bh(&pool->lock); fc_exch_delete()
807 struct fc_exch_pool *pool; fc_exch_em_alloc() local
818 pool = per_cpu_ptr(mp->pool, cpu); fc_exch_em_alloc()
819 spin_lock_bh(&pool->lock); fc_exch_em_alloc()
823 if (pool->left != FC_XID_UNKNOWN) { fc_exch_em_alloc()
824 index = pool->left; fc_exch_em_alloc()
825 pool->left = FC_XID_UNKNOWN; fc_exch_em_alloc()
828 if (pool->right != FC_XID_UNKNOWN) { fc_exch_em_alloc()
829 index = pool->right; fc_exch_em_alloc()
830 pool->right = FC_XID_UNKNOWN; fc_exch_em_alloc()
834 index = pool->next_index; fc_exch_em_alloc()
835 /* allocate new exch from pool */ fc_exch_em_alloc()
836 while (fc_exch_ptr_get(pool, index)) { fc_exch_em_alloc()
838 if (index == pool->next_index) fc_exch_em_alloc()
841 pool->next_index = index == mp->pool_max_index ? 0 : index + 1; fc_exch_em_alloc()
852 fc_exch_ptr_set(pool, index, ep); fc_exch_em_alloc()
853 list_add_tail(&ep->ex_list, &pool->ex_list); fc_exch_em_alloc()
855 pool->total_exches++; fc_exch_em_alloc()
856 spin_unlock_bh(&pool->lock); fc_exch_em_alloc()
863 ep->pool = pool; fc_exch_em_alloc()
874 spin_unlock_bh(&pool->lock); fc_exch_em_alloc()
909 struct fc_exch_pool *pool; fc_exch_find() local
913 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); fc_exch_find()
914 spin_lock_bh(&pool->lock); fc_exch_find()
915 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); fc_exch_find()
920 spin_unlock_bh(&pool->lock); fc_exch_find()
1853 * fc_exch_pool_reset() - Reset a per cpu exchange pool
1854 * @lport: The local port that the exchange pool is on
1855 * @pool: The exchange pool to be reset
1859 * Resets a per cpu exches pool, releasing all of its sequences
1865 struct fc_exch_pool *pool, fc_exch_pool_reset()
1871 spin_lock_bh(&pool->lock); fc_exch_pool_reset()
1873 list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) { fc_exch_pool_reset()
1878 spin_unlock_bh(&pool->lock); fc_exch_pool_reset()
1883 spin_lock_bh(&pool->lock); fc_exch_pool_reset()
1892 pool->next_index = 0; fc_exch_pool_reset()
1893 pool->left = FC_XID_UNKNOWN; fc_exch_pool_reset()
1894 pool->right = FC_XID_UNKNOWN; fc_exch_pool_reset()
1895 spin_unlock_bh(&pool->lock); fc_exch_pool_reset()
1917 per_cpu_ptr(ema->mp->pool, cpu), fc_exch_mgr_reset()
2309 free_percpu(mp->pool); fc_exch_mgr_destroy()
2364 struct fc_exch_pool *pool; fc_exch_mgr_alloc() local
2384 /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */ fc_exch_mgr_alloc()
2385 pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) / fc_exch_mgr_alloc()
2401 * Setup per cpu exch pool with entire exchange id range equally fc_exch_mgr_alloc()
2403 * allocated for exch range per pool. fc_exch_mgr_alloc()
2408 * Allocate and initialize per cpu exch pool fc_exch_mgr_alloc()
2410 pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *); fc_exch_mgr_alloc()
2411 mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool)); fc_exch_mgr_alloc()
2412 if (!mp->pool) fc_exch_mgr_alloc()
2415 pool = per_cpu_ptr(mp->pool, cpu); for_each_possible_cpu()
2416 pool->next_index = 0; for_each_possible_cpu()
2417 pool->left = FC_XID_UNKNOWN; for_each_possible_cpu()
2418 pool->right = FC_XID_UNKNOWN; for_each_possible_cpu()
2419 spin_lock_init(&pool->lock); for_each_possible_cpu()
2420 INIT_LIST_HEAD(&pool->ex_list); for_each_possible_cpu()
2425 free_percpu(mp->pool);
2604 * in per cpu exch pool. fc_setup_exch_mgr()
1864 fc_exch_pool_reset(struct fc_lport *lport, struct fc_exch_pool *pool, u32 sid, u32 did) fc_exch_pool_reset() argument
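The fc_exch_ptr_get()/fc_exch_ptr_set() fragments above rely on the pointer table being stored directly after the per-cpu pool struct in the same allocation ("(struct fc_exch **)(pool + 1)"). A standalone sketch of that layout trick, with illustrative types (the header must keep pointer alignment, hence the size_t member):

	#include <stdlib.h>

	struct entry;					/* opaque pooled object */
	struct tiny_pool { size_t nr; };		/* header; pointer table follows it */

	static struct tiny_pool *tiny_pool_create(size_t nr)
	{
		struct tiny_pool *pool = calloc(1, sizeof(*pool) + nr * sizeof(struct entry *));

		if (pool)
			pool->nr = nr;
		return pool;
	}

	static struct entry **tiny_pool_table(struct tiny_pool *pool)
	{
		return (struct entry **)(pool + 1);	/* table starts just past the header */
	}

	static void tiny_pool_set(struct tiny_pool *pool, size_t idx, struct entry *e)
	{
		tiny_pool_table(pool)[idx] = e;
	}

	static struct entry *tiny_pool_get(struct tiny_pool *pool, size_t idx)
	{
		return tiny_pool_table(pool)[idx];
	}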
/linux-4.1.27/include/sound/
H A Dseq_kernel.h39 /* max number of events in memory pool */
42 /* default number of events in memory pool */
45 /* max number of events in memory pool for one client (outqueue) */
48 /* default number of events in memory pool for one client (outqueue) */
/linux-4.1.27/arch/arm/kernel/
H A Dtcm.c281 * This creates the TCM memory pool and has to be done later,
292 * Set up malloc pool, 2^2 = 4 bytes granularity since setup_tcm_pool()
298 pr_debug("Setting up TCM memory pool\n"); setup_tcm_pool()
300 /* Add the rest of DTCM to the TCM pool */ setup_tcm_pool()
307 "remainder to pool!\n"); setup_tcm_pool()
311 "the TCM memory pool\n", setup_tcm_pool()
317 /* Add the rest of ITCM to the TCM pool */ setup_tcm_pool()
324 "remainder to pool!\n"); setup_tcm_pool()
328 "the TCM memory pool\n", setup_tcm_pool()
/linux-4.1.27/sound/core/
H A Dmemalloc.c121 struct gen_pool *pool = NULL; snd_malloc_dev_iram() local
127 pool = of_get_named_gen_pool(dev->of_node, "iram", 0); snd_malloc_dev_iram()
129 if (!pool) snd_malloc_dev_iram()
132 /* Assign the pool into private_data field */ snd_malloc_dev_iram()
133 dmab->private_data = pool; snd_malloc_dev_iram()
135 dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr); snd_malloc_dev_iram()
144 struct gen_pool *pool = dmab->private_data; snd_free_dev_iram() local
146 if (pool && dmab->area) snd_free_dev_iram()
147 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); snd_free_dev_iram()
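The memalloc.c fragments above show the v4.1-era genalloc calls for carving a buffer out of a device's on-chip "iram" pool. A kernel-context sketch of the same calls, with error handling trimmed and "my_" names as placeholders:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/genalloc.h>

	static struct gen_pool *my_iram_pool;
	static void *my_iram_buf;
	static dma_addr_t my_iram_dma;

	static int my_grab_iram(struct device *my_dev, size_t size)
	{
		my_iram_pool = of_get_named_gen_pool(my_dev->of_node, "iram", 0);
		if (!my_iram_pool)
			return -ENODEV;
		my_iram_buf = gen_pool_dma_alloc(my_iram_pool, size, &my_iram_dma);
		return my_iram_buf ? 0 : -ENOMEM;	/* virt address plus bus address */
	}

	static void my_release_iram(size_t size)
	{
		if (my_iram_buf)
			gen_pool_free(my_iram_pool, (unsigned long)my_iram_buf, size);
	}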
/linux-4.1.27/include/drm/ttm/
H A Dttm_page_alloc.h35 * Initialize pool allocator.
39 * Free pool allocator.
69 * Initialize pool allocator.
74 * Free pool allocator.
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dclient.c422 * Wind down request pool \a pool.
423 * Frees all requests from the pool too
425 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) ptlrpc_free_rq_pool() argument
430 LASSERT(pool != NULL); ptlrpc_free_rq_pool()
432 spin_lock(&pool->prp_lock); ptlrpc_free_rq_pool()
433 list_for_each_safe(l, tmp, &pool->prp_req_list) { ptlrpc_free_rq_pool()
437 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); ptlrpc_free_rq_pool()
438 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size); ptlrpc_free_rq_pool()
441 spin_unlock(&pool->prp_lock); ptlrpc_free_rq_pool()
442 OBD_FREE(pool, sizeof(*pool)); ptlrpc_free_rq_pool()
447 * Allocates, initializes and adds \a num_rq requests to the pool \a pool
449 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) ptlrpc_add_rqs_to_pool() argument
454 while (size < pool->prp_rq_size) ptlrpc_add_rqs_to_pool()
457 LASSERTF(list_empty(&pool->prp_req_list) || ptlrpc_add_rqs_to_pool()
458 size == pool->prp_rq_size, ptlrpc_add_rqs_to_pool()
459 "Trying to change pool size with nonempty pool from %d to %d bytes\n", ptlrpc_add_rqs_to_pool()
460 pool->prp_rq_size, size); ptlrpc_add_rqs_to_pool()
462 spin_lock(&pool->prp_lock); ptlrpc_add_rqs_to_pool()
463 pool->prp_rq_size = size; ptlrpc_add_rqs_to_pool()
468 spin_unlock(&pool->prp_lock); ptlrpc_add_rqs_to_pool()
479 req->rq_pool = pool; ptlrpc_add_rqs_to_pool()
480 spin_lock(&pool->prp_lock); ptlrpc_add_rqs_to_pool()
481 list_add_tail(&req->rq_list, &pool->prp_req_list); ptlrpc_add_rqs_to_pool()
483 spin_unlock(&pool->prp_lock); ptlrpc_add_rqs_to_pool()
488 * Create and initialize new request pool with given attributes:
489 * \a num_rq - initial number of requests to create for the pool
490 * \a msgsize - maximum message size possible for requests in this pool ptlrpc_init_rq_pool()
492 * to the pool
493 * Returns pointer to newly created pool or NULL on error.
499 struct ptlrpc_request_pool *pool; ptlrpc_init_rq_pool() local
501 OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool)); ptlrpc_init_rq_pool()
502 if (!pool) ptlrpc_init_rq_pool()
508 spin_lock_init(&pool->prp_lock); ptlrpc_init_rq_pool()
509 INIT_LIST_HEAD(&pool->prp_req_list); ptlrpc_init_rq_pool()
510 pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD; ptlrpc_init_rq_pool()
511 pool->prp_populate = populate_pool; ptlrpc_init_rq_pool()
513 populate_pool(pool, num_rq); ptlrpc_init_rq_pool()
515 if (list_empty(&pool->prp_req_list)) { ptlrpc_init_rq_pool()
516 /* have not allocated a single request for the pool */ ptlrpc_init_rq_pool()
517 OBD_FREE(pool, sizeof(struct ptlrpc_request_pool)); ptlrpc_init_rq_pool()
518 pool = NULL; ptlrpc_init_rq_pool()
520 return pool; ptlrpc_init_rq_pool()
525 * Fetches one request from pool \a pool
528 ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) ptlrpc_prep_req_from_pool() argument
533 if (!pool) ptlrpc_prep_req_from_pool()
536 spin_lock(&pool->prp_lock); ptlrpc_prep_req_from_pool()
538 /* See if we have anything in a pool, and bail out if nothing, ptlrpc_prep_req_from_pool()
542 if (unlikely(list_empty(&pool->prp_req_list))) { ptlrpc_prep_req_from_pool()
543 spin_unlock(&pool->prp_lock); ptlrpc_prep_req_from_pool()
547 request = list_entry(pool->prp_req_list.next, struct ptlrpc_request, ptlrpc_prep_req_from_pool()
550 spin_unlock(&pool->prp_lock); ptlrpc_prep_req_from_pool()
558 request->rq_reqbuf_len = pool->prp_rq_size; ptlrpc_prep_req_from_pool()
559 request->rq_pool = pool; ptlrpc_prep_req_from_pool()
565 * Returns freed \a request to pool.
569 struct ptlrpc_request_pool *pool = request->rq_pool; __ptlrpc_free_req_to_pool() local
571 spin_lock(&pool->prp_lock); __ptlrpc_free_req_to_pool()
574 list_add_tail(&request->rq_list, &pool->prp_req_list); __ptlrpc_free_req_to_pool()
575 spin_unlock(&pool->prp_lock); __ptlrpc_free_req_to_pool()
695 * and possibly using existing request from pool \a pool if provided.
701 struct ptlrpc_request_pool *pool) __ptlrpc_request_alloc()
705 if (pool) __ptlrpc_request_alloc()
706 request = ptlrpc_prep_req_from_pool(pool); __ptlrpc_request_alloc()
734 struct ptlrpc_request_pool *pool, ptlrpc_request_alloc_internal()
739 request = __ptlrpc_request_alloc(imp, pool); ptlrpc_request_alloc_internal()
760 * Allocate new request structure for import \a imp from pool \a pool and
764 struct ptlrpc_request_pool *pool, ptlrpc_request_alloc_pool()
767 return ptlrpc_request_alloc_internal(imp, pool, format); ptlrpc_request_alloc_pool()
772 * For requests not from pool, free memory of the request structure.
773 * For requests obtained from a pool earlier, return request back to pool.
810 * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
820 struct ptlrpc_request_pool *pool) ptlrpc_prep_req_pool()
825 request = __ptlrpc_request_alloc(imp, pool); ptlrpc_prep_req_pool()
840 * Same as ptlrpc_prep_req_pool, but without pool
1565 * can't return to pool before that and we can't ptlrpc_check_set()
2400 * unlinked before returning a req to the pool. ptlrpc_unregister_reply()
700 __ptlrpc_request_alloc(struct obd_import *imp, struct ptlrpc_request_pool *pool) __ptlrpc_request_alloc() argument
733 ptlrpc_request_alloc_internal(struct obd_import *imp, struct ptlrpc_request_pool *pool, const struct req_format *format) ptlrpc_request_alloc_internal() argument
763 ptlrpc_request_alloc_pool(struct obd_import *imp, struct ptlrpc_request_pool *pool, const struct req_format *format) ptlrpc_request_alloc_pool() argument
817 ptlrpc_prep_req_pool(struct obd_import *imp, __u32 version, int opcode, int count, __u32 *lengths, char **bufs, struct ptlrpc_request_pool *pool) ptlrpc_prep_req_pool() argument
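The client.c fragments above fetch a preallocated request from the pool under prp_lock and fall back to a fresh allocation when the pool is empty, returning pool requests to the free list later. A kernel-style sketch of that fetch/return pattern; "my_req"/"my_req_pool" are stand-ins, not ptlrpc types, and a real implementation would remember which requests came from the pool:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_req { struct list_head link; };
	struct my_req_pool { spinlock_t lock; struct list_head free_list; };

	static struct my_req *my_req_get(struct my_req_pool *pool)
	{
		struct my_req *req = NULL;

		spin_lock(&pool->lock);
		if (!list_empty(&pool->free_list)) {
			req = list_entry(pool->free_list.next, struct my_req, link);
			list_del(&req->link);
		}
		spin_unlock(&pool->lock);
		if (!req)				/* pool empty: fall back to a fresh allocation */
			req = kzalloc(sizeof(*req), GFP_NOFS);
		return req;
	}

	static void my_req_put(struct my_req_pool *pool, struct my_req *req)
	{
		spin_lock(&pool->lock);
		list_add_tail(&req->link, &pool->free_list);	/* recycle the preallocated buffer */
		spin_unlock(&pool->lock);
	}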
H A Dsec_bulk.c99 * in-pool pages bookkeeping
132 "pages per pool: %lu\n" sptlrpc_proc_enc_pool_seq_show()
183 /* max pool index before the release */ enc_pools_release_free_pages()
189 /* max pool index after the release */ enc_pools_release_free_pages()
220 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
226 * if no pool access for a long time, we consider it's fully idle. enc_pools_shrink_count()
242 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
261 * if no pool access for a long time, we consider it's fully idle. enc_pools_shrink_scan()
329 * index >= total_pages, locate at the tail of last pool. */ enc_pools_insert()
/linux-4.1.27/arch/sh/boards/mach-sdk7786/
H A Dsram.c51 * up a mapping prior to inserting it in to the pool. fpga_sram_init()
60 "(area %d) to pool.\n", fpga_sram_init()
/linux-4.1.27/drivers/scsi/ibmvscsi/
H A Dibmvscsi.c443 * Routines for the event pool and event structs
446 * initialize_event_pool: - Allocates and initializes the event pool for a host
447 * @pool: event_pool to be initialized
448 * @size: Number of events in pool
449 * @hostdata: ibmvscsi_host_data who owns the event pool
453 static int initialize_event_pool(struct event_pool *pool, initialize_event_pool() argument
458 pool->size = size; initialize_event_pool()
459 pool->next = 0; initialize_event_pool()
460 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); initialize_event_pool()
461 if (!pool->events) initialize_event_pool()
464 pool->iu_storage = initialize_event_pool()
466 pool->size * sizeof(*pool->iu_storage), initialize_event_pool()
467 &pool->iu_token, 0); initialize_event_pool()
468 if (!pool->iu_storage) { initialize_event_pool()
469 kfree(pool->events); initialize_event_pool()
473 for (i = 0; i < pool->size; ++i) { initialize_event_pool()
474 struct srp_event_struct *evt = &pool->events[i]; initialize_event_pool()
479 evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + initialize_event_pool()
481 evt->xfer_iu = pool->iu_storage + i; initialize_event_pool()
491 * release_event_pool: - Frees memory of an event pool of a host
492 * @pool: event_pool to be released
493 * @hostdata: ibmvscsi_host_data who owns the even pool
497 static void release_event_pool(struct event_pool *pool, release_event_pool() argument
501 for (i = 0; i < pool->size; ++i) { release_event_pool()
502 if (atomic_read(&pool->events[i].free) != 1) release_event_pool()
504 if (pool->events[i].ext_list) { release_event_pool()
507 pool->events[i].ext_list, release_event_pool()
508 pool->events[i].ext_list_token); release_event_pool()
512 dev_warn(hostdata->dev, "releasing event pool with %d " release_event_pool()
514 kfree(pool->events); release_event_pool()
516 pool->size * sizeof(*pool->iu_storage), release_event_pool()
517 pool->iu_storage, pool->iu_token); release_event_pool()
522 * @pool: event_pool that contains the event
527 static int valid_event_struct(struct event_pool *pool, valid_event_struct() argument
530 int index = evt - pool->events; valid_event_struct()
531 if (index < 0 || index >= pool->size) /* outside of bounds */ valid_event_struct()
533 if (evt != pool->events + index) /* unaligned */ valid_event_struct()
540 * @pool: event_pool that contains the event
544 static void free_event_struct(struct event_pool *pool, free_event_struct() argument
547 if (!valid_event_struct(pool, evt)) { free_event_struct()
549 "(not in pool %p)\n", evt, pool->events); free_event_struct()
560 * get_evt_struct: - Gets the next free event in pool
561 * @pool: event_pool that contains the events to be searched
567 static struct srp_event_struct *get_event_struct(struct event_pool *pool) get_event_struct() argument
570 int poolsize = pool->size; get_event_struct()
571 int offset = pool->next; get_event_struct()
575 if (!atomic_dec_if_positive(&pool->events[offset].free)) { get_event_struct()
576 pool->next = offset; get_event_struct()
577 return &pool->events[offset]; get_event_struct()
581 printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n"); get_event_struct()
813 free_event_struct(&evt->hostdata->pool, evt); purge_requests()
964 free_event_struct(&hostdata->pool, evt_struct); ibmvscsi_send_srp_event()
978 free_event_struct(&hostdata->pool, evt_struct); ibmvscsi_send_srp_event()
1047 evt_struct = get_event_struct(&hostdata->pool); ibmvscsi_queuecommand_lck()
1062 free_event_struct(&hostdata->pool, evt_struct); ibmvscsi_queuecommand_lck()
1195 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); send_srp_login()
1264 evt_struct = get_event_struct(&hostdata->pool); send_mad_capabilities()
1358 evt_struct = get_event_struct(&hostdata->pool); enable_fast_fail()
1434 evt_struct = get_event_struct(&hostdata->pool); send_mad_adapter_info()
1514 evt = get_event_struct(&hostdata->pool); ibmvscsi_eh_abort_handler()
1611 free_event_struct(&found_evt->hostdata->pool, found_evt); ibmvscsi_eh_abort_handler()
1637 evt = get_event_struct(&hostdata->pool); ibmvscsi_eh_device_reset_handler()
1716 free_event_struct(&tmp_evt->hostdata->pool, ibmvscsi_eh_device_reset_handler()
1822 if (!valid_event_struct(&hostdata->pool, evt_struct)) { ibmvscsi_handle_crq()
1853 free_event_struct(&evt_struct->hostdata->pool, evt_struct); ibmvscsi_handle_crq()
1870 evt_struct = get_event_struct(&hostdata->pool); ibmvscsi_do_host_config()
1896 free_event_struct(&hostdata->pool, evt_struct); ibmvscsi_do_host_config()
2287 if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) { ibmvscsi_probe()
2288 dev_err(&vdev->dev, "couldn't initialize event pool\n"); ibmvscsi_probe()
2338 release_event_pool(&hostdata->pool, hostdata); ibmvscsi_probe()
2355 release_event_pool(&hostdata->pool, hostdata); ibmvscsi_remove()
H A Dibmvscsi.h81 /* a pool of event structs for use */
97 struct event_pool pool; member in struct:ibmvscsi_host_data
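The get_event_struct()/free_event_struct() fragments above manage a fixed array of events where each slot carries an atomic "free" flag and a rotating index spreads the search. A kernel-style sketch of that claim/release scheme with illustrative types (slots start with free set to 1 at pool init, which is omitted here):

	#include <linux/atomic.h>

	#define MY_POOL_SIZE 64

	struct my_evt { atomic_t free; /* 1 while the slot is unused */ };

	struct my_evt_pool {
		struct my_evt events[MY_POOL_SIZE];
		int next;				/* where the last search left off */
	};

	static struct my_evt *my_evt_get(struct my_evt_pool *pool)
	{
		int i, offset = pool->next;

		for (i = 0; i < MY_POOL_SIZE; i++) {
			offset = (offset + 1) % MY_POOL_SIZE;
			if (!atomic_dec_if_positive(&pool->events[offset].free)) {
				pool->next = offset;	/* claimed: the 1 -> 0 transition succeeded */
				return &pool->events[offset];
			}
		}
		return NULL;				/* every slot is in flight */
	}

	static void my_evt_put(struct my_evt *evt)
	{
		atomic_set(&evt->free, 1);		/* hand the slot back */
	}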
H A Dibmvfc.c742 * @pool: event_pool that contains the event
748 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool, ibmvfc_valid_event() argument
751 int index = evt - pool->events; ibmvfc_valid_event()
752 if (index < 0 || index >= pool->size) /* outside of bounds */ ibmvfc_valid_event()
754 if (evt != pool->events + index) /* unaligned */ ibmvfc_valid_event()
767 struct ibmvfc_event_pool *pool = &vhost->pool; ibmvfc_free_event() local
769 BUG_ON(!ibmvfc_valid_event(pool, evt)); ibmvfc_free_event()
1199 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1200 * @vhost: ibmvfc host who owns the event pool
1207 struct ibmvfc_event_pool *pool = &vhost->pool; ibmvfc_init_event_pool() local
1210 pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ; ibmvfc_init_event_pool()
1211 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); ibmvfc_init_event_pool()
1212 if (!pool->events) ibmvfc_init_event_pool()
1215 pool->iu_storage = dma_alloc_coherent(vhost->dev, ibmvfc_init_event_pool()
1216 pool->size * sizeof(*pool->iu_storage), ibmvfc_init_event_pool()
1217 &pool->iu_token, 0); ibmvfc_init_event_pool()
1219 if (!pool->iu_storage) { ibmvfc_init_event_pool()
1220 kfree(pool->events); ibmvfc_init_event_pool()
1224 for (i = 0; i < pool->size; ++i) { ibmvfc_init_event_pool()
1225 struct ibmvfc_event *evt = &pool->events[i]; ibmvfc_init_event_pool()
1228 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); ibmvfc_init_event_pool()
1229 evt->xfer_iu = pool->iu_storage + i; ibmvfc_init_event_pool()
1240 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1241 * @vhost: ibmvfc host who owns the event pool
1247 struct ibmvfc_event_pool *pool = &vhost->pool; ibmvfc_free_event_pool() local
1250 for (i = 0; i < pool->size; ++i) { ibmvfc_free_event_pool()
1251 list_del(&pool->events[i].queue); ibmvfc_free_event_pool()
1252 BUG_ON(atomic_read(&pool->events[i].free) != 1); ibmvfc_free_event_pool()
1253 if (pool->events[i].ext_list) ibmvfc_free_event_pool()
1255 pool->events[i].ext_list, ibmvfc_free_event_pool()
1256 pool->events[i].ext_list_token); ibmvfc_free_event_pool()
1259 kfree(pool->events); ibmvfc_free_event_pool()
1261 pool->size * sizeof(*pool->iu_storage), ibmvfc_free_event_pool()
1262 pool->iu_storage, pool->iu_token); ibmvfc_free_event_pool()
1267 * ibmvfc_get_event - Gets the next free event in pool
1270 * Returns a free event from the pool.
2778 if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) { ibmvfc_handle_crq()
4632 dev_err(dev, "Failed to allocate sg pool\n"); ibmvfc_alloc_mem()
4663 dev_err(dev, "Couldn't allocate target memory pool\n"); ibmvfc_alloc_mem()
4804 dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc); ibmvfc_probe()
/linux-4.1.27/drivers/staging/fsl-mc/bus/
H A Dmc-allocator.c21 * pool of a given MC bus
24 * @pool_type: MC bus pool type
27 * It adds an allocatable MC object device to a container's resource pool of
93 * resource pool
98 * pool, the device is currently in, as long as it is in the pool's free list.
139 "Device %s cannot be removed from resource pool\n", fsl_mc_resource_pool_remove_device()
275 * from the corresponding MC bus' pool of MC portals and wraps
338 * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
400 * pool type from a given MC bus
403 * @pool_type: MC bus resource pool type
408 * from the corresponding MC bus' pool of allocatable MC object devices of
457 * corresponding resource pool of a given MC bus.
/linux-4.1.27/drivers/misc/
H A Dsram.c38 struct gen_pool *pool; member in struct:sram_dev
99 sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1); sram_probe()
100 if (!sram->pool) sram_probe()
179 ret = gen_pool_add_virt(sram->pool,
193 dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base);
209 if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) sram_remove()
/linux-4.1.27/drivers/scsi/sym53c8xx_2/
H A Dsym_malloc.c188 * Default memory pool we do not need to involve in DMA.
219 * New pools are created on the fly when a new pool id is provided.
222 /* Get a memory cluster that matches the DMA constraints of a given pool */ ___get_dma_mem_cluster()
264 /* Fetch the memory pool for a given pool id (i.e. DMA constraints) */ ___get_dma_pool()
274 /* Create a new memory DMAable pool (when fetch failed) */ ___cre_dma_pool()
292 /* Destroy a DMAable memory pool (when got emptied) */ ___del_dma_pool()
/linux-4.1.27/drivers/base/
H A Ddma-coherent.c201 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
207 * coherent memory pool and if so, releases that memory.
233 * per-device coherent memory pool to userspace
241 * coherent memory pool and if so, maps that memory to the provided vma.
287 pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", rmem_dma_device_init()
322 pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n", rmem_dma_setup()
326 RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
H A Ddma-contiguous.c274 pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", rmem_cma_setup()
279 RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
/linux-4.1.27/drivers/net/wireless/cw1200/
H A Dqueue.c184 queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, cw1200_queue_init()
186 if (!queue->pool) cw1200_queue_init()
192 kfree(queue->pool); cw1200_queue_init()
193 queue->pool = NULL; cw1200_queue_init()
198 list_add_tail(&queue->pool[i].head, &queue->free_pool); cw1200_queue_init()
250 kfree(queue->pool); cw1200_queue_deinit()
252 queue->pool = NULL; cw1200_queue_deinit()
305 item - queue->pool); cw1200_queue_put()
384 item = &queue->pool[item_id]; cw1200_queue_requeue()
435 item - queue->pool); cw1200_queue_requeue_all()
455 item = &queue->pool[item_id]; cw1200_queue_remove()
504 item = &queue->pool[item_id]; cw1200_queue_get_skb()
H A Dqueue.h35 struct cw1200_queue_item *pool; member in struct:cw1200_queue
/linux-4.1.27/block/partitions/
H A Datari.h11 * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
/linux-4.1.27/arch/arm/include/asm/hardware/
H A Diop_adma.h52 * @lock: serializes enqueue/dequeue operations to the slot pool
59 * @slots_allocated: records the actual size of the descriptor slot pool
64 spinlock_t lock; /* protects the descriptor slot pool */
84 * @idx: pool index
/linux-4.1.27/net/ipv4/
H A Dinetpeer.c33 * also be removed if the pool is overloaded i.e. if the total amount of
36 * Node pool is organised as an AVL tree.
43 * 1. Nodes may appear in the tree only with the pool lock held.
44 * 2. Nodes may disappear from the tree only with the pool lock held
46 * 3. Global variable peer_total is modified under the pool lock.
48 * avl_left, avl_right, avl_parent, avl_height: pool lock
146 inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */ inet_initpeers()
180 * Called with local BH disabled and the pool lock held.
237 /* Called with local BH disabled and the pool lock held. */
253 /* Called with local BH disabled and the pool lock held.
334 /* Called with local BH disabled and the pool lock held. */
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
H A Dixgbe_fcoe.h67 struct dma_pool *pool; member in struct:ixgbe_fcoe_ddp
72 struct dma_pool *pool; member in struct:ixgbe_fcoe_ddp_pool
H A Dixgbe_fcoe.c137 if (ddp->pool) { ixgbe_fcoe_ddp_put()
138 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); ixgbe_fcoe_ddp_put()
139 ddp->pool = NULL; ixgbe_fcoe_ddp_put()
206 if (!ddp_pool->pool) { ixgbe_fcoe_ddp_setup()
207 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); ixgbe_fcoe_ddp_setup()
218 /* alloc the udl from per cpu ddp pool */ ixgbe_fcoe_ddp_setup()
219 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); ixgbe_fcoe_ddp_setup()
224 ddp->pool = ddp_pool->pool; ixgbe_fcoe_ddp_setup()
343 dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
623 if (ddp_pool->pool) ixgbe_fcoe_dma_pool_free()
624 dma_pool_destroy(ddp_pool->pool); ixgbe_fcoe_dma_pool_free()
625 ddp_pool->pool = NULL; ixgbe_fcoe_dma_pool_free()
633 struct dma_pool *pool; ixgbe_fcoe_dma_pool_alloc() local
638 pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, ixgbe_fcoe_dma_pool_alloc()
640 if (!pool) ixgbe_fcoe_dma_pool_alloc()
644 ddp_pool->pool = pool; ixgbe_fcoe_dma_pool_alloc()
799 /* allocate pci pool for each cpu */ for_each_possible_cpu()
805 e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); for_each_possible_cpu()
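The ixgbe_fcoe.c fragments above build one dma_pool per CPU and carve fixed-size DMA-coherent blocks out of it. A kernel-context sketch of the same dma_pool calls; the pool name, block size, alignment and "my_dev" are placeholders:

	#include <linux/dma-mapping.h>
	#include <linux/dmapool.h>
	#include <linux/errno.h>

	static struct dma_pool *my_ddp_pool;

	static int my_ddp_pool_create(struct device *my_dev)
	{
		/* fixed-size, 16-byte aligned blocks carved out of coherent DMA memory */
		my_ddp_pool = dma_pool_create("my_ddp_pool", my_dev, 512, 16, 0);
		return my_ddp_pool ? 0 : -ENOMEM;
	}

	static void *my_ddp_alloc(dma_addr_t *dma)
	{
		return dma_pool_alloc(my_ddp_pool, GFP_ATOMIC, dma);	/* virt + bus address */
	}

	static void my_ddp_release(void *vaddr, dma_addr_t dma)
	{
		dma_pool_free(my_ddp_pool, vaddr, dma);
	}

	static void my_ddp_pool_destroy(void)
	{
		dma_pool_destroy(my_ddp_pool);
		my_ddp_pool = NULL;
	}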
H A Dixgbe_lib.c63 /* If we are greater than indices move to next pool */ ixgbe_cache_ring_dcb_sriov()
71 /* If we are greater than indices move to next pool */ ixgbe_cache_ring_dcb_sriov()
225 /* If we are greater than indices move to next pool */ ixgbe_cache_ring_sriov()
244 /* If we are greater than indices move to next pool */ ixgbe_cache_ring_sriov()
344 /* Add starting offset to total pool count */ ixgbe_set_dcb_sriov_queues()
347 /* 16 pools w/ 8 TC per pool */ ixgbe_set_dcb_sriov_queues()
351 /* 32 pools w/ 4 TC per pool */ ixgbe_set_dcb_sriov_queues()
362 /* remove the starting offset from the pool count */ ixgbe_set_dcb_sriov_queues()
510 /* Add starting offset to total pool count */ ixgbe_set_sriov_queues()
516 /* 64 pool mode with 2 queues per pool */ ixgbe_set_sriov_queues()
521 /* 32 pool mode with 4 queues per pool */ ixgbe_set_sriov_queues()
533 /* remove the starting offset from the pool count */ ixgbe_set_sriov_queues()
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/
H A Dldlm_pool.c82 * pl_limit - Number of allowed locks in pool. Applies to server and client
314 * pool. This is required to avoid race between sending reply to client ldlm_srv_pool_push_slv()
327 * Recalculates all pool fields on passed \a pl.
352 * Make sure that pool informed obd of last SLV changes. ldlm_srv_pool_recalc()
403 * interval pool will either increase SLV if locks load is not high ldlm_srv_pool_shrink()
416 * Make sure that pool informed obd of last SLV changes. ldlm_srv_pool_shrink()
429 * Setup server side pool \a pl with passed \a limit.
466 * Recalculates client size pool \a pl according to current SLV and Limit.
488 * Make sure that pool knows last SLV and Limit from obd. ldlm_cli_pool_recalc()
543 * Make sure that pool knows last SLV and Limit from obd. ldlm_cli_pool_shrink()
569 * Pool recalc wrapper. Will call either client or server pool recalc callback
570 * depending what pool \a pl is used.
584 * Update pool statistics every 1s. ldlm_pool_recalc()
617 * Pool shrink wrapper. Will call either client or server pool recalc callback
618 * depending what pool pl is used. When nr == 0, just return the number of
644 * Pool setup wrapper. Will call either client or server pool recalc callback
645 * depending what pool \a pl is used.
647 * Sets passed \a limit into pool \a pl.
679 seq_printf(m, "LDLM pool state (%s):\n" lprocfs_pool_state_seq_show()
760 pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc, ldlm_pool_proc_init()
763 CERROR("LProcFS failed in ldlm-pool-init\n"); ldlm_pool_proc_init()
869 snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d", ldlm_pool_init()
888 CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name); ldlm_pool_init()
908 * Add new taken ldlm lock \a lock into pool \a pl accounting.
925 * Do not do pool recalc for client side as all locks which ldlm_pool_add()
936 * Remove ldlm lock \a lock from pool \a pl accounting.
1191 * No need to setup pool limit for client pools. ldlm_pools_recalc()
1264 * to the tail, unlock and call pool recalc. This way we avoid
1317 * After setup is done - recalc the pool.
1340 CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n", ldlm_pools_thread_main()
1372 CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n", ldlm_pools_thread_main()
1396 CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task)); ldlm_pools_thread_start()
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
H A Di40e_fcoe.c173 if (ddp->pool) { i40e_fcoe_ddp_unmap()
174 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); i40e_fcoe_ddp_unmap()
175 ddp->pool = NULL; i40e_fcoe_ddp_unmap()
475 * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP
477 * @dev: the device that the pool is associated with
478 * @cpu: the cpu for this pool
488 if (!ddp_pool->pool) { i40e_fcoe_dma_pool_free()
489 dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu); i40e_fcoe_dma_pool_free()
492 dma_pool_destroy(ddp_pool->pool); i40e_fcoe_dma_pool_free()
493 ddp_pool->pool = NULL; i40e_fcoe_dma_pool_free()
497 * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP
499 * @dev: the device that the pool is associated with
500 * @cpu: the cpu for this pool
510 struct dma_pool *pool; i40e_fcoe_dma_pool_create() local
514 if (ddp_pool && ddp_pool->pool) { i40e_fcoe_dma_pool_create()
515 dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu); i40e_fcoe_dma_pool_create()
519 pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX, i40e_fcoe_dma_pool_create()
521 if (!pool) { i40e_fcoe_dma_pool_create()
525 ddp_pool->pool = pool; i40e_fcoe_dma_pool_create()
590 /* allocate pci pool for each cpu */ for_each_possible_cpu()
595 dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu); for_each_possible_cpu()
855 dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid); i40e_fcoe_ddp_setup()
860 if (!ddp_pool->pool) { i40e_fcoe_ddp_setup()
861 dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid); i40e_fcoe_ddp_setup()
873 /* alloc the udl from our ddp pool */ i40e_fcoe_ddp_setup()
874 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); i40e_fcoe_ddp_setup()
926 ddp->pool = ddp_pool->pool;
938 dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
H A Di40e_fcoe.h112 struct dma_pool *pool; member in struct:i40e_fcoe_ddp
117 struct dma_pool *pool; member in struct:i40e_fcoe_ddp_pool
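The i40e FCoE entries above keep one struct dma_pool per CPU and carve fixed-size DDP descriptor buffers out of it. A minimal sketch of that dma_pool pattern follows; the pool name, sizes, and alignment are hypothetical placeholders, not the i40e driver's actual values.

	#include <linux/device.h>
	#include <linux/dmapool.h>

	/* Hypothetical sizes; the real driver derives these from hardware limits. */
	#define EXAMPLE_DESC_SIZE	512
	#define EXAMPLE_DESC_ALIGN	16

	static struct dma_pool *example_pool;

	static int example_pool_setup(struct device *dev)
	{
		/* One pool of fixed-size coherent DMA buffers tied to this device. */
		example_pool = dma_pool_create("example_ddp", dev,
					       EXAMPLE_DESC_SIZE, EXAMPLE_DESC_ALIGN, 0);
		if (!example_pool)
			return -ENOMEM;
		return 0;
	}

	static void *example_desc_get(dma_addr_t *dma)
	{
		/* GFP_ATOMIC so this can be called from atomic context. */
		return dma_pool_alloc(example_pool, GFP_ATOMIC, dma);
	}

	static void example_desc_put(void *vaddr, dma_addr_t dma)
	{
		dma_pool_free(example_pool, vaddr, dma);
	}

	static void example_pool_teardown(void)
	{
		dma_pool_destroy(example_pool);
		example_pool = NULL;
	}

A per-CPU variant, as in the driver above, simply keeps one such pool pointer per possible CPU and creates/destroys them in a for_each_possible_cpu() loop.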
/linux-4.1.27/drivers/infiniband/ulp/iser/
H A Diser_verbs.c208 * iser_create_fmr_pool - Creates FMR pool and page_vector
230 /* make the pool size twice the max number of SCSI commands * iser_create_fmr_pool()
240 ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params); iser_create_fmr_pool()
241 if (!IS_ERR(ib_conn->fmr.pool)) iser_create_fmr_pool()
248 ret = PTR_ERR(ib_conn->fmr.pool); iser_create_fmr_pool()
249 ib_conn->fmr.pool = NULL; iser_create_fmr_pool()
260 * iser_free_fmr_pool - releases the FMR pool and page vec
264 iser_info("freeing conn %p fmr pool %p\n", iser_free_fmr_pool()
265 ib_conn, ib_conn->fmr.pool); iser_free_fmr_pool()
267 if (ib_conn->fmr.pool != NULL) iser_free_fmr_pool()
268 ib_destroy_fmr_pool(ib_conn->fmr.pool); iser_free_fmr_pool()
270 ib_conn->fmr.pool = NULL; iser_free_fmr_pool()
374 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
384 INIT_LIST_HEAD(&ib_conn->fastreg.pool); iser_create_fastreg_pool()
403 list_add_tail(&desc->list, &ib_conn->fastreg.pool); iser_create_fastreg_pool()
415 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
422 if (list_empty(&ib_conn->fastreg.pool)) iser_free_fastreg_pool()
425 iser_info("freeing conn %p fr pool\n", ib_conn); iser_free_fastreg_pool()
427 list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) { iser_free_fastreg_pool()
438 iser_warn("pool still has %d regions registered\n", iser_free_fastreg_pool()
612 * iser device and memory regions pool (only iscsi
H A Discsi_iser.h341 * Memory registration pool Function pointers (FMR or Fastreg):
342 * @iser_alloc_rdma_reg_res: Allocation of memory regions pool
343 * @iser_free_rdma_reg_res: Free of memory regions pool
393 * @list: entry in connection fastreg pool
420 * @lock: protects fmr/fastreg pool
422 * @pool: FMR pool for fast registrations
426 * @pool: Fast registration descriptors pool for fast
428 * @pool_size: Size of pool
444 struct ib_fmr_pool *pool; member in struct:ib_conn::__anon5063::__anon5064
448 struct list_head pool; member in struct:ib_conn::__anon5063::__anon5065
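The iser entries above show two pool flavours: an ib_fmr_pool and a list-based fast-registration pool. A hedged sketch of the FMR side only; all parameter values here are assumptions for illustration, not the iser driver's real ones (which are derived from the SCSI command count).

	#include <rdma/ib_verbs.h>
	#include <rdma/ib_fmr_pool.h>

	/* Hypothetical parameters; check IS_ERR() on the returned pool. */
	static struct ib_fmr_pool *example_create_fmr_pool(struct ib_pd *pd)
	{
		struct ib_fmr_pool_param params = {
			.max_pages_per_fmr	= 64,
			.page_shift		= PAGE_SHIFT,
			.access			= IB_ACCESS_LOCAL_WRITE |
						  IB_ACCESS_REMOTE_WRITE |
						  IB_ACCESS_REMOTE_READ,
			.pool_size		= 128,
			.dirty_watermark	= 32,
			.cache			= 1,
		};

		return ib_create_fmr_pool(pd, &params);
	}

	static void example_destroy_fmr_pool(struct ib_fmr_pool *pool)
	{
		if (pool)
			ib_destroy_fmr_pool(pool);
	}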
/linux-4.1.27/drivers/gpu/drm/sis/
H A Dsis_mm.c83 void *data, int pool) sis_drm_alloc()
94 if (0 == ((pool == 0) ? dev_priv->vram_initialized : sis_drm_alloc()
109 if (pool == AGP_TYPE) { sis_drm_alloc()
141 mem->offset = ((pool == 0) ? sis_drm_alloc()
159 DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size, sis_drm_alloc()
82 sis_drm_alloc(struct drm_device *dev, struct drm_file *file, void *data, int pool) sis_drm_alloc() argument
/linux-4.1.27/drivers/video/fbdev/
H A Dsh_mobile_meram.c149 * @pool: Allocation pool to manage the MERAM
160 struct gen_pool *pool; member in struct:sh_mobile_meram_priv
203 return gen_pool_alloc(priv->pool, size); meram_alloc()
209 gen_pool_free(priv->pool, mem, size); meram_free()
685 /* Create and initialize the MERAM memory pool. */ sh_mobile_meram_probe()
686 priv->pool = gen_pool_create(ilog2(MERAM_GRANULARITY), -1); sh_mobile_meram_probe()
687 if (priv->pool == NULL) { sh_mobile_meram_probe()
692 error = gen_pool_add(priv->pool, meram->start, resource_size(meram), sh_mobile_meram_probe()
709 if (priv->pool) sh_mobile_meram_probe()
710 gen_pool_destroy(priv->pool); sh_mobile_meram_probe()
732 gen_pool_destroy(priv->pool); sh_mobile_meram_remove()
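sh_mobile_meram above manages its on-chip memory with a genalloc pool. A minimal sketch of the same gen_pool calls, with an assumed granularity and a caller-provided physical region (the base/size would come from a platform resource in the real driver).

	#include <linux/genalloc.h>
	#include <linux/log2.h>

	#define EXAMPLE_GRANULARITY	1024	/* assumed allocation unit, bytes */

	static struct gen_pool *example_pool;

	static int example_pool_init(unsigned long base, size_t size)
	{
		/* Allocation unit is 2^order bytes; -1 means no NUMA node preference. */
		example_pool = gen_pool_create(ilog2(EXAMPLE_GRANULARITY), -1);
		if (!example_pool)
			return -ENOMEM;

		/* Hand the whole region to the pool. */
		if (gen_pool_add(example_pool, base, size, -1)) {
			gen_pool_destroy(example_pool);
			return -ENOMEM;
		}
		return 0;
	}

	static unsigned long example_alloc(size_t size)
	{
		return gen_pool_alloc(example_pool, size);	/* 0 on failure */
	}

	static void example_free(unsigned long addr, size_t size)
	{
		gen_pool_free(example_pool, addr, size);
	}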
/linux-4.1.27/arch/hexagon/kernel/
H A Ddma.c61 /* Allocates from a pool of uncached memory that was reserved at boot time */
72 * for the pool. hexagon_dma_alloc_coherent()
79 panic("Can't create %s() memory pool!", __func__); hexagon_dma_alloc_coherent()
/linux-4.1.27/include/linux/iio/
H A Dtrigger.h54 * @pool: [INTERN] bitmap of irqs currently in use.
55 * @pool_lock: [INTERN] protection of the irq pool.
71 unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)]; member in struct:iio_trigger
H A Dtrigger_consumer.h28 * trigger pool
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_quota.h71 /* Name used in the configuration logs to identify the default metadata pool
72 * (composed of all the MDTs, with pool ID 0) and the default data pool (all
73 * the OSTs, with pool ID 0 too). */
213 /* Since we enforce only inode quota in meta pool (MDTs), and block quota in
214 * data pool (OSTs), there are at most 4 quota ids being enforced in a single
H A Dlustre_cfg.h84 LCFG_POOL_NEW = 0x00ce020, /**< create an ost pool name */
85 LCFG_POOL_ADD = 0x00ce021, /**< add an ost to a pool */
86 LCFG_POOL_REM = 0x00ce022, /**< remove an ost from a pool */
87 LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */
H A Dlustre_dlm.h202 * LDLM pool is a pool of locks in the namespace without any implicitly
204 * Locks in the pool are organized in LRU.
206 * can trigger freeing of locks from the pool
209 /** Recalculate pool \a pl usage */
211 /** Cancel at least \a nr locks from pool \a pl */
217 /** One second for pools thread check interval. Each pool has its own period. */
230 * LDLM pool structure to track granted locks.
241 /** Number of allowed locks in pool, both client and server side. */
258 /** Recalculation period for pool. */
297 * LDLM pools related, type of lock pool in the namespace.
369 /** Backward link to OBD, required for LDLM pool to store new SLV. */
464 /** LDLM pool structure for this namespace */
501 * recalculation of LDLM pool statistics should be skipped.
1453 /** \defgroup ldlm_pools Various LDLM pool related functions
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_av.c189 ah->av = pci_pool_alloc(dev->av_table.pool, mthca_create_ah()
250 pci_pool_free(dev->av_table.pool, ah->av, ah->avdma); mthca_destroy_ah()
338 dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev, mthca_init_av_table()
341 if (!dev->av_table.pool) mthca_init_av_table()
358 pci_pool_destroy(dev->av_table.pool); mthca_init_av_table()
372 pci_pool_destroy(dev->av_table.pool); mthca_cleanup_av_table()
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_ib.c74 /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address radeon_ib_get()
75 * space and soffset is the offset inside the pool bo radeon_ib_get()
183 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
187 * Initialize the suballocator to manage a pool of memory
231 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
235 * Tear down the suballocator managing the pool of memory
/linux-4.1.27/drivers/dma/ppc4xx/
H A Dadma.h57 * @pool_size: size of the pool
78 * @lock: serializes enqueue/dequeue operations to the slot pool
84 * @slots_allocated: records the actual size of the descriptor slot pool
134 * @idx: pool index
/linux-4.1.27/arch/sparc/include/asm/
H A Dkdebug_32.h35 * taken from to total pool.
/linux-4.1.27/arch/mips/cavium-octeon/executive/
H A Dcvmx-cmd-queue.c107 * @fpa_pool: FPA pool the command queues should come from.
108 * @pool_size: Size of each buffer in the FPA pool (bytes)
152 "FPA pool (%u).\n", cvmx_cmd_queue_initialize()
159 "FPA pool size (%u).\n", cvmx_cmd_queue_initialize()
/linux-4.1.27/include/trace/events/
H A Drandom.h46 TP_printk("%s pool: bytes %d caller %pS",
84 TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
189 TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
245 TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
H A Dworkqueue.h57 __entry->cpu = pwq->pool->cpu;
/linux-4.1.27/include/uapi/linux/netfilter_bridge/
H A Debt_among.h42 struct ebt_mac_wormhash_tuple pool[0]; member in struct:ebt_mac_wormhash
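ebt_among stores its MAC "wormhash" pool as a zero-length array at the end of the structure, sized at allocation time. A small sketch of that trailing-array pool layout with hypothetical struct names (not the netfilter ones):

	#include <linux/slab.h>
	#include <linux/types.h>

	struct example_tuple {
		unsigned char mac[6];
		__be32 ip;
	};

	struct example_wormhash {
		int table[256];			/* hash buckets indexing into pool[] */
		int poolsize;			/* number of entries in pool[] */
		struct example_tuple pool[0];	/* entry storage follows the header */
	};

	static struct example_wormhash *example_wormhash_alloc(int n)
	{
		struct example_wormhash *wh;

		/* One allocation covers the header plus n pool entries. */
		wh = kzalloc(sizeof(*wh) + n * sizeof(wh->pool[0]), GFP_KERNEL);
		if (wh)
			wh->poolsize = n;
		return wh;
	}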
/linux-4.1.27/drivers/infiniband/hw/amso1100/
H A Dc2_rnic.c477 /* Allocate MQ shared pointer pool for kernel clients. User c2_rnic_init()
486 * the shared pointer pool. c2_rnic_init()
583 /* Initialize the PD pool */ c2_rnic_init()
588 /* Initialize the QP pool */ c2_rnic_init()
627 /* Free the QP pool */ c2_rnic_term()
630 /* Free the PD pool */ c2_rnic_term()
648 /* Free the MQ shared pointer pool */ c2_rnic_term()
/linux-4.1.27/drivers/infiniband/ulp/srp/
H A Dib_srp.c332 * srp_destroy_fr_pool() - free the resources owned by a pool
333 * @pool: Fast registration pool to be destroyed.
335 static void srp_destroy_fr_pool(struct srp_fr_pool *pool) srp_destroy_fr_pool() argument
340 if (!pool) srp_destroy_fr_pool()
343 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { srp_destroy_fr_pool()
349 kfree(pool); srp_destroy_fr_pool()
353 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
363 struct srp_fr_pool *pool; srp_create_fr_pool() local
372 pool = kzalloc(sizeof(struct srp_fr_pool) + srp_create_fr_pool()
374 if (!pool) srp_create_fr_pool()
376 pool->size = pool_size; srp_create_fr_pool()
377 pool->max_page_list_len = max_page_list_len; srp_create_fr_pool()
378 spin_lock_init(&pool->lock); srp_create_fr_pool()
379 INIT_LIST_HEAD(&pool->free_list); srp_create_fr_pool()
381 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { srp_create_fr_pool()
394 list_add_tail(&d->entry, &pool->free_list); srp_create_fr_pool()
398 return pool; srp_create_fr_pool()
401 srp_destroy_fr_pool(pool); srp_create_fr_pool()
404 pool = ERR_PTR(ret); srp_create_fr_pool()
410 * @pool: Pool to obtain descriptor from.
412 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool) srp_fr_pool_get() argument
417 spin_lock_irqsave(&pool->lock, flags); srp_fr_pool_get()
418 if (!list_empty(&pool->free_list)) { srp_fr_pool_get()
419 d = list_first_entry(&pool->free_list, typeof(*d), entry); srp_fr_pool_get()
422 spin_unlock_irqrestore(&pool->lock, flags); srp_fr_pool_get()
429 * @pool: Pool the descriptor was allocated from.
436 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc, srp_fr_pool_put() argument
442 spin_lock_irqsave(&pool->lock, flags); srp_fr_pool_put()
444 list_add(&desc[i]->entry, &pool->free_list); srp_fr_pool_put()
445 spin_unlock_irqrestore(&pool->lock, flags); srp_fr_pool_put()
549 "FR pool allocation failed (%d)\n", ret); srp_create_ch_ib()
560 "FMR pool allocation failed (%d)\n", ret); srp_create_ch_ib()
1630 * Return an IU and possible credit to the free pool
3198 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n"); srp_create_target()
H A Dib_srp.h249 * struct srp_fr_pool - pool of fast registration descriptors
253 * @size: Number of descriptors in this pool.
257 * @desc: Fast registration descriptor pool.
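srp_fr_pool_get()/srp_fr_pool_put() above are a classic spinlock-protected free list. A generic, self-contained sketch of that pattern with hypothetical names (not the SRP descriptors themselves):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_desc {
		struct list_head entry;
		/* per-descriptor resources would live here */
	};

	struct example_pool {
		spinlock_t lock;
		struct list_head free_list;
	};

	static void example_pool_init(struct example_pool *pool)
	{
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->free_list);
	}

	/* Take one descriptor from the pool, or NULL if it is empty. */
	static struct example_desc *example_pool_get(struct example_pool *pool)
	{
		struct example_desc *d = NULL;
		unsigned long flags;

		spin_lock_irqsave(&pool->lock, flags);
		if (!list_empty(&pool->free_list)) {
			d = list_first_entry(&pool->free_list,
					     struct example_desc, entry);
			list_del(&d->entry);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
		return d;
	}

	/* Return a descriptor to the pool. */
	static void example_pool_put(struct example_pool *pool, struct example_desc *d)
	{
		unsigned long flags;

		spin_lock_irqsave(&pool->lock, flags);
		list_add(&d->entry, &pool->free_list);
		spin_unlock_irqrestore(&pool->lock, flags);
	}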
/linux-4.1.27/drivers/net/ethernet/marvell/
H A Dmvpp2.c44 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
176 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
178 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
180 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
182 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
184 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
185 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
188 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
200 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
206 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
207 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
210 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
613 /* BM short pool packet size
898 /* Number of buffers for this pool */
910 /* Ports using BM pool */
3343 /* Create pool */ mvpp2_bm_pool_create()
3361 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", mvpp2_bm_pool_create()
3384 /* Set pool buffer size */ mvpp2_bm_pool_bufsize_set()
3397 /* Free all buffers from the pool */ mvpp2_bm_bufs_free()
3413 /* Update BM driver with number of buffers removed from pool */ mvpp2_bm_bufs_free()
3417 /* Cleanup pool */ mvpp2_bm_pool_destroy()
3426 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); mvpp2_bm_pool_destroy()
3459 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); mvpp2_bm_pools_init()
3488 /* Attach long pool to rxq */ mvpp2_rxq_long_pool_set()
3506 /* Attach short pool to rxq */ mvpp2_rxq_short_pool_set()
3524 /* Allocate skb for BM pool */ mvpp2_skb_alloc()
3549 /* Set pool number in a BM cookie */ mvpp2_bm_cookie_pool_set()
3550 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) mvpp2_bm_cookie_pool_set() argument
3555 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS); mvpp2_bm_cookie_pool_set()
3560 /* Get pool number from a BM cookie */ mvpp2_bm_cookie_pool_get()
3567 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, mvpp2_bm_pool_put() argument
3571 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr); mvpp2_bm_pool_put()
3575 static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, mvpp2_bm_pool_mc_put() argument
3584 mvpp2_bm_pool_put(port, pool, mvpp2_bm_pool_mc_put()
3589 /* Refill BM pool */ mvpp2_pool_refill()
3593 int pool = mvpp2_bm_cookie_pool_get(bm); mvpp2_pool_refill() local
3595 mvpp2_bm_pool_put(port, pool, phys_addr, cookie); mvpp2_pool_refill()
3598 /* Allocate buffers for the pool */ mvpp2_bm_bufs_add()
3613 "cannot allocate %d buffers for pool %d\n", mvpp2_bm_bufs_add()
3627 /* Update BM driver with number of buffers added to pool */ mvpp2_bm_bufs_add()
3632 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", mvpp2_bm_bufs_add()
3637 "%s pool %d: %d of %d buffers added\n", mvpp2_bm_bufs_add()
3643 /* Notify the driver that BM pool is being used as a specific type and return the
3644 * pool pointer on success
3647 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, mvpp2_bm_pool_use() argument
3651 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; mvpp2_bm_pool_use()
3655 netdev_err(port->dev, "mixing pool types is forbidden\n"); mvpp2_bm_pool_use()
3664 /* Allocate buffers in case BM pool is used as long pool, but packet mvpp2_bm_pool_use()
3665 * size doesn't match MTU or BM pool hasn't been used yet mvpp2_bm_pool_use()
3672 * the pool is not empty mvpp2_bm_pool_use()
3684 /* Allocate buffers for this pool */ mvpp2_bm_pool_use()
3687 WARN(1, "pool %d: %d of %d allocated\n", mvpp2_bm_pool_use()
3752 /* Update BM pool with new buffer size */ mvpp2_bm_update_mtu()
3755 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); mvpp2_bm_update_mtu()
3762 WARN(1, "pool %d: %d of %d allocated\n", mvpp2_bm_update_mtu()
4121 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >> mvpp2_bm_cookie_build() local
4125 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | mvpp2_bm_cookie_build()
4538 /* Push packets received by the RXQ to BM pool */ mvpp2_rxq_drop_pkts()
4932 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ mvpp2_rx_refill()
5042 int pool, rx_bytes, err; mvpp2_rx() local
5049 pool = mvpp2_bm_cookie_pool_get(bm); mvpp2_rx()
5050 bm_pool = &port->priv->bm_pools[pool]; mvpp2_rx()
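mvpp2 packs the buffer-manager pool number into a 32-bit "cookie" alongside other fields, as the mvpp2_bm_cookie_pool_set()/get() lines above show. A small sketch of that encode/decode bit manipulation with an assumed field offset (the real MVPP2_BM_COOKIE_POOL_OFFS value lives in the driver):

	#include <linux/types.h>

	/* Assumed offset; the driver defines its own MVPP2_BM_COOKIE_POOL_OFFS. */
	#define EXAMPLE_COOKIE_POOL_OFFS	8

	static inline u32 example_cookie_pool_set(u32 cookie, int pool)
	{
		u32 bm = cookie & ~(0xFF << EXAMPLE_COOKIE_POOL_OFFS);

		/* Pool id is an 8-bit field inside the cookie. */
		bm |= (pool & 0xFF) << EXAMPLE_COOKIE_POOL_OFFS;
		return bm;
	}

	static inline int example_cookie_pool_get(u32 cookie)
	{
		return (cookie >> EXAMPLE_COOKIE_POOL_OFFS) & 0xFF;
	}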
/linux-4.1.27/drivers/net/ethernet/hisilicon/
H A Dhix5hd2_gmac.c206 struct hix5hd2_desc_sw pool[QUEUE_NUMS]; member in struct:hix5hd2_priv
207 #define rx_fq pool[0]
208 #define rx_bq pool[1]
209 #define tx_bq pool[2]
210 #define tx_rq pool[3]
847 if (priv->pool[i].desc) { hix5hd2_destroy_hw_desc_queue()
848 dma_free_coherent(priv->dev, priv->pool[i].size, hix5hd2_destroy_hw_desc_queue()
849 priv->pool[i].desc, hix5hd2_destroy_hw_desc_queue()
850 priv->pool[i].phys_addr); hix5hd2_destroy_hw_desc_queue()
851 priv->pool[i].desc = NULL; hix5hd2_destroy_hw_desc_queue()
869 size = priv->pool[i].count * sizeof(struct hix5hd2_desc); hix5hd2_init_hw_desc_queue()
876 priv->pool[i].size = size; hix5hd2_init_hw_desc_queue()
877 priv->pool[i].desc = virt_addr; hix5hd2_init_hw_desc_queue()
878 priv->pool[i].phys_addr = phys_addr; hix5hd2_init_hw_desc_queue()
/linux-4.1.27/drivers/hid/usbhid/
H A Dhid-pidff.c170 struct pidff_usage pool[sizeof(pidff_pool)]; member in struct:pidff_device
1149 PIDFF_FIND_FIELDS(pool, PID_POOL, 0); pidff_init_fields()
1177 /* pool report is sometimes messed up, refetch it */ pidff_reset()
1181 if (pidff->pool[PID_SIMULTANEOUS_MAX].value) { pidff_reset()
1182 while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) { pidff_reset()
1186 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); pidff_reset()
1297 if (pidff->pool[PID_SIMULTANEOUS_MAX].value) hid_pidff_init()
1299 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); hid_pidff_init()
1301 if (pidff->pool[PID_RAM_POOL_SIZE].value) hid_pidff_init()
1303 pidff->pool[PID_RAM_POOL_SIZE].value[0]); hid_pidff_init()
1305 if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && hid_pidff_init()
1306 pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) { hid_pidff_init()
1308 "device does not support device managed pool\n"); hid_pidff_init()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/socklnd/
H A Dsocklnd_modparams.c44 /* Number of daemons in each thread pool which is percpt,
48 MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
/linux-4.1.27/drivers/crypto/amcc/
H A Dcrypto4xx_core.h90 void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
92 void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
/linux-4.1.27/arch/x86/include/asm/
H A Dstackprotector.h67 * We both use the random pool and the current TSC as a source boot_init_stack_canary()
70 * on during the bootup the random pool has true entropy too. boot_init_stack_canary()
/linux-4.1.27/fs/jffs2/
H A Djffs2_fs_sb.h36 /* The size of the reserved pool. The reserved pool is the JFFS2 flash
/linux-4.1.27/net/bridge/netfilter/
H A Debt_among.c36 p = &wh->pool[i]; ebt_mac_wormhash_contains()
43 p = &wh->pool[i]; ebt_mac_wormhash_contains()
/linux-4.1.27/drivers/media/usb/pvrusb2/
H A Dpvrusb2-debug.h51 #define PVR2_TRACE_BUF_POOL (1 << 23) /* Track buffer pool management */
/linux-4.1.27/arch/mips/bcm63xx/
H A Dnvram.c71 * pool. bcm63xx_nvram_init()
/linux-4.1.27/drivers/staging/nvec/
H A Dnvec.h30 /* NVEC_POOL_SIZE - Size of the pool in &struct nvec_msg */
92 * @used: Used for the message pool to mark a message as free/allocated.
124 * @msg_pool: A pool of messages for allocation
/linux-4.1.27/drivers/uio/
H A Duio_pruss.c35 MODULE_PARM_DESC(sram_pool_sz, "sram pool size to allocate ");
39 MODULE_PARM_DESC(extram_pool_sz, "external ram pool size to allocate");
165 dev_err(dev, "Could not allocate SRAM pool\n"); pruss_probe()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
H A Dbnx2x_sp.h305 /* MACs credit pool */
308 /* VLANs credit pool */
645 /* Current amount of credit in the pool */
651 /* Allocate a pool table statically.
660 /* Base pool offset (initialized differently */
664 * Get the next free pool entry.
666 * @return true if there was a free entry in the pool
671 * Return the entry back to the pool.
674 * returned to the pool.
679 * Get the requested amount of credit from the pool.
687 * Returns the credit to the pool.
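The bnx2x credit pool above tracks how many MAC/VLAN table entries remain available. A minimal atomic credit-pool sketch of that idea; this is an illustration only, not the driver's implementation, which also hands out specific entry offsets from a static table.

	#include <linux/atomic.h>
	#include <linux/types.h>

	struct example_credit_pool {
		atomic_t credit;	/* how many entries are still available */
	};

	static void example_credit_pool_init(struct example_credit_pool *p, int total)
	{
		atomic_set(&p->credit, total);
	}

	/* Try to take @cnt credits; back out and fail if the pool runs dry. */
	static bool example_credit_get(struct example_credit_pool *p, int cnt)
	{
		if (atomic_sub_return(cnt, &p->credit) >= 0)
			return true;

		atomic_add(cnt, &p->credit);
		return false;
	}

	/* Return @cnt credits to the pool. */
	static void example_credit_put(struct example_credit_pool *p, int cnt)
	{
		atomic_add(cnt, &p->credit);
	}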
/linux-4.1.27/include/linux/sunrpc/
H A Dsvc.h37 * RPC service thread pool.
42 * have one pool per NUMA node. This optimisation reduces cross-
46 unsigned int sp_id; /* pool id; also node id on NUMA */
49 unsigned int sp_nrthreads; /* # of threads in pool */
51 struct svc_pool_stats sp_stats; /* statistics on pool operation */
234 struct svc_pool * rq_pool; /* thread pool */
434 struct svc_pool *pool, int node);
/linux-4.1.27/fs/ceph/
H A Dxattr.c73 s64 pool = ceph_file_layout_pg_pool(ci->i_layout); ceph_vxattrcb_layout() local
79 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); ceph_vxattrcb_layout()
83 "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=", ceph_vxattrcb_layout()
98 "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld", ceph_vxattrcb_layout()
102 (unsigned long long)pool); ceph_vxattrcb_layout()
141 s64 pool = ceph_file_layout_pg_pool(ci->i_layout); ceph_vxattrcb_layout_pool() local
145 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); ceph_vxattrcb_layout_pool()
149 ret = snprintf(val, size, "%lld", (unsigned long long)pool); ceph_vxattrcb_layout_pool()
241 XATTR_LAYOUT_FIELD(dir, layout, pool),
268 XATTR_LAYOUT_FIELD(file, layout, pool),
/linux-4.1.27/drivers/char/agp/
H A Dfrontend.c54 curr = agp_fe.current_controller->pool; agp_find_mem_by_key()
71 /* Check to see if this is even in the memory pool */ agp_remove_from_pool()
88 agp_fe.current_controller->pool = next; agp_remove_from_pool()
203 prev = agp_fe.current_controller->pool; agp_insert_into_pool()
209 agp_fe.current_controller->pool = temp; agp_insert_into_pool()
363 memory = controller->pool; agp_remove_all_memory()
/linux-4.1.27/drivers/usb/chipidea/
H A Dci.h81 * @td_pool: pointer to controller's TD pool
171 * @qh_pool: allocation pool for queue heads
172 * @td_pool: allocation pool for transfer descriptors
/linux-4.1.27/arch/arm64/mm/
H A Ddma-mapping.c60 WARN(1, "coherent pool not initialised!\n"); __alloc_from_pool()
397 pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", atomic_pool_init()
412 pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", atomic_pool_init()
/linux-4.1.27/arch/arm/include/asm/
H A Ddma-mapping.h276 * coherent DMA pool above the default value of 256KiB. It must be called
297 * @small_buf_size: size of buffers to use with small buffer pool
298 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
/linux-4.1.27/drivers/iio/
H A Dindustrialio-trigger.c173 ret = bitmap_find_free_region(trig->pool, iio_trigger_get_irq()
186 clear_bit(irq - trig->subirq_base, trig->pool); iio_trigger_put_irq()
202 = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER); iio_trigger_attach_poll_func()
229 = (bitmap_weight(trig->pool, iio_trigger_detach_poll_func()
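The IIO trigger core above allocates consumer IRQ slots out of a small bitmap pool protected by a lock. A sketch of that bitmap get/put pattern with an assumed pool size (the real one is CONFIG_IIO_CONSUMERS_PER_TRIGGER) and hypothetical structure names:

	#include <linux/bitmap.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	#define EXAMPLE_POOL_SIZE 8	/* assumed number of slots */

	struct example_irq_pool {
		struct mutex lock;
		unsigned long pool[BITS_TO_LONGS(EXAMPLE_POOL_SIZE)];
		int irq_base;
	};

	static void example_irq_pool_init(struct example_irq_pool *p, int irq_base)
	{
		mutex_init(&p->lock);
		bitmap_zero(p->pool, EXAMPLE_POOL_SIZE);
		p->irq_base = irq_base;
	}

	/* Claim one slot and return its irq number, or a negative errno. */
	static int example_irq_get(struct example_irq_pool *p)
	{
		int ret;

		mutex_lock(&p->lock);
		ret = bitmap_find_free_region(p->pool, EXAMPLE_POOL_SIZE, 0);
		mutex_unlock(&p->lock);

		return ret < 0 ? ret : ret + p->irq_base;
	}

	/* Release a previously claimed slot. */
	static void example_irq_put(struct example_irq_pool *p, int irq)
	{
		mutex_lock(&p->lock);
		clear_bit(irq - p->irq_base, p->pool);
		mutex_unlock(&p->lock);
	}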
/linux-4.1.27/drivers/net/wireless/ti/wl12xx/
H A Dwl12xx.h155 /* Size (in Memory Blocks) of TX pool */
/linux-4.1.27/arch/x86/mm/
H A Dhighmem_32.c28 * invalidation when the kmap pool wraps.
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp5/
H A Dmdp5_ctl.c32 * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
77 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
526 dev_err(dev->dev, "Increase static pool size to at least %d\n", mdp5_ctlm_init()
539 /* initialize each CTL of the pool: */ mdp5_ctlm_init()
/linux-4.1.27/arch/metag/mm/
H A Dhighmem.c34 * invalidation when the kmap pool wraps.
/linux-4.1.27/drivers/usb/musb/
H A Dcppi_dma.h126 struct dma_pool *pool; member in struct:cppi
/linux-4.1.27/arch/powerpc/platforms/pseries/
H A Dlparcfg.c108 * XXXXXX - Max procs capacity % available to the partitions pool.
110 * partitions pool.
173 /* pool related entries are appropriate for shared configs */ parse_ppp_data()
177 seq_printf(m, "pool=%d\n", ppp_data.pool_num); parse_ppp_data()
/linux-4.1.27/arch/mips/mm/
H A Dhighmem.c39 * invalidation when the kmap pool wraps.
/linux-4.1.27/arch/arm/mach-mmp/
H A Dsram.c96 dev_err(&pdev->dev, "create pool failed\n"); sram_probe()
/linux-4.1.27/drivers/scsi/mpt2sas/
H A Dmpt2sas_base.h576 * @reply_post_host_index: head index in the pool where FW completes IO
750 * @request: pool of request frames
756 * @chain: pool of chains
777 * @sense: pool of sense
782 * @reply: pool of replies:
786 * @reply_free: pool for reply free queue (32 bit addr)
789 * @reply_free_host_index: tail index in pool to insert free replies
798 * @reply_post_host_index: head index in the pool where FW completes IO
/linux-4.1.27/drivers/scsi/mpt3sas/
H A Dmpt3sas_base.h496 * @reply_post_host_index: head index in the pool where FW completes IO
684 * @request: pool of request frames
692 * @chain: pool of chains
710 * @sense: pool of sense
715 * @reply: pool of replies:
719 * @reply_free: pool for reply free queue (32 bit addr)
722 * @reply_free_host_index: tail index in pool to insert free replies
731 * @reply_post_host_index: head index in the pool where FW completes IO
/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/
H A Dlibcfs_cpu.h37 * in other words, CPU partition is a processors pool.
69 * . If cpu_npartitions=1(all CPUs in one pool), lustre should work the
/linux-4.1.27/drivers/staging/unisys/visorchipset/
H A Dvisorchipset.h231 void *visorchipset_cache_alloc(struct kmem_cache *pool,
233 void visorchipset_cache_free(struct kmem_cache *pool, void *p,
/linux-4.1.27/drivers/usb/gadget/function/
H A Du_serial.c362 struct list_head *pool = &port->write_pool; gs_start_tx() local
367 while (!port->write_busy && !list_empty(pool)) { gs_start_tx()
374 req = list_entry(pool->next, struct usb_request, list); gs_start_tx()
406 list_add(&req->list, pool); gs_start_tx()
431 struct list_head *pool = &port->read_pool; gs_start_rx() local
434 while (!list_empty(pool)) { gs_start_rx()
447 req = list_entry(pool->next, struct usb_request, list); gs_start_rx()
461 list_add(&req->list, pool); gs_start_rx()
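gs_start_tx()/gs_start_rx() above pull pre-allocated USB requests off per-port list pools and put them back if queueing fails. A hedged sketch of that list-of-requests pattern with a hypothetical port structure and trimmed error handling:

	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/usb/gadget.h>

	struct example_port {
		struct usb_ep		*ep;
		struct list_head	write_pool;	/* idle requests */
	};

	/* Move one idle request from the pool onto the hardware queue. */
	static int example_start_one_tx(struct example_port *port, void *buf, int len)
	{
		struct usb_request *req;
		int status;

		if (list_empty(&port->write_pool))
			return -EBUSY;

		req = list_first_entry(&port->write_pool, struct usb_request, list);
		list_del(&req->list);

		req->buf = buf;
		req->length = len;

		status = usb_ep_queue(port->ep, req, GFP_ATOMIC);
		if (status) {
			/* Queueing failed: return the request to the pool. */
			list_add(&req->list, &port->write_pool);
		}
		return status;
	}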
