/linux-4.4.14/mm/ |
D | mempool.c |
    24   static void poison_error(mempool_t *pool, void *element, size_t size,  in poison_error() argument
    27   const int nr = pool->curr_nr;  in poison_error()
    33   pr_err("Mempool %p size %zu\n", pool, size);  in poison_error()
    41   static void __check_element(mempool_t *pool, void *element, size_t size)  in __check_element() argument
    50   poison_error(pool, element, size, i);  in __check_element()
    57   static void check_element(mempool_t *pool, void *element)  in check_element() argument
    60   if (pool->free == mempool_free_slab || pool->free == mempool_kfree)  in check_element()
    61   __check_element(pool, element, ksize(element));  in check_element()
    64   if (pool->free == mempool_free_pages) {  in check_element()
    65   int order = (int)(long)pool->pool_data;  in check_element()
    [all …]
|
D | zbud.c |
    128  static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)  in zbud_zpool_evict() argument
    130  if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)  in zbud_zpool_evict()
    131  return pool->zpool_ops->evict(pool->zpool, handle);  in zbud_zpool_evict()
    144  struct zbud_pool *pool;  in zbud_zpool_create() local
    146  pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);  in zbud_zpool_create()
    147  if (pool) {  in zbud_zpool_create()
    148  pool->zpool = zpool;  in zbud_zpool_create()
    149  pool->zpool_ops = zpool_ops;  in zbud_zpool_create()
    151  return pool;  in zbud_zpool_create()
    154  static void zbud_zpool_destroy(void *pool)  in zbud_zpool_destroy() argument
    [all …]
|
D | dmapool.c |
    74   struct dma_pool *pool;  in show_pools() local
    84   list_for_each_entry(pool, &dev->dma_pools, pools) {  in show_pools()
    88   spin_lock_irq(&pool->lock);  in show_pools()
    89   list_for_each_entry(page, &pool->page_list, page_list) {  in show_pools()
    93   spin_unlock_irq(&pool->lock);  in show_pools()
    97   pool->name, blocks,  in show_pools()
    98   pages * (pool->allocation / pool->size),  in show_pools()
    99   pool->size, pages);  in show_pools()
    206  static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)  in pool_initialise_page() argument
    209  unsigned int next_boundary = pool->boundary;  in pool_initialise_page()
    [all …]
|
D | zswap.c |
    149  struct zswap_pool *pool;  member
    187  static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
    188  static int zswap_pool_get(struct zswap_pool *pool);
    189  static void zswap_pool_put(struct zswap_pool *pool);
    203  struct zswap_pool *pool;  in zswap_update_total_size() local
    208  list_for_each_entry_rcu(pool, &zswap_pools, list)  in zswap_update_total_size()
    209  total += zpool_get_total_size(pool->zpool);  in zswap_update_total_size()
    309  zpool_free(entry->pool->zpool, entry->handle);  in zswap_free_entry()
    310  zswap_pool_put(entry->pool);  in zswap_free_entry()
    421  static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,  in __zswap_cpu_comp_notifier() argument
    [all …]
|
D | zsmalloc.c |
    287  static int create_handle_cache(struct zs_pool *pool)  in create_handle_cache() argument
    289  pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,  in create_handle_cache()
    291  return pool->handle_cachep ? 0 : 1;  in create_handle_cache()
    294  static void destroy_handle_cache(struct zs_pool *pool)  in destroy_handle_cache() argument
    296  kmem_cache_destroy(pool->handle_cachep);  in destroy_handle_cache()
    299  static unsigned long alloc_handle(struct zs_pool *pool)  in alloc_handle() argument
    301  return (unsigned long)kmem_cache_alloc(pool->handle_cachep,  in alloc_handle()
    302  pool->flags & ~__GFP_HIGHMEM);  in alloc_handle()
    305  static void free_handle(struct zs_pool *pool, unsigned long handle)  in free_handle() argument
    307  kmem_cache_free(pool->handle_cachep, (void *)handle);  in free_handle()
    [all …]
|
D | zpool.c |
    22   void *pool;  member
    181  zpool->pool = driver->create(name, gfp, ops, zpool);  in zpool_create_pool()
    184  if (!zpool->pool) {  in zpool_create_pool()
    218  zpool->driver->destroy(zpool->pool);  in zpool_destroy_pool()
    257  return zpool->driver->malloc(zpool->pool, size, gfp, handle);  in zpool_malloc()
    276  zpool->driver->free(zpool->pool, handle);  in zpool_free()
    299  return zpool->driver->shrink(zpool->pool, pages, reclaimed);  in zpool_shrink()
    327  return zpool->driver->map(zpool->pool, handle, mapmode);  in zpool_map_handle()
    342  zpool->driver->unmap(zpool->pool, handle);  in zpool_unmap_handle()
    355  return zpool->driver->total_size(zpool->pool);  in zpool_get_total_size()
|
/linux-4.4.14/drivers/staging/android/ion/ |
D | ion_page_pool.c |
    27   static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)  in ion_page_pool_alloc_pages() argument
    29   struct page *page = alloc_pages(pool->gfp_mask, pool->order);  in ion_page_pool_alloc_pages()
    33   ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,  in ion_page_pool_alloc_pages()
    38   static void ion_page_pool_free_pages(struct ion_page_pool *pool,  in ion_page_pool_free_pages() argument
    41   __free_pages(page, pool->order);  in ion_page_pool_free_pages()
    44   static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)  in ion_page_pool_add() argument
    46   mutex_lock(&pool->mutex);  in ion_page_pool_add()
    48   list_add_tail(&page->lru, &pool->high_items);  in ion_page_pool_add()
    49   pool->high_count++;  in ion_page_pool_add()
    51   list_add_tail(&page->lru, &pool->low_items);  in ion_page_pool_add()
    [all …]
|
D | ion_system_heap.c |
    60   struct ion_page_pool *pool = heap->pools[order_to_index(order)];  in alloc_buffer_page() local
    64   page = ion_page_pool_alloc(pool);  in alloc_buffer_page()
    87   struct ion_page_pool *pool = heap->pools[order_to_index(order)];  in free_buffer_page() local
    89   ion_page_pool_free(pool, page);  in free_buffer_page()
    227  struct ion_page_pool *pool = sys_heap->pools[i];  in ion_system_heap_shrink() local
    229  nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);  in ion_system_heap_shrink()
    264  struct ion_page_pool *pool = sys_heap->pools[i];  in ion_system_heap_debug_show() local
    267  pool->high_count, pool->order,  in ion_system_heap_debug_show()
    268  (PAGE_SIZE << pool->order) * pool->high_count);  in ion_system_heap_debug_show()
    270  pool->low_count, pool->order,  in ion_system_heap_debug_show()
    [all …]
|
D | ion_chunk_heap.c |
    29   struct gen_pool *pool;  member
    69   unsigned long paddr = gen_pool_alloc(chunk_heap->pool,  in ion_chunk_heap_allocate()
    84   gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),  in ion_chunk_heap_allocate()
    112  gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),  in ion_chunk_heap_free()
    162  chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +  in ion_chunk_heap_create()
    164  if (!chunk_heap->pool) {  in ion_chunk_heap_create()
    172  gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);  in ion_chunk_heap_create()
    191  gen_pool_destroy(chunk_heap->pool);  in ion_chunk_heap_destroy()
|
D | ion_carveout_heap.c |
    30   struct gen_pool *pool;  member
    40   unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);  in ion_carveout_allocate()
    56   gen_pool_free(carveout_heap->pool, addr, size);  in ion_carveout_free()
    170  carveout_heap->pool = gen_pool_create(12, -1);  in ion_carveout_heap_create()
    171  if (!carveout_heap->pool) {  in ion_carveout_heap_create()
    176  gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,  in ion_carveout_heap_create()
    190  gen_pool_destroy(carveout_heap->pool);  in ion_carveout_heap_destroy()
|
D | ion_priv.h | 392 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
|
/linux-4.4.14/drivers/md/ |
D | dm-thin.c |
    223  struct pool {  struct
    275  static enum pool_mode get_pool_mode(struct pool *pool);  argument
    276  static void metadata_operation_failed(struct pool *pool, const char *op, int r);
    283  struct pool *pool;  member
    303  struct pool *pool;  member
    374  static bool block_size_is_power_of_two(struct pool *pool)  in block_size_is_power_of_two() argument
    376  return pool->sectors_per_block_shift >= 0;  in block_size_is_power_of_two()
    379  static sector_t block_to_sectors(struct pool *pool, dm_block_t b)  in block_to_sectors() argument
    381  return block_size_is_power_of_two(pool) ?  in block_to_sectors()
    382  (b << pool->sectors_per_block_shift) :  in block_to_sectors()
    [all …]
|
D | dm-io.c |
    25   mempool_t *pool;  member
    57   client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);  in dm_io_client_create()
    58   if (!client->pool)  in dm_io_client_create()
    68   mempool_destroy(client->pool);  in dm_io_client_create()
    76   mempool_destroy(client->pool);  in dm_io_client_destroy()
    123  mempool_free(io, io->client->pool);  in complete_io()
    418  io = mempool_alloc(client->pool, GFP_NOIO);  in sync_io()
    450  io = mempool_alloc(client->pool, GFP_NOIO);  in async_io()
|
D | multipath.c |
    82   mempool_free(mp_bh, conf->pool);  in multipath_end_bh_io()
    119  mp_bh = mempool_alloc(conf->pool, GFP_NOIO);  in multipath_make_request()
    127  mempool_free(mp_bh, conf->pool);  in multipath_make_request()
    443  conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,  in multipath_run()
    445  if (conf->pool == NULL) {  in multipath_run()
    477  mempool_destroy(conf->pool);  in multipath_run()
    489  mempool_destroy(conf->pool);  in multipath_free()
|
D | multipath.h | 15 mempool_t *pool; member
|
D | Makefile |
    13   dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
    54   obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
|
/linux-4.4.14/sound/core/seq/ |
D | seq_memory.c |
    35   static inline int snd_seq_pool_available(struct snd_seq_pool *pool)  in snd_seq_pool_available() argument
    37   return pool->total_elements - atomic_read(&pool->counter);  in snd_seq_pool_available()
    40   static inline int snd_seq_output_ok(struct snd_seq_pool *pool)  in snd_seq_output_ok() argument
    42   return snd_seq_pool_available(pool) >= pool->room;  in snd_seq_output_ok()
    178  static inline void free_cell(struct snd_seq_pool *pool,  in free_cell() argument
    181  cell->next = pool->free;  in free_cell()
    182  pool->free = cell;  in free_cell()
    183  atomic_dec(&pool->counter);  in free_cell()
    189  struct snd_seq_pool *pool;  in snd_seq_cell_free() local
    193  pool = cell->pool;  in snd_seq_cell_free()
    [all …]
|
D | seq_memory.h |
    32   struct snd_seq_pool *pool; /* used pool */  member
    68   int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
    72   static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)  in snd_seq_unused_cells() argument
    74   return pool ? pool->total_elements - atomic_read(&pool->counter) : 0;  in snd_seq_unused_cells()
    78   static inline int snd_seq_total_cells(struct snd_seq_pool *pool)  in snd_seq_total_cells() argument
    80   return pool ? pool->total_elements : 0;  in snd_seq_total_cells()
    84   int snd_seq_pool_init(struct snd_seq_pool *pool);
    87   int snd_seq_pool_done(struct snd_seq_pool *pool);
    93   int snd_seq_pool_delete(struct snd_seq_pool **pool);
    102  int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait);
    [all …]
|
D | seq_fifo.c |
    39   f->pool = snd_seq_pool_new(poolsize);  in snd_seq_fifo_new()
    40   if (f->pool == NULL) {  in snd_seq_fifo_new()
    44   if (snd_seq_pool_init(f->pool) < 0) {  in snd_seq_fifo_new()
    45   snd_seq_pool_delete(&f->pool);  in snd_seq_fifo_new()
    82   if (f->pool) {  in snd_seq_fifo_delete()
    83   snd_seq_pool_done(f->pool);  in snd_seq_fifo_delete()
    84   snd_seq_pool_delete(&f->pool);  in snd_seq_fifo_delete()
    123  err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */  in snd_seq_fifo_event_in()
    238  if (snd_BUG_ON(!f || !f->pool))  in snd_seq_fifo_resize()
    252  oldpool = f->pool;  in snd_seq_fifo_resize()
    [all …]
|
D | seq_clientmgr.c |
    119   return snd_seq_total_cells(client->pool) > 0;  in snd_seq_write_pool_allocated()
    229   client->pool = snd_seq_pool_new(poolsize);  in seq_create_client1()
    230   if (client->pool == NULL) {  in seq_create_client1()
    260   snd_seq_pool_delete(&client->pool);  in seq_create_client1()
    280   if (client->pool)  in seq_free_client1()
    281   snd_seq_pool_delete(&client->pool);  in seq_free_client1()
    959   err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);  in snd_seq_client_enqueue_event()
    1024  if (!client->accept_output || client->pool == NULL)  in snd_seq_write()
    1028  if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {  in snd_seq_write()
    1029  if (snd_seq_pool_init(client->pool) < 0)  in snd_seq_write()
    [all …]
|
D | seq_fifo.h | 31 struct snd_seq_pool *pool; /* FIFO pool */ member
|
D | seq_clientmgr.h | 65 struct snd_seq_pool *pool; /* memory pool for this client */ member
|
/linux-4.4.14/drivers/infiniband/core/ |
D | fmr_pool.c |
    95   void (*flush_function)(struct ib_fmr_pool *pool,
    114  static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,  in ib_fmr_cache_lookup() argument
    122  if (!pool->cache_bucket)  in ib_fmr_cache_lookup()
    125  bucket = pool->cache_bucket + ib_fmr_hash(*page_list);  in ib_fmr_cache_lookup()
    137  static void ib_fmr_batch_release(struct ib_fmr_pool *pool)  in ib_fmr_batch_release() argument
    144  spin_lock_irq(&pool->pool_lock);  in ib_fmr_batch_release()
    146  list_for_each_entry(fmr, &pool->dirty_list, list) {  in ib_fmr_batch_release()
    159  list_splice_init(&pool->dirty_list, &unmap_list);  in ib_fmr_batch_release()
    160  pool->dirty_len = 0;  in ib_fmr_batch_release()
    162  spin_unlock_irq(&pool->pool_lock);  in ib_fmr_batch_release()
    [all …]
|
/linux-4.4.14/net/ceph/ |
D | msgpool.c |
    12   struct ceph_msgpool *pool = arg;  in msgpool_alloc() local
    15   msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);  in msgpool_alloc()
    17   dout("msgpool_alloc %s failed\n", pool->name);  in msgpool_alloc()
    19   dout("msgpool_alloc %s %p\n", pool->name, msg);  in msgpool_alloc()
    20   msg->pool = pool;  in msgpool_alloc()
    27   struct ceph_msgpool *pool = arg;  in msgpool_free() local
    30   dout("msgpool_release %s %p\n", pool->name, msg);  in msgpool_free()
    31   msg->pool = NULL;  in msgpool_free()
    35   int ceph_msgpool_init(struct ceph_msgpool *pool, int type,  in ceph_msgpool_init() argument
    39   pool->type = type;  in ceph_msgpool_init()
    [all …]
|
D | osdmap.c |
    380  if (l.pool < r.pool)  in pgid_cmp()
    382  if (l.pool > r.pool)  in pgid_cmp()
    433  pgid.pool, pgid.seed, pg);  in __lookup_pg_mapping()
    445  dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,  in __remove_pg_mapping()
    451  dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);  in __remove_pg_mapping()
    624  u64 pool;  in decode_pool_names() local
    629  ceph_decode_64_safe(p, end, pool, bad);  in decode_pool_names()
    631  dout(" pool %llu len %d\n", pool, len);  in decode_pool_names()
    633  pi = __lookup_pg_pool(&map->pg_pools, pool);  in decode_pool_names()
    803  u64 pool;  in __decode_pools() local
    [all …]
|
D | debugfs.c |
    69   struct ceph_pg_pool_info *pool =  in osdmap_show() local
    73   pool->id, pool->pg_num, pool->pg_num_mask,  in osdmap_show()
    74   pool->read_tier, pool->write_tier);  in osdmap_show()
    91   seq_printf(s, "pg_temp %llu.%x [", pg->pgid.pool,  in osdmap_show()
    102  seq_printf(s, "primary_temp %llu.%x %d\n", pg->pgid.pool,  in osdmap_show()
    157  req->r_pgid.pool, req->r_pgid.seed);  in osdc_show()
|
D | osd_client.c |
    408   req->r_base_oloc.pool = -1;  in ceph_osdc_alloc_request()
    409   req->r_target_oloc.pool = -1;  in ceph_osdc_alloc_request()
    822   req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);  in ceph_osdc_new_request()
    1364  if (req->r_target_oloc.pool == -1) {  in __calc_request_pg()
    1377  pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool);  in __calc_request_pg()
    1381  req->r_target_oloc.pool = pi->read_tier;  in __calc_request_pg()
    1384  req->r_target_oloc.pool = pi->write_tier;  in __calc_request_pg()
    1456  req->r_tid, pgid.pool, pgid.seed, o,  in __map_request()
    1504  (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);  in __send_request()
    1509  put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool);  in __send_request()
    [all …]
|
/linux-4.4.14/lib/ |
D | percpu_ida.c |
    60   static inline void steal_tags(struct percpu_ida *pool,  in steal_tags() argument
    63   unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;  in steal_tags()
    66   for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);  in steal_tags()
    68   cpu = cpumask_next(cpu, &pool->cpus_have_tags);  in steal_tags()
    71   cpu = cpumask_first(&pool->cpus_have_tags);  in steal_tags()
    76   pool->cpu_last_stolen = cpu;  in steal_tags()
    77   remote = per_cpu_ptr(pool->tag_cpu, cpu);  in steal_tags()
    79   cpumask_clear_cpu(cpu, &pool->cpus_have_tags);  in steal_tags()
    106  static inline void alloc_global_tags(struct percpu_ida *pool,  in alloc_global_tags() argument
    110  pool->freelist, &pool->nr_free,  in alloc_global_tags()
    [all …]
|
D | genalloc.c |
    154  struct gen_pool *pool;  in gen_pool_create() local
    156  pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);  in gen_pool_create()
    157  if (pool != NULL) {  in gen_pool_create()
    158  spin_lock_init(&pool->lock);  in gen_pool_create()
    159  INIT_LIST_HEAD(&pool->chunks);  in gen_pool_create()
    160  pool->min_alloc_order = min_alloc_order;  in gen_pool_create()
    161  pool->algo = gen_pool_first_fit;  in gen_pool_create()
    162  pool->data = NULL;  in gen_pool_create()
    163  pool->name = NULL;  in gen_pool_create()
    165  return pool;  in gen_pool_create()
    [all …]
|
D | iommu-common.c |
    106  struct iommu_pool *pool;  in iommu_tbl_range_alloc() local
    126  pool = &(iommu->large_pool);  in iommu_tbl_range_alloc()
    131  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
    133  spin_lock_irqsave(&pool->lock, flags);  in iommu_tbl_range_alloc()
    137  (*handle >= pool->start) && (*handle < pool->end))  in iommu_tbl_range_alloc()
    140  start = pool->hint;  in iommu_tbl_range_alloc()
    142  limit = pool->end;  in iommu_tbl_range_alloc()
    151  start = pool->start;  in iommu_tbl_range_alloc()
    160  spin_unlock(&(pool->lock));  in iommu_tbl_range_alloc()
    161  pool = &(iommu->pools[0]);  in iommu_tbl_range_alloc()
    [all …]
|
/linux-4.4.14/arch/metag/kernel/ |
D | tcm.c |
    21   struct gen_pool *pool;  member
    29   struct tcm_pool *pool;  in find_pool() local
    32   pool = list_entry(lh, struct tcm_pool, list);  in find_pool()
    33   if (pool->tag == tag)  in find_pool()
    34   return pool;  in find_pool()
    52   struct tcm_pool *pool;  in tcm_alloc() local
    54   pool = find_pool(tag);  in tcm_alloc()
    55   if (!pool)  in tcm_alloc()
    58   vaddr = gen_pool_alloc(pool->pool, len);  in tcm_alloc()
    76   struct tcm_pool *pool;  in tcm_free() local
    [all …]
|
/linux-4.4.14/drivers/gpu/drm/ttm/ |
D | ttm_page_alloc_dma.c |
    157  struct dma_pool *pool;  member
    306  static int ttm_set_pages_caching(struct dma_pool *pool,  in ttm_set_pages_caching() argument
    311  if (pool->type & IS_UC) {  in ttm_set_pages_caching()
    315  pool->dev_name, cpages);  in ttm_set_pages_caching()
    317  if (pool->type & IS_WC) {  in ttm_set_pages_caching()
    321  pool->dev_name, cpages);  in ttm_set_pages_caching()
    326  static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)  in __ttm_dma_free_page() argument
    329  dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);  in __ttm_dma_free_page()
    334  static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)  in __ttm_dma_alloc_page() argument
    342  d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,  in __ttm_dma_alloc_page()
    [all …]
|
D | ttm_page_alloc.c |
    285  static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,  in ttm_pool_update_free_locked() argument
    288  pool->npages -= freed_pages;  in ttm_pool_update_free_locked()
    289  pool->nfrees += freed_pages;  in ttm_pool_update_free_locked()
    302  static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,  in ttm_page_pool_free() argument
    326  spin_lock_irqsave(&pool->lock, irq_flags);  in ttm_page_pool_free()
    328  list_for_each_entry_reverse(p, &pool->list, lru) {  in ttm_page_pool_free()
    336  __list_del(p->lru.prev, &pool->list);  in ttm_page_pool_free()
    338  ttm_pool_update_free_locked(pool, freed_pages);  in ttm_page_pool_free()
    343  spin_unlock_irqrestore(&pool->lock, irq_flags);  in ttm_page_pool_free()
    371  __list_del(&p->lru, &pool->list);  in ttm_page_pool_free()
    [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/lov/ |
D | lov_pool.c |
    55   static void lov_pool_getref(struct pool_desc *pool)  in lov_pool_getref() argument
    57   CDEBUG(D_INFO, "pool %p\n", pool);  in lov_pool_getref()
    58   atomic_inc(&pool->pool_refcount);  in lov_pool_getref()
    61   void lov_pool_putref(struct pool_desc *pool)  in lov_pool_putref() argument
    63   CDEBUG(D_INFO, "pool %p\n", pool);  in lov_pool_putref()
    64   if (atomic_dec_and_test(&pool->pool_refcount)) {  in lov_pool_putref()
    65   LASSERT(hlist_unhashed(&pool->pool_hash));  in lov_pool_putref()
    66   LASSERT(list_empty(&pool->pool_list));  in lov_pool_putref()
    67   LASSERT(pool->pool_debugfs_entry == NULL);  in lov_pool_putref()
    68   lov_ost_pool_free(&(pool->pool_rr.lqr_pool));  in lov_pool_putref()
    [all …]
|
D | lov_internal.h |
    251  int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
    252  void lov_pool_putref(struct pool_desc *pool);
|
/linux-4.4.14/net/rds/ |
D | ib_rdma.c |
    49   struct rds_ib_mr_pool *pool;  member
    106  static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
    241  struct rds_ib_mr_pool *pool;  in rds_ib_create_mr_pool() local
    243  pool = kzalloc(sizeof(*pool), GFP_KERNEL);  in rds_ib_create_mr_pool()
    244  if (!pool)  in rds_ib_create_mr_pool()
    247  pool->pool_type = pool_type;  in rds_ib_create_mr_pool()
    248  init_llist_head(&pool->free_list);  in rds_ib_create_mr_pool()
    249  init_llist_head(&pool->drop_list);  in rds_ib_create_mr_pool()
    250  init_llist_head(&pool->clean_list);  in rds_ib_create_mr_pool()
    251  mutex_init(&pool->flush_lock);  in rds_ib_create_mr_pool()
    [all …]
|
D | iw_rdma.c |
    46   struct rds_iw_mr_pool *pool;  member
    77   static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
    79   static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
    80   static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
    83   static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
    84   static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
    88   static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
    317  struct rds_iw_mr_pool *pool;  local
    319  pool = kzalloc(sizeof(*pool), GFP_KERNEL);
    320  if (!pool) {
    [all …]
|
/linux-4.4.14/kernel/ |
D | workqueue.c |
    199  struct worker_pool *pool; /* I: the associated pool */  member
    356  #define for_each_cpu_worker_pool(pool, cpu) \  argument
    357  for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
    358  (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
    359  (pool)++)
    373  #define for_each_pool(pool, pi) \  argument
    374  idr_for_each_entry(&worker_pool_idr, pool, pi) \
    388  #define for_each_pool_worker(worker, pool) \  argument
    389  list_for_each_entry((worker), &(pool)->workers, node) \
    390  if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
    [all …]
|
D | workqueue_internal.h | 38 struct worker_pool *pool; /* I: the associated pool */ member
|
/linux-4.4.14/drivers/staging/octeon/ |
D | ethernet-mem.c |
    30   static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)  in cvm_oct_fill_hw_skbuff() argument
    41   cvmx_fpa_free(skb->data, pool, size / 128);  in cvm_oct_fill_hw_skbuff()
    53   static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)  in cvm_oct_free_hw_skbuff() argument
    58   memory = cvmx_fpa_alloc(pool);  in cvm_oct_free_hw_skbuff()
    69   pool, elements);  in cvm_oct_free_hw_skbuff()
    72   pool, elements);  in cvm_oct_free_hw_skbuff()
    83   static int cvm_oct_fill_hw_memory(int pool, int size, int elements)  in cvm_oct_fill_hw_memory() argument
    103  elements * size, pool);  in cvm_oct_fill_hw_memory()
    108  cvmx_fpa_free(fpa, pool, 0);  in cvm_oct_fill_hw_memory()
    120  static void cvm_oct_free_hw_memory(int pool, int size, int elements)  in cvm_oct_free_hw_memory() argument
    [all …]
|
D | ethernet-mem.h |
    11   int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
    12   void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
|
D | ethernet-tx.c |
    276  hw_buffer.s.pool = 0;  in cvm_oct_xmit()
    280  hw_buffer.s.pool = 0;  in cvm_oct_xmit()
    607  work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;  in cvm_oct_xmit_pow()
|
/linux-4.4.14/drivers/dma/ |
D | coh901318_lli.c |
    19   #define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)  argument
    20   #define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)  argument
    22   #define DEBUGFS_POOL_COUNTER_RESET(pool)  argument
    23   #define DEBUGFS_POOL_COUNTER_ADD(pool, add)  argument
    35   int coh901318_pool_create(struct coh901318_pool *pool,  in coh901318_pool_create() argument
    39   spin_lock_init(&pool->lock);  in coh901318_pool_create()
    40   pool->dev = dev;  in coh901318_pool_create()
    41   pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);  in coh901318_pool_create()
    43   DEBUGFS_POOL_COUNTER_RESET(pool);  in coh901318_pool_create()
    47   int coh901318_pool_destroy(struct coh901318_pool *pool)  in coh901318_pool_destroy() argument
    [all …]
|
D | coh901318.h |
    53   int coh901318_pool_create(struct coh901318_pool *pool,
    62   int coh901318_pool_destroy(struct coh901318_pool *pool);
    72   coh901318_lli_alloc(struct coh901318_pool *pool,
    80   void coh901318_lli_free(struct coh901318_pool *pool,
    95   coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
    113  coh901318_lli_fill_single(struct coh901318_pool *pool,
    134  coh901318_lli_fill_sg(struct coh901318_pool *pool,
|
D | mmp_tdma.c |
    129  struct gen_pool *pool;  member
    362  gpool = tdmac->pool;  in mmp_tdma_free_descriptor()
    412  gpool = tdmac->pool;  in mmp_tdma_alloc_descriptor()
    543  int type, struct gen_pool *pool)  in mmp_tdma_chan_init() argument
    565  tdmac->pool = pool;  in mmp_tdma_chan_init()
    631  struct gen_pool *pool = NULL;  in mmp_tdma_probe() local
    659  pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);  in mmp_tdma_probe()
    661  pool = sram_get_gpool("asram");  in mmp_tdma_probe()
    662  if (!pool) {  in mmp_tdma_probe()
    678  ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);  in mmp_tdma_probe()
|
D | idma64.c |
    227  dma_pool_free(idma64c->pool, hw->lli, hw->llp);  in idma64_desc_free()
    320  hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);  in idma64_prep_slave_sg()
    509  idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),  in idma64_alloc_chan_resources()
    512  if (!idma64c->pool) {  in idma64_alloc_chan_resources()
    525  dma_pool_destroy(idma64c->pool);  in idma64_free_chan_resources()
    526  idma64c->pool = NULL;  in idma64_free_chan_resources()
|
D | dmaengine.c |
    1005  mempool_t *pool;  member
    1060  mempool_free(unmap, __get_unmap_pool(cnt)->pool);  in dmaengine_unmap()
    1077  mempool_destroy(p->pool);  in dmaengine_destroy_unmap_pool()
    1078  p->pool = NULL;  in dmaengine_destroy_unmap_pool()
    1099  p->pool = mempool_create_slab_pool(1, p->cache);  in dmaengine_init_unmap_pool()
    1100  if (!p->pool)  in dmaengine_init_unmap_pool()
    1116  unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);  in dmaengine_get_unmap_data()
|
D | sun6i-dma.c |
    161  struct dma_pool *pool;  member
    349  dma_pool_free(sdev->pool, v_lli, p_lli);  in sun6i_dma_free_desc()
    527  v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);  in sun6i_dma_prep_dma_memcpy()
    587  v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);  in sun6i_dma_prep_slave_sg()
    638  dma_pool_free(sdev->pool, v_lli, p_lli);  in sun6i_dma_prep_slave_sg()
    641  dma_pool_free(sdev->pool, prev, virt_to_phys(prev));  in sun6i_dma_prep_slave_sg()
    952  sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,  in sun6i_dma_probe()
    954  if (!sdc->pool) {  in sun6i_dma_probe()
|
D | pch_dma.c |
    126  struct pci_pool *pool;  member
    443  desc = pci_pool_alloc(pd->pool, flags, &addr);  in pdc_alloc_desc()
    556  pci_pool_free(pd->pool, desc, desc->txd.phys);  in pd_free_chan_resources()
    887  pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,  in pch_dma_probe()
    889  if (!pd->pool) {  in pch_dma_probe()
    938  pci_pool_destroy(pd->pool);  in pch_dma_probe()
    970  pci_pool_destroy(pd->pool);  in pch_dma_remove()
|
D | coh901318.c |
    1283  struct coh901318_pool pool;  member
    1341  int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;  in coh901318_debugfs_read()
    1914  coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);  in dma_tasklet()
    2145  coh901318_lli_free(&cohc->base->pool, &cohd->lli);  in coh901318_terminate_all()
    2154  coh901318_lli_free(&cohc->base->pool, &cohd->lli);  in coh901318_terminate_all()
    2261  lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);  in coh901318_prep_memcpy()
    2267  &cohc->base->pool, lli, src, size, dest,  in coh901318_prep_memcpy()
    2374  lli = coh901318_lli_alloc(&cohc->base->pool, len);  in coh901318_prep_slave_sg()
    2380  ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,  in coh901318_prep_slave_sg()
    2683  err = coh901318_pool_create(&base->pool, &pdev->dev,  in coh901318_probe()
    [all …]
|
D | zx296702_dma.c |
    128  struct dma_pool *pool;  member
    438  ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);  in zx_alloc_desc_resource()
    724  dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);  in zx_dma_free_desc()
    795  d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,  in zx_dma_probe()
    797  if (!d->pool)  in zx_dma_probe()
    899  dmam_pool_destroy(d->pool);  in zx_dma_remove()
|
D | idma64.h | 139 void *pool; member
|
D | amba-pl08x.c |
    271   struct dma_pool *pool;  member
    939   txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);  in pl08x_fill_llis_for_desc()
    1162  dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);  in pl08x_free_txd()
    2306  pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,  in pl08x_probe()
    2308  if (!pl08x->pool) {  in pl08x_probe()
    2428  dma_pool_destroy(pl08x->pool);  in pl08x_probe()
|
/linux-4.4.14/include/linux/ |
D | zpool.h |
    17   int (*evict)(struct zpool *pool, unsigned long handle);
    44   const char *zpool_get_type(struct zpool *pool);
    46   void zpool_destroy_pool(struct zpool *pool);
    48   int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
    51   void zpool_free(struct zpool *pool, unsigned long handle);
    53   int zpool_shrink(struct zpool *pool, unsigned int pages,
    56   void *zpool_map_handle(struct zpool *pool, unsigned long handle,
    59   void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
    61   u64 zpool_get_total_size(struct zpool *pool);
    90   void (*destroy)(void *pool);
    [all …]
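The hits above cover essentially the whole zpool consumer API. As a reading aid, a minimal usage sketch (not taken from the tree): the "zbud" backend, pool name and 128-byte allocation are illustrative, and the assumed 4.4-era zpool_create_pool() signature (type, name, gfp, ops) should be checked against this kernel version.

#include <linux/zpool.h>
#include <linux/gfp.h>
#include <linux/string.h>

static void zpool_demo(void)
{
	struct zpool *zp;
	unsigned long handle;
	void *buf;

	/* Assumed signature in this era: (type, name, gfp, ops). */
	zp = zpool_create_pool("zbud", "demo", GFP_KERNEL, NULL);
	if (!zp)
		return;

	if (zpool_malloc(zp, 128, GFP_KERNEL, &handle) == 0) {
		buf = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
		memset(buf, 0, 128);
		zpool_unmap_handle(zp, handle);	/* keep mappings short-lived */
		zpool_free(zp, handle);
	}
	zpool_destroy_pool(zp);
}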
|
D | zbud.h |
    9    int (*evict)(struct zbud_pool *pool, unsigned long handle);
    13   void zbud_destroy_pool(struct zbud_pool *pool);
    14   int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
    16   void zbud_free(struct zbud_pool *pool, unsigned long handle);
    17   int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
    18   void *zbud_map(struct zbud_pool *pool, unsigned long handle);
    19   void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
    20   u64 zbud_get_pool_size(struct zbud_pool *pool);
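A hedged sketch of driving zbud directly through the declarations above; the evict callback body, the 100-byte size and the pr_info() reporting are illustrative, not from the source.

#include <linux/zbud.h>
#include <linux/string.h>
#include <linux/kernel.h>

static int demo_evict(struct zbud_pool *pool, unsigned long handle)
{
	/* A real callback would write the entry's data back, then free it. */
	return -EINVAL;
}

static const struct zbud_ops demo_ops = { .evict = demo_evict };

static void zbud_demo(void)
{
	struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL, &demo_ops);
	unsigned long handle;

	if (!pool)
		return;
	if (zbud_alloc(pool, 100, GFP_KERNEL, &handle) == 0) {
		memset(zbud_map(pool, handle), 0, 100);	/* handle -> pointer */
		zbud_unmap(pool, handle);
		zbud_free(pool, handle);
	}
	pr_info("zbud: %llu bytes\n", (unsigned long long)zbud_get_pool_size(pool));
	zbud_destroy_pool(pool);
}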
|
D | zsmalloc.h |
    45   void zs_destroy_pool(struct zs_pool *pool);
    47   unsigned long zs_malloc(struct zs_pool *pool, size_t size);
    48   void zs_free(struct zs_pool *pool, unsigned long obj);
    50   void *zs_map_object(struct zs_pool *pool, unsigned long handle,
    52   void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
    54   unsigned long zs_get_total_pages(struct zs_pool *pool);
    55   unsigned long zs_compact(struct zs_pool *pool);
    57   void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
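For orientation, a minimal zsmalloc sketch; zs_create_pool() does not appear in the hits above, so its assumed (name, gfp) form, the 64-byte object and the ZS_MM_WO map mode are all illustrative assumptions.

#include <linux/zsmalloc.h>
#include <linux/string.h>

static void zsmalloc_demo(void)
{
	struct zs_pool *pool = zs_create_pool("demo", GFP_KERNEL);
	unsigned long handle;
	void *obj;

	if (!pool)
		return;
	handle = zs_malloc(pool, 64);	/* returns 0, not a pointer, on failure */
	if (handle) {
		obj = zs_map_object(pool, handle, ZS_MM_WO);	/* write-only mapping */
		memset(obj, 0, 64);
		zs_unmap_object(pool, handle);	/* the handle, not obj, names the object */
		zs_free(pool, handle);
	}
	zs_destroy_pool(pool);
}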
|
D | percpu_ida.h |
    65   int percpu_ida_alloc(struct percpu_ida *pool, int state);
    66   void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
    68   void percpu_ida_destroy(struct percpu_ida *pool);
    69   int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
    71   static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)  in percpu_ida_init() argument
    73   return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,  in percpu_ida_init()
    78   int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
    81   unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
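A short sketch of the tag-allocator API above; the 128-tag capacity is illustrative. Passing TASK_RUNNING as the state makes percpu_ida_alloc() non-blocking, so it can return a negative error when the tag space is exhausted.

#include <linux/percpu_ida.h>
#include <linux/sched.h>

static void percpu_ida_demo(void)
{
	struct percpu_ida pool;
	int tag;

	if (percpu_ida_init(&pool, 128))	/* 128 tags: illustrative */
		return;
	tag = percpu_ida_alloc(&pool, TASK_RUNNING);	/* < 0 when none free */
	if (tag >= 0)
		percpu_ida_free(&pool, tag);
	percpu_ida_destroy(&pool);
}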
|
D | dmapool.h |
    22   void dma_pool_destroy(struct dma_pool *pool);
    24   void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    27   static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,  in dma_pool_zalloc() argument
    30   return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);  in dma_pool_zalloc()
    33   void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
    40   void dmam_pool_destroy(struct dma_pool *pool);
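The matching usage pattern, sketched against the declarations above; "dev" and the 64/32/0 size, alignment and boundary values are illustrative, not from the source.

#include <linux/dmapool.h>
#include <linux/device.h>

static void dma_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* Fixed-size blocks: 64 bytes, 32-byte aligned, no boundary crossing rule. */
	pool = dma_pool_create("demo", dev, 64, 32, 0);
	if (!pool)
		return;
	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma);	/* dma: device-visible address */
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
}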
|
D | genalloc.h |
    79   extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
    94   static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,  in gen_pool_add() argument
    97   return gen_pool_add_virt(pool, addr, -1, size, nid);  in gen_pool_add()
    101  extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
    109  extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
    126  bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
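A hedged gen_pool sketch built from calls visible in this listing (gen_pool_create/add/alloc/free all appear in genalloc.c above); gen_pool_destroy() is assumed from the same header, and the order-4 granularity and region parameters are illustrative.

#include <linux/genalloc.h>

static void gen_pool_demo(unsigned long start, size_t size)
{
	struct gen_pool *pool = gen_pool_create(4, -1);	/* 16-byte granules, any node */
	unsigned long addr;

	if (!pool)
		return;
	if (gen_pool_add(pool, start, size, -1) == 0) {	/* hand the region to the pool */
		addr = gen_pool_alloc(pool, 256);
		if (addr)
			gen_pool_free(pool, addr, 256);
	}
	gen_pool_destroy(pool);	/* all allocations must be freed first */
}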
|
D | mempool.h |
    32   extern int mempool_resize(mempool_t *pool, int new_min_nr);
    33   extern void mempool_destroy(mempool_t *pool);
    34   extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
    35   extern void mempool_free(void *element, mempool_t *pool);
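Those four lines are the whole mempool runtime API; mempool_create_slab_pool() (visible in dm-io.c and dmaengine.c above) builds one over a slab cache. A minimal sketch of the usual pattern follows; the cache name, object size and min_nr value are illustrative.

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cache;
static mempool_t *demo_pool;

static int demo_pool_init(void)
{
	demo_cache = kmem_cache_create("demo_objs", 128, 0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;
	/* Pre-reserves 16 objects so allocation can always make progress. */
	demo_pool = mempool_create_slab_pool(16, demo_cache);
	if (!demo_pool) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}
	return 0;
}

static void demo_pool_use(void)
{
	void *obj = mempool_alloc(demo_pool, GFP_NOIO);	/* may dip into the reserve */

	mempool_free(obj, demo_pool);
}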
|
D | agpgart.h | 104 struct agp_memory *pool; member
|
D | pci.h |
    1233  #define pci_pool_destroy(pool) dma_pool_destroy(pool)  argument
    1234  #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)  argument
    1235  #define pci_pool_zalloc(pool, flags, handle) \  argument
    1236  dma_pool_zalloc(pool, flags, handle)
    1237  #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)  argument
|
/linux-4.4.14/arch/mips/include/asm/octeon/ |
D | cvmx-fpa.h |
    104  static inline const char *cvmx_fpa_get_name(uint64_t pool)  in cvmx_fpa_get_name() argument
    106  return cvmx_fpa_pool_info[pool].name;  in cvmx_fpa_get_name()
    115  static inline void *cvmx_fpa_get_base(uint64_t pool)  in cvmx_fpa_get_base() argument
    117  return cvmx_fpa_pool_info[pool].base;  in cvmx_fpa_get_base()
    129  static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)  in cvmx_fpa_is_member() argument
    131  return ((ptr >= cvmx_fpa_pool_info[pool].base) &&  in cvmx_fpa_is_member()
    133  ((char *)(cvmx_fpa_pool_info[pool].base)) +  in cvmx_fpa_is_member()
    134  cvmx_fpa_pool_info[pool].size *  in cvmx_fpa_is_member()
    135  cvmx_fpa_pool_info[pool].starting_element_count));  in cvmx_fpa_is_member()
    183  static inline void *cvmx_fpa_alloc(uint64_t pool)  in cvmx_fpa_alloc() argument
    [all …]
|
D | cvmx-packet.h |
    54   uint64_t pool:3;  member
    62   uint64_t pool:3;
|
D | cvmx-helper-util.h | 191 buffer_ptr.s.pool, 0); in cvmx_helper_free_packet_data()
|
D | cvmx-pko-defs.h |
    192  uint64_t pool:3;  member
    198  uint64_t pool:3;
    282  uint64_t pool:3;  member
    288  uint64_t pool:3;
    297  uint64_t pool:3;  member
    303  uint64_t pool:3;
    429  uint64_t pool:3;  member
    435  uint64_t pool:3;
    505  uint64_t pool:3;  member
    511  uint64_t pool:3;
    [all …]
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
D | i915_gem_batch_pool.c |
    48   struct i915_gem_batch_pool *pool)  in i915_gem_batch_pool_init() argument
    52   pool->dev = dev;  in i915_gem_batch_pool_init()
    54   for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)  in i915_gem_batch_pool_init()
    55   INIT_LIST_HEAD(&pool->cache_list[n]);  in i915_gem_batch_pool_init()
    64   void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)  in i915_gem_batch_pool_fini() argument
    68   WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));  in i915_gem_batch_pool_fini()
    70   for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {  in i915_gem_batch_pool_fini()
    71   while (!list_empty(&pool->cache_list[n])) {  in i915_gem_batch_pool_fini()
    73   list_first_entry(&pool->cache_list[n],  in i915_gem_batch_pool_fini()
    97   i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,  in i915_gem_batch_pool_get() argument
    [all …]
|
D | i915_gem_batch_pool.h |
    37   struct i915_gem_batch_pool *pool);
    38   void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
    40   i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
|
/linux-4.4.14/drivers/net/ethernet/ti/ |
D | davinci_cpdma.c |
    107  struct cpdma_desc_pool *pool;  member
    159  struct cpdma_desc_pool *pool;  in cpdma_desc_pool_create() local
    161  pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);  in cpdma_desc_pool_create()
    162  if (!pool)  in cpdma_desc_pool_create()
    165  spin_lock_init(&pool->lock);  in cpdma_desc_pool_create()
    167  pool->dev = dev;  in cpdma_desc_pool_create()
    168  pool->mem_size = size;  in cpdma_desc_pool_create()
    169  pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);  in cpdma_desc_pool_create()
    170  pool->num_desc = size / pool->desc_size;  in cpdma_desc_pool_create()
    172  bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);  in cpdma_desc_pool_create()
    [all …]
|
D | cpts.c |
    74   if (list_empty(&cpts->pool)) {  in cpts_fifo_read()
    78   event = list_first_entry(&cpts->pool, struct cpts_event, list);  in cpts_fifo_read()
    119  list_add(&event->list, &cpts->pool);  in cpts_systim_read()
    305  list_add(&event->list, &cpts->pool);  in cpts_find_ts()
    314  list_add(&event->list, &cpts->pool);  in cpts_find_ts()
    378  INIT_LIST_HEAD(&cpts->pool);  in cpts_register()
    380  list_add(&cpts->pool_data[i].list, &cpts->pool);  in cpts_register()
|
D | cpts.h | 125 struct list_head pool; member
|
/linux-4.4.14/sound/core/seq/oss/ |
D | seq_oss_writeq.c |
    40   struct snd_seq_client_pool pool;  in snd_seq_oss_writeq_new() local
    51   memset(&pool, 0, sizeof(pool));  in snd_seq_oss_writeq_new()
    52   pool.client = dp->cseq;  in snd_seq_oss_writeq_new()
    53   pool.output_pool = maxlen;  in snd_seq_oss_writeq_new()
    54   pool.output_room = maxlen / 2;  in snd_seq_oss_writeq_new()
    56   snd_seq_oss_control(dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);  in snd_seq_oss_writeq_new()
    152  struct snd_seq_client_pool pool;  in snd_seq_oss_writeq_get_free_size() local
    153  pool.client = q->dp->cseq;  in snd_seq_oss_writeq_get_free_size()
    154  snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);  in snd_seq_oss_writeq_get_free_size()
    155  return pool.output_free;  in snd_seq_oss_writeq_get_free_size()
    [all …]
|
/linux-4.4.14/drivers/net/ethernet/ibm/ |
D | ibmveth.c |
    151  static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,  in ibmveth_init_buffer_pool() argument
    155  pool->size = pool_size;  in ibmveth_init_buffer_pool()
    156  pool->index = pool_index;  in ibmveth_init_buffer_pool()
    157  pool->buff_size = buff_size;  in ibmveth_init_buffer_pool()
    158  pool->threshold = pool_size * 7 / 8;  in ibmveth_init_buffer_pool()
    159  pool->active = pool_active;  in ibmveth_init_buffer_pool()
    163  static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)  in ibmveth_alloc_buffer_pool() argument
    167  pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);  in ibmveth_alloc_buffer_pool()
    169  if (!pool->free_map)  in ibmveth_alloc_buffer_pool()
    172  pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);  in ibmveth_alloc_buffer_pool()
    [all …]
|
/linux-4.4.14/drivers/mtd/ubi/ |
D | fastmap-wl.c |
    57   struct ubi_fm_pool *pool)  in return_unused_pool_pebs() argument
    62   for (i = pool->used; i < pool->size; i++) {  in return_unused_pool_pebs()
    63   e = ubi->lookuptbl[pool->pebs[i]];  in return_unused_pool_pebs()
    122  struct ubi_fm_pool *pool = &ubi->fm_pool;  in ubi_refill_pools() local
    129  return_unused_pool_pebs(ubi, pool);  in ubi_refill_pools()
    132  pool->size = 0;  in ubi_refill_pools()
    136  if (pool->size < pool->max_size) {  in ubi_refill_pools()
    144  pool->pebs[pool->size] = e->pnum;  in ubi_refill_pools()
    145  pool->size++;  in ubi_refill_pools()
    169  pool->used = 0;  in ubi_refill_pools()
    [all …]
|
/linux-4.4.14/drivers/xen/ |
D | tmem.c |
    167  static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,  in tmem_cleancache_put_page() argument
    173  if (pool < 0)  in tmem_cleancache_put_page()
    178  (void)xen_tmem_put_page((u32)pool, oid, ind, page);  in tmem_cleancache_put_page()
    181  static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,  in tmem_cleancache_get_page() argument
    189  if (pool < 0)  in tmem_cleancache_get_page()
    193  ret = xen_tmem_get_page((u32)pool, oid, ind, page);  in tmem_cleancache_get_page()
    200  static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,  in tmem_cleancache_flush_page() argument
    206  if (pool < 0)  in tmem_cleancache_flush_page()
    210  (void)xen_tmem_flush_page((u32)pool, oid, ind);  in tmem_cleancache_flush_page()
    213  static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)  in tmem_cleancache_flush_inode() argument
    [all …]
|
/linux-4.4.14/drivers/scsi/lpfc/ |
D | lpfc_mem.c |
    83   struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;  in lpfc_mem_alloc() local
    115  pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *  in lpfc_mem_alloc()
    117  if (!pool->elements)  in lpfc_mem_alloc()
    120  pool->max_count = 0;  in lpfc_mem_alloc()
    121  pool->current_count = 0;  in lpfc_mem_alloc()
    123  pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,  in lpfc_mem_alloc()
    124  GFP_KERNEL, &pool->elements[i].phys);  in lpfc_mem_alloc()
    125  if (!pool->elements[i].virt)  in lpfc_mem_alloc()
    127  pool->max_count++;  in lpfc_mem_alloc()
    128  pool->current_count++;  in lpfc_mem_alloc()
    [all …]
|
/linux-4.4.14/drivers/scsi/ |
D | scsi.c |
    137  struct scsi_host_cmd_pool *pool = shost->cmd_pool;  in scsi_host_free_command() local
    141  kmem_cache_free(pool->sense_slab, cmd->sense_buffer);  in scsi_host_free_command()
    142  kmem_cache_free(pool->cmd_slab, cmd);  in scsi_host_free_command()
    156  struct scsi_host_cmd_pool *pool = shost->cmd_pool;  in scsi_host_alloc_command() local
    159  cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);  in scsi_host_alloc_command()
    163  cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,  in scsi_host_alloc_command()
    164  gfp_mask | pool->gfp_mask);  in scsi_host_alloc_command()
    177  kmem_cache_free(pool->sense_slab, cmd->sense_buffer);  in scsi_host_alloc_command()
    179  kmem_cache_free(pool->cmd_slab, cmd);  in scsi_host_alloc_command()
    305  scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)  in scsi_free_host_cmd_pool() argument
    [all …]
|
D | scsi_lib.c |
    49    mempool_t *pool;  member
    574   mempool_free(sgl, sgp->pool);  in scsi_sg_free()
    582   return mempool_alloc(sgp->pool, gfp_mask);  in scsi_sg_alloc()
    2290  sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,  in scsi_init_queue()
    2292  if (!sgp->pool) {  in scsi_init_queue()
    2304  if (sgp->pool)  in scsi_init_queue()
    2305  mempool_destroy(sgp->pool);  in scsi_init_queue()
    2322  mempool_destroy(sgp->pool);  in scsi_exit_queue()
|
D | libiscsi.c |
    2537  q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);  in iscsi_pool_init()
    2538  if (q->pool == NULL)  in iscsi_pool_init()
    2541  kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));  in iscsi_pool_init()
    2544  q->pool[i] = kzalloc(item_size, GFP_KERNEL);  in iscsi_pool_init()
    2545  if (q->pool[i] == NULL) {  in iscsi_pool_init()
    2549  kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));  in iscsi_pool_init()
    2553  *items = q->pool + max;  in iscsi_pool_init()
    2554  memcpy(*items, q->pool, max * sizeof(void *));  in iscsi_pool_init()
    2570  kfree(q->pool[i]);  in iscsi_pool_free()
    2571  kfree(q->pool);  in iscsi_pool_free()
|
/linux-4.4.14/drivers/atm/ |
D | ambassador.c |
    687  static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {  in rx_give() argument
    688  amb_rxq * rxq = &dev->rxq[pool];  in rx_give()
    691  PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);  in rx_give()
    702  wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));  in rx_give()
    712  static int rx_take (amb_dev * dev, unsigned char pool) {  in rx_take() argument
    713  amb_rxq * rxq = &dev->rxq[pool];  in rx_take()
    716  PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);  in rx_take()
    745  static void drain_rx_pool (amb_dev * dev, unsigned char pool) {  in drain_rx_pool() argument
    746  amb_rxq * rxq = &dev->rxq[pool];  in drain_rx_pool()
    748  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);  in drain_rx_pool()
    [all …]
|
D | zatm.c |
    178  static void refill_pool(struct atm_dev *dev,int pool)  in refill_pool() argument
    188  size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :  in refill_pool()
    189  pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);  in refill_pool()
    196  offset = zatm_dev->pool_info[pool].offset+  in refill_pool()
    201  free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &  in refill_pool()
    204  if (free >= zatm_dev->pool_info[pool].low_water) return;  in refill_pool()
    206  zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),  in refill_pool()
    207  zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));  in refill_pool()
    211  while (free < zatm_dev->pool_info[pool].high_water) {  in refill_pool()
    233  if (zatm_dev->last_free[pool])  in refill_pool()
    [all …]
|
D | zatm.h |
    45   int pool; /* free buffer pool */  member
    67   struct sk_buff_head pool[NR_POOLS];/* free buffer pools */  member
|
D | idt77252.h |
    788  u32 pool; /* sb_pool handle */  member
    796  (((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->pool)
|
/linux-4.4.14/drivers/s390/scsi/ |
D | zfcp_aux.c |
    204  adapter->pool.erp_req =  in zfcp_allocate_low_mem_buffers()
    206  if (!adapter->pool.erp_req)  in zfcp_allocate_low_mem_buffers()
    209  adapter->pool.gid_pn_req =  in zfcp_allocate_low_mem_buffers()
    211  if (!adapter->pool.gid_pn_req)  in zfcp_allocate_low_mem_buffers()
    214  adapter->pool.scsi_req =  in zfcp_allocate_low_mem_buffers()
    216  if (!adapter->pool.scsi_req)  in zfcp_allocate_low_mem_buffers()
    219  adapter->pool.scsi_abort =  in zfcp_allocate_low_mem_buffers()
    221  if (!adapter->pool.scsi_abort)  in zfcp_allocate_low_mem_buffers()
    224  adapter->pool.status_read_req =  in zfcp_allocate_low_mem_buffers()
    227  if (!adapter->pool.status_read_req)  in zfcp_allocate_low_mem_buffers()
    [all …]
|
D | zfcp_fsf.c |
    80   if (likely(req->pool)) {  in zfcp_fsf_req_free()
    82   mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);  in zfcp_fsf_req_free()
    83   mempool_free(req, req->pool);  in zfcp_fsf_req_free()
    217  mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);  in zfcp_fsf_status_read_handler()
    265  mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);  in zfcp_fsf_status_read_handler()
    647  static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)  in zfcp_fsf_alloc() argument
    651  if (likely(pool))  in zfcp_fsf_alloc()
    652  req = mempool_alloc(pool, GFP_ATOMIC);  in zfcp_fsf_alloc()
    660  req->pool = pool;  in zfcp_fsf_alloc()
    664  static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)  in zfcp_qtcb_alloc() argument
    [all …]
|
D | zfcp_def.h |
    185  struct zfcp_adapter_mempool pool; /* Adapter memory pools */  member
    312  mempool_t *pool;  member
|
/linux-4.4.14/tools/hv/ |
D | hv_kvp_daemon.c |
    121  static void kvp_acquire_lock(int pool)  in kvp_acquire_lock() argument
    126  if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) {  in kvp_acquire_lock()
    127  syslog(LOG_ERR, "Failed to acquire the lock pool: %d; error: %d %s", pool,  in kvp_acquire_lock()
    133  static void kvp_release_lock(int pool)  in kvp_release_lock() argument
    138  if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) {  in kvp_release_lock()
    139  syslog(LOG_ERR, "Failed to release the lock pool: %d; error: %d %s", pool,  in kvp_release_lock()
    145  static void kvp_update_file(int pool)  in kvp_update_file() argument
    153  kvp_acquire_lock(pool);  in kvp_update_file()
    155  filep = fopen(kvp_file_info[pool].fname, "we");  in kvp_update_file()
    157  syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool,  in kvp_update_file()
    [all …]
|
/linux-4.4.14/arch/ia64/kernel/ |
D | uncached.c |
    35   struct gen_pool *pool;  member
    156  status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);  in uncached_add_chunk()
    202  if (uc_pool->pool == NULL)  in uncached_alloc_page()
    205  uc_addr = gen_pool_alloc(uc_pool->pool,  in uncached_alloc_page()
    229  struct gen_pool *pool = uncached_pools[nid].pool;  in uncached_free_page() local
    231  if (unlikely(pool == NULL))  in uncached_free_page()
    237  gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);  in uncached_free_page()
    255  struct gen_pool *pool = uncached_pools[nid].pool;  in uncached_build_memmap() local
    260  if (pool != NULL) {  in uncached_build_memmap()
    262  (void) gen_pool_add(pool, uc_start, size, nid);  in uncached_build_memmap()
    [all …]
|
/linux-4.4.14/drivers/scsi/megaraid/ |
D | megaraid_mm.c |
    514  mm_dmapool_t *pool;  in mraid_mm_attach_buf() local
    531  pool = &adp->dma_pool_list[i];  in mraid_mm_attach_buf()
    533  if (xferlen > pool->buf_size)  in mraid_mm_attach_buf()
    539  spin_lock_irqsave(&pool->lock, flags);  in mraid_mm_attach_buf()
    541  if (!pool->in_use) {  in mraid_mm_attach_buf()
    543  pool->in_use = 1;  in mraid_mm_attach_buf()
    545  kioc->buf_vaddr = pool->vaddr;  in mraid_mm_attach_buf()
    546  kioc->buf_paddr = pool->paddr;  in mraid_mm_attach_buf()
    548  spin_unlock_irqrestore(&pool->lock, flags);  in mraid_mm_attach_buf()
    552  spin_unlock_irqrestore(&pool->lock, flags);  in mraid_mm_attach_buf()
    [all …]
|
/linux-4.4.14/drivers/usb/core/ |
D | buffer.c |
    74   hcd->pool[i] = dma_pool_create(name, hcd->self.controller,  in hcd_buffer_create()
    76   if (!hcd->pool[i]) {  in hcd_buffer_create()
    97   struct dma_pool *pool = hcd->pool[i];  in hcd_buffer_destroy() local
    99   if (pool) {  in hcd_buffer_destroy()
    100  dma_pool_destroy(pool);  in hcd_buffer_destroy()
    101  hcd->pool[i] = NULL;  in hcd_buffer_destroy()
    130  return dma_pool_alloc(hcd->pool[i], mem_flags, dma);  in hcd_buffer_alloc()
    156  dma_pool_free(hcd->pool[i], addr, dma);  in hcd_buffer_free()
|
/linux-4.4.14/drivers/soc/ti/ |
D | knav_qmss_queue.c |
    676  static void kdesc_fill_pool(struct knav_pool *pool)  in kdesc_fill_pool() argument
    681  region = pool->region;  in kdesc_fill_pool()
    682  pool->desc_size = region->desc_size;  in kdesc_fill_pool()
    683  for (i = 0; i < pool->num_desc; i++) {  in kdesc_fill_pool()
    684  int index = pool->region_offset + i;  in kdesc_fill_pool()
    688  dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);  in kdesc_fill_pool()
    689  dma_sync_single_for_device(pool->dev, dma_addr, dma_size,  in kdesc_fill_pool()
    691  knav_queue_push(pool->queue, dma_addr, dma_size, 0);  in kdesc_fill_pool()
    696  static void kdesc_empty_pool(struct knav_pool *pool)  in kdesc_empty_pool() argument
    703  if (!pool->queue)  in kdesc_empty_pool()
    [all …]
|
D | knav_qmss.h |
    362  #define for_each_pool(kdev, pool) \  argument
    363  list_for_each_entry(pool, &kdev->pools, list)
|
/linux-4.4.14/arch/arm/common/ |
D | dmabounce.c |
    56   struct dmabounce_pool *pool;  member
    63   struct dma_pool *pool;  member
    111  struct dmabounce_pool *pool;  in alloc_safe_buffer() local
    119  pool = &device_info->small;  in alloc_safe_buffer()
    121  pool = &device_info->large;  in alloc_safe_buffer()
    123  pool = NULL;  in alloc_safe_buffer()
    135  buf->pool = pool;  in alloc_safe_buffer()
    137  if (pool) {  in alloc_safe_buffer()
    138  buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,  in alloc_safe_buffer()
    154  if (pool)  in alloc_safe_buffer()
    [all …]
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/ |
D | spectrum_buffers.c |
    88   u8 pool;  member
    103  .pool = _pool, \
    137  const struct mlxsw_sp_sb_pool *pool;  in mlxsw_sp_sb_pools_init() local
    139  pool = &mlxsw_sp_sb_pools[i];  in mlxsw_sp_sb_pools_init()
    140  mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,  in mlxsw_sp_sb_pools_init()
    141  pool->mode, pool->size);  in mlxsw_sp_sb_pools_init()
    157  u8 pool;  member
    166  .pool = _pool, \
    262  cm->min_buff, cm->max_buff, cm->pool);  in mlxsw_sp_sb_cms_init()
    284  u8 pool;  member
    [all …]
|
D | reg.h |
    2117  MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
    2136  static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,  in mlxsw_reg_sbpr_pack() argument
    2141  mlxsw_reg_sbpr_pool_set(payload, pool);  in mlxsw_reg_sbpr_pack()
    2213  MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
    2217  u32 min_buff, u32 max_buff, u8 pool)  in mlxsw_reg_sbcm_pack() argument
    2225  mlxsw_reg_sbcm_pool_set(payload, pool);  in mlxsw_reg_sbcm_pack()
    2254  MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
    2286  static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,  in mlxsw_reg_sbpm_pack() argument
    2292  mlxsw_reg_sbpm_pool_set(payload, pool);  in mlxsw_reg_sbpm_pack()
    2341  MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
    [all …]
|
/linux-4.4.14/drivers/misc/ |
D | sram.c |
    34   struct gen_pool *pool;  member
    43   struct gen_pool *pool;  member
    55   bool pool;  member
    94   part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),  in sram_add_pool()
    96   if (IS_ERR(part->pool))  in sram_add_pool()
    97   return PTR_ERR(part->pool);  in sram_add_pool()
    99   ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,  in sram_add_pool()
    136  if (block->pool) {  in sram_add_partition()
    163  if (part->pool &&  in sram_free_partitions()
    164  gen_pool_avail(part->pool) < gen_pool_size(part->pool))  in sram_free_partitions()
    [all …]
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_irq.c |
    75   static struct ehca_comp_pool *pool;  variable
    656  static int find_next_online_cpu(struct ehca_comp_pool *pool)  in find_next_online_cpu() argument
    665  spin_lock_irqsave(&pool->last_cpu_lock, flags);  in find_next_online_cpu()
    667  cpu = cpumask_next(pool->last_cpu, cpu_online_mask);  in find_next_online_cpu()
    670  pool->last_cpu = cpu;  in find_next_online_cpu()
    671  } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);  in find_next_online_cpu()
    672  spin_unlock_irqrestore(&pool->last_cpu_lock, flags);  in find_next_online_cpu()
    706  cpu_id = find_next_online_cpu(pool);  in queue_comp_task()
    709  cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);  in queue_comp_task()
    710  thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);  in queue_comp_task()
    [all …]
|
/linux-4.4.14/Documentation/device-mapper/ |
D | thin-provisioning.txt |
    54   The pool device ties together the metadata volume and the data volume.
    63   Setting up a fresh pool device
    66   Setting up a pool device requires a valid metadata device, and a
    85   Reloading a pool table
    88   You may reload a pool's table, indeed this is how the pool is resized
    94   Using an existing pool device
    97   dmsetup create pool \
    98   --table "0 20971520 thin-pool $metadata_dev $data_dev \
    105  thin-pool is created. People primarily interested in thin provisioning
    114  extend the pool device. Only one such event will be sent.
    [all …]
|
/linux-4.4.14/block/ |
D | bounce.c |
    126  static void bounce_end_io(struct bio *bio, mempool_t *pool)  in bounce_end_io() argument
    143  mempool_free(bvec->bv_page, pool);  in bounce_end_io()
    162  static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)  in __bounce_end_io_read() argument
    169  bounce_end_io(bio, pool);  in __bounce_end_io_read()
    183  mempool_t *pool)  in __blk_queue_bounce() argument
    205  to->bv_page = mempool_alloc(pool, q->bounce_gfp);  in __blk_queue_bounce()
    224  if (pool == page_pool) {  in __blk_queue_bounce()
    240  mempool_t *pool;  in blk_queue_bounce() local
    256  pool = page_pool;  in blk_queue_bounce()
    259  pool = isa_page_pool;  in blk_queue_bounce()
    [all …]
|
D | bio.c |
    161  void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)  in bvec_free() argument
    166  mempool_free(bv, pool);  in bvec_free()
    175  mempool_t *pool)  in bvec_alloc() argument
    211  bvl = mempool_alloc(pool, gfp_mask);  in bvec_alloc()
|
/linux-4.4.14/net/sunrpc/ |
D | svc_xprt.c |
    324  struct svc_pool *pool;  in svc_xprt_do_enqueue() local
    344  pool = svc_pool_for_cpu(xprt->xpt_server, cpu);  in svc_xprt_do_enqueue()
    346  atomic_long_inc(&pool->sp_stats.packets);  in svc_xprt_do_enqueue()
    351  list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {  in svc_xprt_do_enqueue()
    377  atomic_long_inc(&pool->sp_stats.threads_woken);  in svc_xprt_do_enqueue()
    393  spin_lock_bh(&pool->sp_lock);  in svc_xprt_do_enqueue()
    394  list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);  in svc_xprt_do_enqueue()
    395  pool->sp_stats.sockets_queued++;  in svc_xprt_do_enqueue()
    396  spin_unlock_bh(&pool->sp_lock);  in svc_xprt_do_enqueue()
    422  static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)  in svc_xprt_dequeue() argument
    [all …]
|
D | svc.c |
    456  struct svc_pool *pool = &serv->sv_pools[i];  in __svc_create() local
    461  pool->sp_id = i;  in __svc_create()
    462  INIT_LIST_HEAD(&pool->sp_sockets);  in __svc_create()
    463  INIT_LIST_HEAD(&pool->sp_all_threads);  in __svc_create()
    464  spin_lock_init(&pool->sp_lock);  in __svc_create()
    586  svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)  in svc_rqst_alloc() argument
    597  rqstp->rq_pool = pool;  in svc_rqst_alloc()
    618  svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)  in svc_prepare_thread() argument
    622  rqstp = svc_rqst_alloc(serv, pool, node);  in svc_prepare_thread()
    627  spin_lock_bh(&pool->sp_lock);  in svc_prepare_thread()
    [all …]
|
/linux-4.4.14/net/9p/ |
D | util.c |
    45   struct idr pool;  member
    62   idr_init(&p->pool);  in p9_idpool_create()
    75   idr_destroy(&p->pool);  in p9_idpool_destroy()
    97   i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);  in p9_idpool_get()
    125  idr_remove(&p->pool, id);  in p9_idpool_put()
    138  return idr_find(&p->pool, id) != NULL;  in p9_idpool_check()
|
/linux-4.4.14/drivers/scsi/libfc/ |
D | fc_exch.c |
    95   struct fc_exch_pool __percpu *pool;  member
    418  static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,  in fc_exch_ptr_get() argument
    421  struct fc_exch **exches = (struct fc_exch **)(pool + 1);  in fc_exch_ptr_get()
    431  static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,  in fc_exch_ptr_set() argument
    434  ((struct fc_exch **)(pool + 1))[index] = ep;  in fc_exch_ptr_set()
    443  struct fc_exch_pool *pool;  in fc_exch_delete() local
    446  pool = ep->pool;  in fc_exch_delete()
    447  spin_lock_bh(&pool->lock);  in fc_exch_delete()
    448  WARN_ON(pool->total_exches <= 0);  in fc_exch_delete()
    449  pool->total_exches--;  in fc_exch_delete()
    [all …]
|
/linux-4.4.14/drivers/scsi/ibmvscsi/ |
D | ibmvscsi.c | 453 static int initialize_event_pool(struct event_pool *pool, in initialize_event_pool() argument 458 pool->size = size; in initialize_event_pool() 459 pool->next = 0; in initialize_event_pool() 460 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); in initialize_event_pool() 461 if (!pool->events) in initialize_event_pool() 464 pool->iu_storage = in initialize_event_pool() 466 pool->size * sizeof(*pool->iu_storage), in initialize_event_pool() 467 &pool->iu_token, 0); in initialize_event_pool() 468 if (!pool->iu_storage) { in initialize_event_pool() 469 kfree(pool->events); in initialize_event_pool() [all …]
|
D | ibmvfc.c | 748 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool, in ibmvfc_valid_event() argument 751 int index = evt - pool->events; in ibmvfc_valid_event() 752 if (index < 0 || index >= pool->size) /* outside of bounds */ in ibmvfc_valid_event() 754 if (evt != pool->events + index) /* unaligned */ in ibmvfc_valid_event() 767 struct ibmvfc_event_pool *pool = &vhost->pool; in ibmvfc_free_event() local 769 BUG_ON(!ibmvfc_valid_event(pool, evt)); in ibmvfc_free_event() 1207 struct ibmvfc_event_pool *pool = &vhost->pool; in ibmvfc_init_event_pool() local 1210 pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ; in ibmvfc_init_event_pool() 1211 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); in ibmvfc_init_event_pool() 1212 if (!pool->events) in ibmvfc_init_event_pool() [all …]
|
D | ibmvscsi.h | 98 struct event_pool pool; member
|
/linux-4.4.14/sound/core/ |
D | memalloc.c | 121 struct gen_pool *pool = NULL; in snd_malloc_dev_iram() local 127 pool = of_gen_pool_get(dev->of_node, "iram", 0); in snd_malloc_dev_iram() 129 if (!pool) in snd_malloc_dev_iram() 133 dmab->private_data = pool; in snd_malloc_dev_iram() 135 dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr); in snd_malloc_dev_iram() 144 struct gen_pool *pool = dmab->private_data; in snd_free_dev_iram() local 146 if (pool && dmab->area) in snd_free_dev_iram() 147 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); in snd_free_dev_iram()
|
/linux-4.4.14/tools/usb/usbip/libsrc/ |
D | names.c | 160 struct pool { struct 161 struct pool *next; argument 165 static struct pool *pool_head; argument 169 struct pool *p; in my_malloc() 171 p = calloc(1, sizeof(struct pool)); in my_malloc() 189 struct pool *pool; in names_free() local 194 for (pool = pool_head; pool != NULL; ) { in names_free() 195 struct pool *tmp; in names_free() 197 if (pool->mem) in names_free() 198 free(pool->mem); in names_free() [all …]
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | iommu.c | 191 struct iommu_pool *pool; in iommu_range_alloc() local 214 pool = &(tbl->large_pool); in iommu_range_alloc() 216 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc() 218 spin_lock_irqsave(&(pool->lock), flags); in iommu_range_alloc() 222 (*handle >= pool->start) && (*handle < pool->end)) in iommu_range_alloc() 225 start = pool->hint; in iommu_range_alloc() 227 limit = pool->end; in iommu_range_alloc() 234 start = pool->start; in iommu_range_alloc() 243 spin_unlock(&(pool->lock)); in iommu_range_alloc() 244 pool = &(tbl->pools[0]); in iommu_range_alloc() [all …]
|
/linux-4.4.14/include/rdma/ |
D | ib_fmr_pool.h | 61 void (*flush_function)(struct ib_fmr_pool *pool, 69 struct ib_fmr_pool *pool; member 82 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool); 84 int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
|
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd.c | 1246 kib_pool_t *pool = &tpo->tpo_pool; in kiblnd_map_tx_pool() local 1247 kib_net_t *net = pool->po_owner->ps_net; in kiblnd_map_tx_pool() 1267 for (ipage = page_offset = i = 0; i < pool->po_size; i++) { in kiblnd_map_tx_pool() 1281 list_add(&tx->tx_list, &pool->po_free_list); in kiblnd_map_tx_pool() 1345 static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) in kiblnd_destroy_fmr_pool() argument 1347 LASSERT(pool->fpo_map_count == 0); in kiblnd_destroy_fmr_pool() 1349 if (pool->fpo_fmr_pool != NULL) in kiblnd_destroy_fmr_pool() 1350 ib_destroy_fmr_pool(pool->fpo_fmr_pool); in kiblnd_destroy_fmr_pool() 1352 if (pool->fpo_hdev != NULL) in kiblnd_destroy_fmr_pool() 1353 kiblnd_hdev_decref(pool->fpo_hdev); in kiblnd_destroy_fmr_pool() [all …]
|
/linux-4.4.14/include/linux/ceph/ |
D | msgpool.h | 13 mempool_t *pool; member 18 extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type, 21 extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
|
D | osdmap.h | 23 uint64_t pool; member 44 static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) in ceph_can_shift_osds() argument 46 switch (pool->type) { in ceph_can_shift_osds() 57 s64 pool; member 188 pgid->pool = ceph_decode_64(p); in ceph_decode_pgid()
|
D | messenger.h | 166 struct ceph_msgpool *pool; member
|
/linux-4.4.14/Documentation/vm/ |
D | zswap.txt | 5 dynamically allocated RAM-based memory pool. zswap basically trades CPU cycles 26 device when the compressed pool reaches its size limit. This requirement had 39 back into memory all of the pages stored in the compressed pool. The 40 pages stored in zswap will remain in the compressed pool until they are 42 pages out of the compressed pool, a swapoff on the swap device(s) will 44 compressed pool. 49 evict pages from its own compressed pool on an LRU basis and write them back to 50 the backing swap device in the case that the compressed pool is full. 52 Zswap makes use of zpool for managing the compressed memory pool. Each 55 accessed. The compressed memory pool grows on demand and shrinks as compressed [all …]
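A rough feel for the zpool layer mentioned above — a hedged C sketch, assuming the 4.4-era zpool API (the zpool_create_pool()/zpool_malloc() signatures are inferred, so verify against include/linux/zpool.h; zswap_demo_store is an invented name):

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

/* Hedged sketch: store one buffer through a zbud-backed zpool. */
static int zswap_demo_store(const void *src, size_t len)
{
	struct zpool *pool;
	unsigned long handle;
	void *dst;
	int ret;

	pool = zpool_create_pool("zbud", "demo", GFP_KERNEL, NULL);
	if (!pool)
		return -ENOMEM;

	ret = zpool_malloc(pool, len, GFP_KERNEL, &handle);
	if (!ret) {
		dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
		memcpy(dst, src, len);
		zpool_unmap_handle(pool, handle);
		zpool_free(pool, handle);
	}
	zpool_destroy_pool(pool);
	return ret;
}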
|
D | hugetlbpage.txt | 22 persistent hugetlb pages in the kernel's huge page pool. It also displays 38 HugePages_Total is the size of the pool of huge pages. 39 HugePages_Free is the number of huge pages in the pool that are not yet 42 which a commitment to allocate from the pool has been made, 45 huge page from the pool of huge pages at fault time. 47 the pool above the value in /proc/sys/vm/nr_hugepages. The 55 pages in the kernel's huge page pool. "Persistent" huge pages will be 56 returned to the huge page pool when freed by a task. A user with root 65 pool, a user with appropriate privilege can use either the mmap system call 88 huge page pool to 20, allocating or freeing huge pages, as required. [all …]
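The fault-time allocation from the huge page pool described above can be exercised from userspace via MAP_HUGETLB; a minimal sketch, assuming the pool already holds at least one free 2MB page:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LENGTH (2UL * 1024 * 1024)	/* one 2MB huge page */

int main(void)
{
	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");	/* pool empty, or no huge page support */
		return 1;
	}
	memset(addr, 0, LENGTH);	/* fault in the huge page */
	munmap(addr, LENGTH);
	return 0;
}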
|
D | cleancache.txt | 41 pool id which, if positive, must be saved in the filesystem's superblock; 44 the pool id, a file key, and a page index into the file. (The combination 45 of a pool id, a file key, and an index is sometimes called a "handle".) 50 all pages in all files specified by the given pool id and also surrender 51 the pool id. 53 An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache 54 to treat the pool as shared using a 128-bit UUID as a key. On systems 58 same UUID will receive the same pool id, thus allowing the pages to 64 If a get_page is successful on a non-shared pool, the page is invalidated 65 (thus making cleancache an "exclusive" cache). On a shared pool, the page [all …]
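The (pool id, file key, index) "handle" is easiest to picture as a tuple; the struct below is purely illustrative — cc_handle and its field types are invented here, not the in-kernel representation:

/* Illustrative only: what a cleancache "handle" addresses. */
struct cc_handle {
	int pool_id;		/* returned by init_fs or init_shared_fs */
	unsigned char file_key[16];	/* identifies the file within the pool */
	unsigned long index;	/* page index into the file */
};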
|
D | balance | 21 mapped pages from the direct mapped pool, instead of falling back on 22 the dma pool, so as to keep the dma pool filled for dma requests (atomic 25 regular memory requests by allocating one from the dma pool, instead
|
D | overcommit-accounting | 62 shmfs memory drawn from the same pool
|
/linux-4.4.14/Documentation/ABI/testing/ |
D | sysfs-bus-rbd | 9 Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name] 71 pool 73 The name of the storage pool where this rbd image resides. 74 An rbd image name is unique within its pool. 78 The unique identifier for the rbd image's pool. This is 79 a permanent attribute of the pool. A pool's id will never
|
D | sysfs-fs-ext4 | 46 block group specific preallocation pool, so that small 49 preallocation pool.
|
/linux-4.4.14/drivers/char/ |
D | random.c | 426 __u32 *pool; member 454 .pool = input_pool_data 463 .pool = blocking_pool_data, 473 .pool = nonblocking_pool_data, 516 w ^= r->pool[i]; in _mix_pool_bytes() 517 w ^= r->pool[(i + tap1) & wordmask]; in _mix_pool_bytes() 518 w ^= r->pool[(i + tap2) & wordmask]; in _mix_pool_bytes() 519 w ^= r->pool[(i + tap3) & wordmask]; in _mix_pool_bytes() 520 w ^= r->pool[(i + tap4) & wordmask]; in _mix_pool_bytes() 521 w ^= r->pool[(i + tap5) & wordmask]; in _mix_pool_bytes() [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/ |
D | client.c | 439 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) in ptlrpc_free_rq_pool() argument 444 LASSERT(pool != NULL); in ptlrpc_free_rq_pool() 446 spin_lock(&pool->prp_lock); in ptlrpc_free_rq_pool() 447 list_for_each_safe(l, tmp, &pool->prp_req_list) { in ptlrpc_free_rq_pool() 451 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); in ptlrpc_free_rq_pool() 455 spin_unlock(&pool->prp_lock); in ptlrpc_free_rq_pool() 456 kfree(pool); in ptlrpc_free_rq_pool() 463 int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) in ptlrpc_add_rqs_to_pool() argument 468 while (size < pool->prp_rq_size) in ptlrpc_add_rqs_to_pool() 471 LASSERTF(list_empty(&pool->prp_req_list) || in ptlrpc_add_rqs_to_pool() [all …]
|
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_fcoe.c | 137 if (ddp->pool) { in ixgbe_fcoe_ddp_put() 138 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); in ixgbe_fcoe_ddp_put() 139 ddp->pool = NULL; in ixgbe_fcoe_ddp_put() 206 if (!ddp_pool->pool) { in ixgbe_fcoe_ddp_setup() 219 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); in ixgbe_fcoe_ddp_setup() 224 ddp->pool = ddp_pool->pool; in ixgbe_fcoe_ddp_setup() 343 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); in ixgbe_fcoe_ddp_setup() 623 if (ddp_pool->pool) in ixgbe_fcoe_dma_pool_free() 624 dma_pool_destroy(ddp_pool->pool); in ixgbe_fcoe_dma_pool_free() 625 ddp_pool->pool = NULL; in ixgbe_fcoe_dma_pool_free() [all …]
|
D | ixgbe_fcoe.h | 67 struct dma_pool *pool; member 72 struct dma_pool *pool; member
|
D | ixgbe_main.c | 3655 u16 pool; in ixgbe_setup_psrtype() local 3672 for_each_set_bit(pool, &adapter->fwd_bitmask, 32) in ixgbe_setup_psrtype() 3673 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); in ixgbe_setup_psrtype() 4553 static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, in ixgbe_macvlan_set_rx_mode() argument 4560 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); in ixgbe_macvlan_set_rx_mode() 4572 ixgbe_write_uc_addr_list(adapter->netdev, pool); in ixgbe_macvlan_set_rx_mode() 4573 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); in ixgbe_macvlan_set_rx_mode() 4581 u16 pool = vadapter->pool; in ixgbe_fwd_psrtype() local 4596 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); in ixgbe_fwd_psrtype() 4694 if (!test_bit(accel->pool, &adapter->fwd_bitmask)) in ixgbe_fwd_ring_up() [all …]
|
/linux-4.4.14/drivers/net/wireless/cw1200/ |
D | queue.c | 184 queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, in cw1200_queue_init() 186 if (!queue->pool) in cw1200_queue_init() 192 kfree(queue->pool); in cw1200_queue_init() 193 queue->pool = NULL; in cw1200_queue_init() 198 list_add_tail(&queue->pool[i].head, &queue->free_pool); in cw1200_queue_init() 250 kfree(queue->pool); in cw1200_queue_deinit() 252 queue->pool = NULL; in cw1200_queue_deinit() 305 item - queue->pool); in cw1200_queue_put() 384 item = &queue->pool[item_id]; in cw1200_queue_requeue() 435 item - queue->pool); in cw1200_queue_requeue_all() [all …]
|
D | queue.h | 35 struct cw1200_queue_item *pool; member
|
/linux-4.4.14/fs/ceph/ |
D | addr.c | 784 mempool_t *pool = NULL; /* Becomes non-null if mempool used */ in ceph_writepages_start() local 904 pool = fsc->wb_pagevec_pool; in ceph_writepages_start() 905 pages = mempool_alloc(pool, GFP_NOFS); in ceph_writepages_start() 971 !!pool, false); in ceph_writepages_start() 974 pool = NULL; in ceph_writepages_start() 1620 static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool) in __ceph_pool_perm_get() argument 1634 if (pool < perm->pool) in __ceph_pool_perm_get() 1636 else if (pool > perm->pool) in __ceph_pool_perm_get() 1647 dout("__ceph_pool_perm_get pool %u no perm cached\n", pool); in __ceph_pool_perm_get() 1654 if (pool < perm->pool) in __ceph_pool_perm_get() [all …]
|
D | xattr.c | 73 s64 pool = ceph_file_layout_pg_pool(ci->i_layout); in ceph_vxattrcb_layout() local 79 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); in ceph_vxattrcb_layout() 102 (unsigned long long)pool); in ceph_vxattrcb_layout() 141 s64 pool = ceph_file_layout_pg_pool(ci->i_layout); in ceph_vxattrcb_layout_pool() local 145 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); in ceph_vxattrcb_layout_pool() 149 ret = snprintf(val, size, "%lld", (unsigned long long)pool); in ceph_vxattrcb_layout_pool() 241 XATTR_LAYOUT_FIELD(dir, layout, pool), 268 XATTR_LAYOUT_FIELD(file, layout, pool),
|
D | mds_client.h | 270 u32 pool; member
|
/linux-4.4.14/Documentation/ |
D | java.txt | 178 /* From Sun's Java VM Specification, as tag entries in the constant pool. */ 204 long *pool; 234 /* Reads in a value from the constant pool. */ 239 pool[*cur] = ftell(classfile); 301 pool = calloc(cp_count, sizeof(long)); 302 if(!pool) 303 error("%s: Out of memory for constant pool\n", program); 313 if(!pool[this_class] || pool[this_class] == -1) 315 if(fseek(classfile, pool[this_class] + 1, SEEK_SET)) 321 if(!pool[classinfo_ptr] || pool[classinfo_ptr] == -1) [all …]
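For orientation, the constant pool that code walks sits directly after the fixed class-file header; a self-contained sketch of locating it (the layout — 4-byte magic, 2-byte minor, 2-byte major, then a 2-byte big-endian entry count — is fixed by the JVM spec; read_u2 is a helper invented for this sketch):

#include <stdio.h>

/* Read a big-endian 16-bit value from the class file. */
static unsigned read_u2(FILE *f)
{
	int hi = fgetc(f), lo = fgetc(f);
	return ((unsigned)hi << 8) | (unsigned)lo;
}

static long constant_pool_count(FILE *classfile)
{
	fseek(classfile, 8, SEEK_SET);	/* skip magic + minor + major */
	return read_u2(classfile);	/* entries are numbered 1..count-1 */
}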
|
D | workqueue.txt | 48 worker pool. A MT wq could provide only one execution context per CPU 59 their own thread pool. 70 * Automatically regulate worker pool and level of concurrency so that 107 When a work item is queued to a workqueue, the target worker-pool is 109 and appended on the shared worklist of the worker-pool. For example, 111 be queued on the worklist of either normal or highpri worker-pool that 114 For any worker pool implementation, managing the concurrency level 120 Each worker-pool bound to an actual CPU implements concurrency 121 management by hooking into the scheduler. The worker-pool is notified 127 workers on the CPU, the worker-pool doesn't start execution of a new [all …]
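A minimal sketch of feeding those worker pools through the workqueue API — demo_fn/demo_work are invented names; system_wq is the default queue backed by the per-CPU normal-priority pools:

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Hypothetical handler; runs on a worker-pool thread. */
static void demo_fn(struct work_struct *work)
{
	pr_info("ran on a worker-pool thread\n");
}

static DECLARE_WORK(demo_work, demo_fn);

static int demo_submit(void)
{
	/* worker-pool selection and concurrency management happen inside */
	return queue_work(system_wq, &demo_work);
}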
|
D | DMA-API.txt | 95 dma_pool_create() initializes a pool of DMA-coherent buffers 104 from this pool must not cross 4KByte boundaries. 107 void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, 114 void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags, 117 This allocates memory from the pool; the returned memory will meet the 122 address usable by the CPU, and the DMA address usable by the pool's 126 void dma_pool_free(struct dma_pool *pool, void *vaddr, 129 This puts memory back into the pool. The pool is what was passed to 134 void dma_pool_destroy(struct dma_pool *pool); 136 dma_pool_destroy() frees the resources of the pool. It must be [all …]
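Pulling the calls quoted above into one lifecycle; a sketch in which the device pointer, the 64-byte size/alignment, and the 4096-byte boundary are assumptions for illustration:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

static int demo_desc_alloc(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* 64-byte blocks, 64-byte aligned, never crossing 4KByte */
	pool = dma_pool_create("demo-desc", dev, 64, 64, 4096);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);

	dma_pool_destroy(pool);	/* all blocks must be freed first */
	return vaddr ? 0 : -ENOMEM;
}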
|
D | DMA-API-HOWTO.txt | 435 struct dma_pool *pool; 437 pool = dma_pool_create(name, dev, size, align, boundary); 443 pass 0 for boundary; passing 4096 says memory allocated from this pool 447 Allocate memory from a DMA pool like this: 449 cpu_addr = dma_pool_alloc(pool, flags, &dma_handle); 457 dma_pool_free(pool, cpu_addr, dma_handle); 459 where pool is what you passed to dma_pool_alloc(), and cpu_addr and 465 dma_pool_destroy(pool); 468 from a pool before you destroy the pool. This function may not
|
/linux-4.4.14/Documentation/filesystems/nfs/ |
D | knfsd-stats.txt | 29 for each NFS thread pool. 35 pool 36 The id number of the NFS thread pool to which this line applies. 39 Thread pool ids are a contiguous set of small integers starting 40 at zero. The maximum value depends on the thread pool mode, but 42 Note that in the default case there will be a single thread pool 44 and thus this file will have a single line with a pool id of "0". 72 pool for the NFS workload (the workload is thread-limited), in which
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_av.c | 189 ah->av = pci_pool_alloc(dev->av_table.pool, in mthca_create_ah() 250 pci_pool_free(dev->av_table.pool, ah->av, ah->avdma); in mthca_destroy_ah() 338 dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev, in mthca_init_av_table() 341 if (!dev->av_table.pool) in mthca_init_av_table() 358 pci_pool_destroy(dev->av_table.pool); in mthca_init_av_table() 372 pci_pool_destroy(dev->av_table.pool); in mthca_cleanup_av_table()
|
D | mthca_dev.h | 121 struct pci_pool *pool; member 266 struct pci_pool *pool; member
|
/linux-4.4.14/Documentation/devicetree/bindings/reserved-memory/ |
D | reserved-memory.txt | 27 reflect the purpose of the node (i.e. "framebuffer" or "dma-pool"). Unit 50 - shared-dma-pool: This indicates a region of memory meant to be 51 used as a shared pool of DMA buffers for a set of devices. It can 52 be used by an operating system to instantiate the necessary pool 69 region for the default pool of the contiguous memory allocator. 100 compatible = "shared-dma-pool";
|
/linux-4.4.14/drivers/net/ethernet/intel/i40e/ |
D | i40e_fcoe.c | 176 if (ddp->pool) { in i40e_fcoe_ddp_unmap() 177 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); in i40e_fcoe_ddp_unmap() 178 ddp->pool = NULL; in i40e_fcoe_ddp_unmap() 488 if (!ddp_pool->pool) { in i40e_fcoe_dma_pool_free() 492 dma_pool_destroy(ddp_pool->pool); in i40e_fcoe_dma_pool_free() 493 ddp_pool->pool = NULL; in i40e_fcoe_dma_pool_free() 510 struct dma_pool *pool; in i40e_fcoe_dma_pool_create() local 514 if (ddp_pool && ddp_pool->pool) { in i40e_fcoe_dma_pool_create() 519 pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX, in i40e_fcoe_dma_pool_create() 521 if (!pool) { in i40e_fcoe_dma_pool_create() [all …]
|
D | i40e_fcoe.h | 112 struct dma_pool *pool; member 117 struct dma_pool *pool; member
|
/linux-4.4.14/drivers/scsi/snic/ |
D | snic_main.c | 358 mempool_t *pool; in snic_probe() local 582 pool = mempool_create_slab_pool(2, in snic_probe() 584 if (!pool) { in snic_probe() 590 snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool; in snic_probe() 592 pool = mempool_create_slab_pool(2, in snic_probe() 594 if (!pool) { in snic_probe() 600 snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool; in snic_probe() 602 pool = mempool_create_slab_pool(2, in snic_probe() 604 if (!pool) { in snic_probe() 610 snic->req_pool[SNIC_REQ_TM_CACHE] = pool; in snic_probe()
|
/linux-4.4.14/drivers/gpu/drm/sis/ |
D | sis_mm.c | 83 void *data, int pool) in sis_drm_alloc() argument 94 if (0 == ((pool == 0) ? dev_priv->vram_initialized : in sis_drm_alloc() 109 if (pool == AGP_TYPE) { in sis_drm_alloc() 141 mem->offset = ((pool == 0) ? in sis_drm_alloc() 159 DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size, in sis_drm_alloc()
|
/linux-4.4.14/Documentation/devicetree/bindings/net/ |
D | keystone-netcp.txt | 120 - rx-pool: specifies the number of descriptors to be used & the region-id 121 for creating the rx descriptor pool. 122 - tx-pool: specifies the number of descriptors to be used & the region-id 123 for creating the tx descriptor pool. 198 rx-pool = <1024 12>; 199 tx-pool = <1024 12>; 210 rx-pool = <1024 12>; 211 tx-pool = <1024 12>;
|
/linux-4.4.14/arch/arm/boot/dts/ |
D | k2l-netcp.dtsi | 189 rx-pool = <1024 12>; 190 tx-pool = <1024 12>; 201 rx-pool = <1024 12>; 202 tx-pool = <1024 12>;
|
D | k2e-netcp.dtsi | 206 rx-pool = <1024 12>; 207 tx-pool = <1024 12>; 218 rx-pool = <1024 12>; 219 tx-pool = <1024 12>;
|
D | k2hk-netcp.dtsi | 209 rx-pool = <1024 12>; 210 tx-pool = <1024 12>; 221 rx-pool = <1024 12>; 222 tx-pool = <1024 12>;
|
/linux-4.4.14/drivers/video/fbdev/ |
D | sh_mobile_meram.c | 160 struct gen_pool *pool; member 203 return gen_pool_alloc(priv->pool, size); in meram_alloc() 209 gen_pool_free(priv->pool, mem, size); in meram_free() 686 priv->pool = gen_pool_create(ilog2(MERAM_GRANULARITY), -1); in sh_mobile_meram_probe() 687 if (priv->pool == NULL) { in sh_mobile_meram_probe() 692 error = gen_pool_add(priv->pool, meram->start, resource_size(meram), in sh_mobile_meram_probe() 709 if (priv->pool) in sh_mobile_meram_probe() 710 gen_pool_destroy(priv->pool); in sh_mobile_meram_probe() 732 gen_pool_destroy(priv->pool); in sh_mobile_meram_remove()
|
/linux-4.4.14/drivers/net/ethernet/hisilicon/ |
D | hix5hd2_gmac.c | 206 struct hix5hd2_desc_sw pool[QUEUE_NUMS]; member 207 #define rx_fq pool[0] 208 #define rx_bq pool[1] 209 #define tx_bq pool[2] 210 #define tx_rq pool[3] 846 if (priv->pool[i].desc) { in hix5hd2_destroy_hw_desc_queue() 847 dma_free_coherent(priv->dev, priv->pool[i].size, in hix5hd2_destroy_hw_desc_queue() 848 priv->pool[i].desc, in hix5hd2_destroy_hw_desc_queue() 849 priv->pool[i].phys_addr); in hix5hd2_destroy_hw_desc_queue() 850 priv->pool[i].desc = NULL; in hix5hd2_destroy_hw_desc_queue() [all …]
|
/linux-4.4.14/drivers/crypto/marvell/ |
D | cesa.c | 324 engine->pool = of_gen_pool_get(cesa->dev->of_node, in mv_cesa_get_sram() 326 if (engine->pool) { in mv_cesa_get_sram() 327 engine->sram = gen_pool_dma_alloc(engine->pool, in mv_cesa_get_sram() 333 engine->pool = NULL; in mv_cesa_get_sram() 364 if (!engine->pool) in mv_cesa_put_sram() 367 gen_pool_free(engine->pool, (unsigned long)engine->sram, in mv_cesa_put_sram()
|
/linux-4.4.14/drivers/hid/usbhid/ |
D | hid-pidff.c | 170 struct pidff_usage pool[sizeof(pidff_pool)]; member 1149 PIDFF_FIND_FIELDS(pool, PID_POOL, 0); in pidff_init_fields() 1181 if (pidff->pool[PID_SIMULTANEOUS_MAX].value) { in pidff_reset() 1182 while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) { in pidff_reset() 1186 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); in pidff_reset() 1297 if (pidff->pool[PID_SIMULTANEOUS_MAX].value) in hid_pidff_init() 1299 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); in hid_pidff_init() 1301 if (pidff->pool[PID_RAM_POOL_SIZE].value) in hid_pidff_init() 1303 pidff->pool[PID_RAM_POOL_SIZE].value[0]); in hid_pidff_init() 1305 if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && in hid_pidff_init() [all …]
|
/linux-4.4.14/drivers/nvme/host/ |
D | lightnvm.c | 528 static void nvme_nvm_destroy_dma_pool(void *pool) in nvme_nvm_destroy_dma_pool() argument 530 struct dma_pool *dma_pool = pool; in nvme_nvm_destroy_dma_pool() 535 static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool, in nvme_nvm_dev_dma_alloc() argument 538 return dma_pool_alloc(pool, mem_flags, dma_handler); in nvme_nvm_dev_dma_alloc() 541 static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list, in nvme_nvm_dev_dma_free() argument 544 dma_pool_free(pool, ppa_list, dma_handler); in nvme_nvm_dev_dma_free()
|
/linux-4.4.14/drivers/net/wireless/iwlwifi/pcie/ |
D | rx.c | 398 if (!rxq->pool[i].page) in iwl_pcie_rxq_free_rbs() 400 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, in iwl_pcie_rxq_free_rbs() 403 __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); in iwl_pcie_rxq_free_rbs() 404 rxq->pool[i].page = NULL; in iwl_pcie_rxq_free_rbs() 664 list_add(&rxq->pool[i].list, &rxq->rx_used); in iwl_pcie_rx_init_rxb_lists() 677 list_add(&rba->pool[i].list, &rba->rbd_empty); in iwl_pcie_rx_init_rba() 689 if (!rba->pool[i].page) in iwl_pcie_rx_free_rba() 691 dma_unmap_page(trans->dev, rba->pool[i].page_dma, in iwl_pcie_rx_free_rba() 694 __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); in iwl_pcie_rx_free_rba() 695 rba->pool[i].page = NULL; in iwl_pcie_rx_free_rba()
|
D | internal.h | 125 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; member 143 struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; member
|
/linux-4.4.14/include/uapi/linux/ |
D | hyperv.h | 339 __u8 pool; member 394 __u8 pool; member
|
/linux-4.4.14/Documentation/devicetree/bindings/misc/ |
D | sram.txt | 36 - pool : indicates that the particular reserved SRAM area is addressable 60 pool;
|
/linux-4.4.14/Documentation/devicetree/bindings/soc/ti/ |
D | keystone-navigator-qmss.txt | 15 queue pool management (allocation, push, pop and notify) and descriptor 16 pool management. 44 - qpend : pool of qpend (interruptible) queues 45 - general-purpose : pool of general queues, primarily used 48 - accumulator : pool of queues on PDSP accumulator channel
|
/linux-4.4.14/drivers/staging/fsl-mc/ |
D | README.txt | 183 network interface configuration, and rx buffer pool configuration 191 packets and do hardware buffer pool management operations. For 196 pool depletion 200 pool. 231 A network interface requires a 'buffer pool' (DPBP 331 The allocator maintains a pool of objects that are available for 341 -hardware buffer pool management
|
/linux-4.4.14/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 334 static void srp_destroy_fr_pool(struct srp_fr_pool *pool) in srp_destroy_fr_pool() argument 339 if (!pool) in srp_destroy_fr_pool() 342 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_destroy_fr_pool() 346 kfree(pool); in srp_destroy_fr_pool() 360 struct srp_fr_pool *pool; in srp_create_fr_pool() local 368 pool = kzalloc(sizeof(struct srp_fr_pool) + in srp_create_fr_pool() 370 if (!pool) in srp_create_fr_pool() 372 pool->size = pool_size; in srp_create_fr_pool() 373 pool->max_page_list_len = max_page_list_len; in srp_create_fr_pool() 374 spin_lock_init(&pool->lock); in srp_create_fr_pool() [all …]
|
/linux-4.4.14/drivers/usb/gadget/function/ |
D | u_serial.c | 363 struct list_head *pool = &port->write_pool; in gs_start_tx() local 368 while (!port->write_busy && !list_empty(pool)) { in gs_start_tx() 375 req = list_entry(pool->next, struct usb_request, list); in gs_start_tx() 407 list_add(&req->list, pool); in gs_start_tx() 432 struct list_head *pool = &port->read_pool; in gs_start_rx() local 435 while (!list_empty(pool)) { in gs_start_rx() 448 req = list_entry(pool->next, struct usb_request, list); in gs_start_rx() 462 list_add(&req->list, pool); in gs_start_rx()
|
/linux-4.4.14/drivers/net/ethernet/marvell/ |
D | mvpp2.c | 46 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) argument 178 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) argument 180 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) argument 182 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) argument 184 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) argument 186 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) argument 187 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) argument 190 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) argument 202 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) argument 208 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) argument [all …]
|
/linux-4.4.14/include/uapi/linux/netfilter_bridge/ |
D | ebt_among.h | 42 struct ebt_mac_wormhash_tuple pool[0]; member
|
/linux-4.4.14/drivers/iio/ |
D | industrialio-trigger.c | 180 ret = bitmap_find_free_region(trig->pool, in iio_trigger_get_irq() 193 clear_bit(irq - trig->subirq_base, trig->pool); in iio_trigger_put_irq() 209 = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_attach_poll_func() 236 = (bitmap_weight(trig->pool, in iio_trigger_detach_poll_func()
|
/linux-4.4.14/drivers/block/ |
D | null_blk.c | 539 static void null_lnvm_destroy_dma_pool(void *pool) in null_lnvm_destroy_dma_pool() argument 541 mempool_destroy(pool); in null_lnvm_destroy_dma_pool() 544 static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool, in null_lnvm_dev_dma_alloc() argument 547 return mempool_alloc(pool, mem_flags); in null_lnvm_dev_dma_alloc() 550 static void null_lnvm_dev_dma_free(void *pool, void *entry, in null_lnvm_dev_dma_free() argument 553 mempool_free(entry, pool); in null_lnvm_dev_dma_free()
|
D | cciss_scsi.c | 110 struct cciss_scsi_cmd_stack_elem_t *pool; member 213 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); in scsi_cmd_stack_setup() 215 stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) in scsi_cmd_stack_setup() 218 if (stk->pool == NULL) { in scsi_cmd_stack_setup() 225 pci_free_consistent(h->pdev, size, stk->pool, in scsi_cmd_stack_setup() 230 stk->elem[i] = &stk->pool[i]; in scsi_cmd_stack_setup() 255 pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); in scsi_cmd_stack_free() 256 stk->pool = NULL; in scsi_cmd_stack_free()
|
/linux-4.4.14/drivers/hv/ |
D | hv_kvp.c | 346 __u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool; in kvp_send_key() local 360 message->kvp_hdr.pool = pool; in kvp_send_key()
|
/linux-4.4.14/Documentation/trace/ |
D | stm.txt | 11 master/channel combination from this pool. 44 which means that the master allocation pool for this rule consists of 45 masters 48 through 63 and channel allocation pool has channels 0
|
/linux-4.4.14/net/bridge/netfilter/ |
D | ebt_among.c | 36 p = &wh->pool[i]; in ebt_mac_wormhash_contains() 43 p = &wh->pool[i]; in ebt_mac_wormhash_contains()
|
/linux-4.4.14/drivers/scsi/fnic/ |
D | fnic_main.c | 546 mempool_t *pool; in fnic_probe() local 742 pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); in fnic_probe() 743 if (!pool) in fnic_probe() 745 fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; in fnic_probe() 747 pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); in fnic_probe() 748 if (!pool) in fnic_probe() 750 fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; in fnic_probe()
|
/linux-4.4.14/include/linux/iio/ |
D | trigger.h | 74 unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)]; member
|
/linux-4.4.14/include/trace/events/ |
D | workqueue.h | 57 __entry->cpu = pwq->pool->cpu;
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | pio.c | 304 int pool; in init_sc_pools_and_sizes() local 336 pool = wildcard_to_pool(size); in init_sc_pools_and_sizes() 337 if (pool == -1) { /* non-wildcard */ in init_sc_pools_and_sizes() 339 } else if (pool < NUM_SC_POOLS) { /* valid wildcard */ in init_sc_pools_and_sizes() 340 mem_pool_info[pool].count += count; in init_sc_pools_and_sizes() 403 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size); in init_sc_pools_and_sizes() local 405 WARN_ON_ONCE(pool >= NUM_SC_POOLS); in init_sc_pools_and_sizes() 406 dd->sc_sizes[i].size = mem_pool_info[pool].size; in init_sc_pools_and_sizes()
|
/linux-4.4.14/drivers/usb/musb/ |
D | cppi_dma.h | 126 struct dma_pool *pool; member
|
D | cppi_dma.c | 126 bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); in cppi_pool_init() 147 dma_pool_free(cppi->pool, bd, bd->dma); in cppi_pool_free() 1326 controller->pool = dma_pool_create("cppi", in cppi_dma_controller_create() 1330 if (!controller->pool) { in cppi_dma_controller_create() 1364 dma_pool_destroy(cppi->pool); in cppi_dma_controller_destroy()
|
/linux-4.4.14/Documentation/arm/keystone/ |
D | knav-qmss.txt | 19 queue pool management (allocation, push, pop and notify) and descriptor 20 pool management.
|
/linux-4.4.14/drivers/char/agp/ |
D | frontend.c | 54 curr = agp_fe.current_controller->pool; in agp_find_mem_by_key() 88 agp_fe.current_controller->pool = next; in agp_remove_from_pool() 203 prev = agp_fe.current_controller->pool; in agp_insert_into_pool() 209 agp_fe.current_controller->pool = temp; in agp_insert_into_pool() 363 memory = controller->pool; in agp_remove_all_memory()
|
/linux-4.4.14/drivers/staging/lustre/ |
D | sysfs-fs-lustre | 356 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/granted 362 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_rate 369 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/cancel_rate 376 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_speed 383 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_plan 390 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/limit 394 Controls number of allowed locks in this pool. 397 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/lock_volume_factor 405 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/server_lock_volume 411 What: /sys/fs/lustre/ldlm/namespaces/<name>/pool/recalc_period
|
/linux-4.4.14/Documentation/devicetree/bindings/soc/fsl/ |
D | qman-portals.txt | 94 Definition: Must include "fsl,qman-pool-channel" 95 May include "fsl,<SoC>-qman-pool-channel"
|
/linux-4.4.14/arch/mips/cavium-octeon/executive/ |
D | cvmx-helper-util.c | 107 buffer_ptr.s.pool = wqe_pool.s.wqe_pool; in cvmx_helper_dump_packet() 139 cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool); in cvmx_helper_dump_packet()
|
/linux-4.4.14/scripts/kconfig/ |
D | zconf.gperf | 4 %define string-pool-name kconf_id_strings
|
/linux-4.4.14/drivers/net/wireless/ath/wcn36xx/ |
D | dxe.c | 235 struct wcn36xx_dxe_mem_pool *pool) in wcn36xx_dxe_init_tx_bd() argument 237 int i, chunk_size = pool->chunk_size; in wcn36xx_dxe_init_tx_bd() 238 dma_addr_t bd_phy_addr = pool->phy_addr; in wcn36xx_dxe_init_tx_bd() 239 void *bd_cpu_addr = pool->virt_addr; in wcn36xx_dxe_init_tx_bd()
|
/linux-4.4.14/include/linux/sunrpc/ |
D | svc.h | 462 struct svc_pool *pool, int node); 464 struct svc_pool *pool, int node);
|
/linux-4.4.14/drivers/md/bcache/ |
D | bset.c | 1119 if (state->pool) in bch_bset_sort_state_free() 1120 mempool_destroy(state->pool); in bch_bset_sort_state_free() 1130 state->pool = mempool_create_page_pool(1, page_order); in bch_bset_sort_state_init() 1131 if (!state->pool) in bch_bset_sort_state_init() 1192 outp = mempool_alloc(state->pool, GFP_NOIO); in __btree_sort() 1221 mempool_free(virt_to_page(out), state->pool); in __btree_sort()
|
/linux-4.4.14/arch/sparc/kernel/ |
D | pci_sun4v.c | 531 struct iommu_pool *pool; in probe_existing_entries() local 537 pool = &(iommu->pools[pool_nr]); in probe_existing_entries() 538 for (i = pool->start; i <= pool->end; i++) { in probe_existing_entries()
|
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_cmn.h | 1003 struct bnx2x_alloc_pool *pool) in bnx2x_free_rx_mem_pool() argument 1005 if (!pool->page) in bnx2x_free_rx_mem_pool() 1008 put_page(pool->page); in bnx2x_free_rx_mem_pool() 1010 pool->page = NULL; in bnx2x_free_rx_mem_pool()
|
D | bnx2x_cmn.c | 551 struct bnx2x_alloc_pool *pool = &fp->page_pool; in bnx2x_alloc_rx_sge() local 554 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) { in bnx2x_alloc_rx_sge() 559 if (pool->page) in bnx2x_alloc_rx_sge() 560 put_page(pool->page); in bnx2x_alloc_rx_sge() 562 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); in bnx2x_alloc_rx_sge() 563 if (unlikely(!pool->page)) { in bnx2x_alloc_rx_sge() 568 pool->offset = 0; in bnx2x_alloc_rx_sge() 571 mapping = dma_map_page(&bp->pdev->dev, pool->page, in bnx2x_alloc_rx_sge() 572 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE); in bnx2x_alloc_rx_sge() 578 get_page(pool->page); in bnx2x_alloc_rx_sge() [all …]
|
/linux-4.4.14/Documentation/power/ |
D | apm-acpi.txt | 31 apmd: http://ftp.debian.org/pool/main/a/apmd/
|
/linux-4.4.14/fs/logfs/ |
D | logfs.h | 730 static inline void logfs_mempool_destroy(mempool_t *pool) in logfs_mempool_destroy() argument 732 if (pool) in logfs_mempool_destroy() 733 mempool_destroy(pool); in logfs_mempool_destroy()
|
/linux-4.4.14/Documentation/block/ |
D | queue-sysfs.txt | 101 queue maintains a separate request pool for each cgroup when 103 per-block-cgroup request pool. IOW, if there are N block cgroups,
|
D | biodoc.txt | 444 used as index into pool */ 596 where it cannot allocate through normal means. If the pool is empty and it 598 replenish the pool (without deadlocking) and wait for availability in the pool. 600 could fail if the pool is empty. In general, mempool always first tries to 602 pool as long as it is not less than 50% full. 604 On a free, memory is released to the pool or directly freed depending on 605 the current availability in the pool. The mempool interface lets the 610 deadlocks, e.g. avoid trying to allocate more memory from the pool while 611 already holding memory obtained from the pool. 614 it ends up allocating a second bio from the same pool while [all …]
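The reserve-then-fallback behaviour described above, as a sketch using the slab-backed mempool helpers; demo_cache is an assumed, pre-created kmem_cache and MIN_RESERVE an illustrative number:

#include <linux/mempool.h>
#include <linux/slab.h>

#define MIN_RESERVE 4	/* objects the pool keeps for emergencies */

static mempool_t *demo_pool_create(struct kmem_cache *demo_cache)
{
	return mempool_create(MIN_RESERVE, mempool_alloc_slab,
			      mempool_free_slab, demo_cache);
}

static void *demo_get(mempool_t *pool)
{
	/* tries kmem_cache_alloc first, dips into the reserve on failure */
	return mempool_alloc(pool, GFP_NOIO);
}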
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | cmd.c | 921 mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, in alloc_cmd_box() 937 pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); in free_cmd_box() 1562 cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); in mlx5_cmd_init() 1563 if (!cmd->pool) in mlx5_cmd_init() 1656 pci_pool_destroy(cmd->pool); in mlx5_cmd_init() 1670 pci_pool_destroy(cmd->pool); in mlx5_cmd_cleanup()
|
/linux-4.4.14/Documentation/filesystems/caching/ |
D | backend-api.txt | 82 (2) that of one of the processes in the FS-Cache thread pool. 165 FS-Cache has a pool of threads that it uses to give CPU time to the 194 submit it to the thread pool. CacheFiles, for example, uses this to queue 384 This operation is run asynchronously from FS-Cache's thread pool, and 435 pool. If this is desired, the op->op.processor should be set to point to 505 This method is called asynchronously from the FS-Cache thread pool. It is 716 pool. One of the threads in the pool will invoke the retrieval record's
|
/linux-4.4.14/Documentation/arm/ |
D | tcm.txt | 71 allocation pool with gen_pool_create() and gen_pool_add() 142 /* Allocate some TCM memory from the pool */
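The genalloc pattern the text walks through, condensed into one hedged sketch — tcm_base/tcm_size are placeholders, not real platform addresses:

#include <linux/genalloc.h>

static void *demo_tcm_alloc(unsigned long tcm_base, size_t tcm_size)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(2, -1);	/* 4-byte granularity, any node */
	if (!pool)
		return NULL;
	if (gen_pool_add(pool, tcm_base, tcm_size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}

	addr = gen_pool_alloc(pool, 64);	/* carve 64 bytes from TCM */
	return (void *)addr;
}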
|
/linux-4.4.14/Documentation/DocBook/ |
D | kernel-api.xml.db | 255 API-dma-pool-create 256 API-dma-pool-destroy 257 API-dma-pool-alloc 258 API-dma-pool-free 259 API-dmam-pool-create 260 API-dmam-pool-destroy
|
/linux-4.4.14/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/ |
D | cpm.txt | 55 all of which contribute to the allocatable muram pool.
|
/linux-4.4.14/arch/avr32/mach-at32ap/ |
D | at32ap700x.c | 2334 struct gen_pool *pool; in sram_init() local 2337 pool = gen_pool_create(10, -1); in sram_init() 2338 if (!pool) in sram_init() 2341 if (gen_pool_add(pool, 0x24000000, 0x8000, -1)) in sram_init() 2344 sram_pool = pool; in sram_init() 2348 gen_pool_destroy(pool); in sram_init()
|
/linux-4.4.14/arch/powerpc/boot/dts/fsl/ |
D | qoriq-qman1-portals.dtsi | 2 * QorIQ QMan Portal device tree stub for 10 portals & 15 pool channels
|
/linux-4.4.14/drivers/net/wireless/iwlegacy/ |
D | 3945-mac.c | 1091 if (rxq->pool[i].page != NULL) { in il3945_rx_queue_reset() 1092 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, in il3945_rx_queue_reset() 1095 __il_free_pages(il, rxq->pool[i].page); in il3945_rx_queue_reset() 1096 rxq->pool[i].page = NULL; in il3945_rx_queue_reset() 1098 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); in il3945_rx_queue_reset() 1140 if (rxq->pool[i].page != NULL) { in il3945_rx_queue_free() 1141 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, in il3945_rx_queue_free() 1144 __il_free_pages(il, rxq->pool[i].page); in il3945_rx_queue_free() 1145 rxq->pool[i].page = NULL; in il3945_rx_queue_free()
|
/linux-4.4.14/drivers/crypto/caam/ |
D | Kconfig | 110 the hw_random API for supplying the kernel entropy pool.
|