/linux-4.1.27/mm/

D | mempool.c
    24  static void poison_error(mempool_t *pool, void *element, size_t size,  in poison_error() argument
    27  const int nr = pool->curr_nr;  in poison_error()
    33  pr_err("Mempool %p size %zu\n", pool, size);  in poison_error()
    41  static void __check_element(mempool_t *pool, void *element, size_t size)  in __check_element() argument
    50  poison_error(pool, element, size, i);  in __check_element()
    57  static void check_element(mempool_t *pool, void *element)  in check_element() argument
    60  if (pool->free == mempool_free_slab || pool->free == mempool_kfree)  in check_element()
    61  __check_element(pool, element, ksize(element));  in check_element()
    64  if (pool->free == mempool_free_pages) {  in check_element()
    65  int order = (int)(long)pool->pool_data;  in check_element()
    [all …]
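For orientation, a minimal sketch of the mempool API these hits exercise; the slab cache, element type and reserve size are hypothetical, not taken from mempool.c. The same pattern recurs in dm-io.c, multipath.c and scsi_lib.c below: the reserve guarantees forward progress on writeback paths even when the page allocator is exhausted.

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct my_req { int id; };            /* hypothetical element type */
    static struct kmem_cache *my_cache;   /* backing slab cache */
    static mempool_t *my_pool;

    static int my_pool_setup(void)
    {
            my_cache = kmem_cache_create("my_req", sizeof(struct my_req),
                                         0, SLAB_HWCACHE_ALIGN, NULL);
            if (!my_cache)
                    return -ENOMEM;
            /* keep at least 16 elements in reserve for low-memory paths */
            my_pool = mempool_create_slab_pool(16, my_cache);
            if (!my_pool) {
                    kmem_cache_destroy(my_cache);
                    return -ENOMEM;
            }
            return 0;
    }

    static void my_pool_use(void)
    {
            /* GFP_NOIO may sleep and fall back to the reserve, so this
             * allocation is reliable even under memory pressure */
            struct my_req *req = mempool_alloc(my_pool, GFP_NOIO);

            if (req) {
                    req->id = 0;
                    mempool_free(req, my_pool);
            }
    }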

D | dmapool.c
    74  struct dma_pool *pool;  in show_pools() local
    84  list_for_each_entry(pool, &dev->dma_pools, pools) {  in show_pools()
    88  spin_lock_irq(&pool->lock);  in show_pools()
    89  list_for_each_entry(page, &pool->page_list, page_list) {  in show_pools()
    93  spin_unlock_irq(&pool->lock);  in show_pools()
    97  pool->name, blocks,  in show_pools()
    98  pages * (pool->allocation / pool->size),  in show_pools()
    99  pool->size, pages);  in show_pools()
    206  static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)  in pool_initialise_page() argument
    209  unsigned int next_boundary = pool->boundary;  in pool_initialise_page()
    [all …]
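A sketch of typical driver-side use of this allocator; the descriptor layout, alignment and boundary values are illustrative assumptions, not part of dmapool.c:

    #include <linux/device.h>
    #include <linux/dmapool.h>

    struct my_desc { __le32 ctrl; __le32 next; };  /* hypothetical HW layout */

    static int my_desc_demo(struct device *dev)
    {
            struct dma_pool *pool;
            struct my_desc *d;
            dma_addr_t dma;

            /* 16-byte-aligned blocks that never cross a 4 KiB boundary */
            pool = dma_pool_create("my_desc", dev, sizeof(struct my_desc),
                                   16, 4096);
            if (!pool)
                    return -ENOMEM;

            d = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (d) {
                    d->ctrl = cpu_to_le32(0);  /* fill via the CPU address */
                    /* ... hand `dma` (the bus address) to the device ... */
                    dma_pool_free(pool, d, dma);
            }
            dma_pool_destroy(pool);
            return 0;
    }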

D | zbud.c
    124  static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)  in zbud_zpool_evict() argument
    126  return zpool_evict(pool, handle);  in zbud_zpool_evict()
    139  static void zbud_zpool_destroy(void *pool)  in zbud_zpool_destroy() argument
    141  zbud_destroy_pool(pool);  in zbud_zpool_destroy()
    144  static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,  in zbud_zpool_malloc() argument
    147  return zbud_alloc(pool, size, gfp, handle);  in zbud_zpool_malloc()
    149  static void zbud_zpool_free(void *pool, unsigned long handle)  in zbud_zpool_free() argument
    151  zbud_free(pool, handle);  in zbud_zpool_free()
    154  static int zbud_zpool_shrink(void *pool, unsigned int pages,  in zbud_zpool_shrink() argument
    161  ret = zbud_reclaim_page(pool, 8);  in zbud_zpool_shrink()
    [all …]

D | zsmalloc.c
    283  static int create_handle_cache(struct zs_pool *pool)  in create_handle_cache() argument
    285  pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,  in create_handle_cache()
    287  return pool->handle_cachep ? 0 : 1;  in create_handle_cache()
    290  static void destroy_handle_cache(struct zs_pool *pool)  in destroy_handle_cache() argument
    292  if (pool->handle_cachep)  in destroy_handle_cache()
    293  kmem_cache_destroy(pool->handle_cachep);  in destroy_handle_cache()
    296  static unsigned long alloc_handle(struct zs_pool *pool)  in alloc_handle() argument
    298  return (unsigned long)kmem_cache_alloc(pool->handle_cachep,  in alloc_handle()
    299  pool->flags & ~__GFP_HIGHMEM);  in alloc_handle()
    302  static void free_handle(struct zs_pool *pool, unsigned long handle)  in free_handle() argument
    [all …]

D | zpool.c
    24  void *pool;  member
    84  int zpool_evict(void *pool, unsigned long handle)  in zpool_evict() argument
    90  if (zpool->pool == pool) {  in zpool_evict()
    173  zpool->pool = driver->create(name, gfp, ops);  in zpool_create_pool()
    176  if (!zpool->pool) {  in zpool_create_pool()
    210  zpool->driver->destroy(zpool->pool);  in zpool_destroy_pool()
    249  return zpool->driver->malloc(zpool->pool, size, gfp, handle);  in zpool_malloc()
    268  zpool->driver->free(zpool->pool, handle);  in zpool_free()
    291  return zpool->driver->shrink(zpool->pool, pages, reclaimed);  in zpool_shrink()
    319  return zpool->driver->map(zpool->pool, handle, mapmode);  in zpool_map_handle()
    [all …]
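zpool is a thin facade that dispatches to zbud or zsmalloc through the driver ops above. A hedged usage sketch, assuming the 4.1-era zpool_create_pool(type, name, gfp, ops) signature implied by the driver->create(name, gfp, ops) call at line 173; pool name and sizes are placeholders:

    #include <linux/zpool.h>
    #include <linux/string.h>

    static int demo_evict(struct zpool *pool, unsigned long handle)
    {
            return -EINVAL;        /* sketch: no writeback support */
    }

    static struct zpool_ops demo_zpool_ops = { .evict = demo_evict };

    static void zpool_demo(void)
    {
            struct zpool *zp;
            unsigned long handle;
            void *buf;

            /* "zbud" could equally be "zsmalloc"; both register drivers */
            zp = zpool_create_pool("zbud", "demo", GFP_KERNEL,
                                   &demo_zpool_ops);
            if (!zp)
                    return;
            if (zpool_malloc(zp, 100, GFP_KERNEL, &handle) == 0) {
                    /* a handle must be mapped before the data is touched */
                    buf = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
                    memset(buf, 0, 100);
                    zpool_unmap_handle(zp, handle);
                    zpool_free(zp, handle);
            }
            zpool_destroy_pool(zp);
    }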

D | zswap.c
    532  static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)  in zswap_writeback_entry() argument
    548  zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);  in zswap_writeback_entry()
    550  zpool_unmap_handle(pool, handle);  in zswap_writeback_entry()

D | Kconfig
    299  # On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
    301  # a 32-bit address to OHCI. So we need to use a bounce pool instead.
    303  # We also use the bounce pool to provide stable page writes for jbd. jbd
    558  compress them into a dynamically allocated RAM-based memory pool.

/linux-4.1.27/drivers/staging/android/ion/

D | ion_page_pool.c
    27  static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)  in ion_page_pool_alloc_pages() argument
    29  struct page *page = alloc_pages(pool->gfp_mask, pool->order);  in ion_page_pool_alloc_pages()
    33  ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,  in ion_page_pool_alloc_pages()
    38  static void ion_page_pool_free_pages(struct ion_page_pool *pool,  in ion_page_pool_free_pages() argument
    41  __free_pages(page, pool->order);  in ion_page_pool_free_pages()
    44  static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)  in ion_page_pool_add() argument
    46  mutex_lock(&pool->mutex);  in ion_page_pool_add()
    48  list_add_tail(&page->lru, &pool->high_items);  in ion_page_pool_add()
    49  pool->high_count++;  in ion_page_pool_add()
    51  list_add_tail(&page->lru, &pool->low_items);  in ion_page_pool_add()
    [all …]

D | ion_system_heap.c
    60  struct ion_page_pool *pool = heap->pools[order_to_index(order)];  in alloc_buffer_page() local
    64  page = ion_page_pool_alloc(pool);  in alloc_buffer_page()
    87  struct ion_page_pool *pool = heap->pools[order_to_index(order)];  in free_buffer_page() local
    89  ion_page_pool_free(pool, page);  in free_buffer_page()
    220  struct ion_page_pool *pool = sys_heap->pools[i];  in ion_system_heap_shrink() local
    222  nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);  in ion_system_heap_shrink()
    249  struct ion_page_pool *pool = sys_heap->pools[i];  in ion_system_heap_debug_show() local
    252  pool->high_count, pool->order,  in ion_system_heap_debug_show()
    253  (PAGE_SIZE << pool->order) * pool->high_count);  in ion_system_heap_debug_show()
    255  pool->low_count, pool->order,  in ion_system_heap_debug_show()
    [all …]

D | ion_chunk_heap.c
    29  struct gen_pool *pool;  member
    69  unsigned long paddr = gen_pool_alloc(chunk_heap->pool,  in ion_chunk_heap_allocate()
    84  gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),  in ion_chunk_heap_allocate()
    112  gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),  in ion_chunk_heap_free()
    162  chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +  in ion_chunk_heap_create()
    164  if (!chunk_heap->pool) {  in ion_chunk_heap_create()
    172  gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);  in ion_chunk_heap_create()
    191  gen_pool_destroy(chunk_heap->pool);  in ion_chunk_heap_destroy()

D | ion_carveout_heap.c
    30  struct gen_pool *pool;  member
    40  unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);  in ion_carveout_allocate()
    56  gen_pool_free(carveout_heap->pool, addr, size);  in ion_carveout_free()
    170  carveout_heap->pool = gen_pool_create(12, -1);  in ion_carveout_heap_create()
    171  if (!carveout_heap->pool) {  in ion_carveout_heap_create()
    176  gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,  in ion_carveout_heap_create()
    190  gen_pool_destroy(carveout_heap->pool);  in ion_carveout_heap_destroy()

D | ion_priv.h
    391  int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,

/linux-4.1.27/sound/core/seq/

D | seq_memory.c
    35  static inline int snd_seq_pool_available(struct snd_seq_pool *pool)  in snd_seq_pool_available() argument
    37  return pool->total_elements - atomic_read(&pool->counter);  in snd_seq_pool_available()
    40  static inline int snd_seq_output_ok(struct snd_seq_pool *pool)  in snd_seq_output_ok() argument
    42  return snd_seq_pool_available(pool) >= pool->room;  in snd_seq_output_ok()
    178  static inline void free_cell(struct snd_seq_pool *pool,  in free_cell() argument
    181  cell->next = pool->free;  in free_cell()
    182  pool->free = cell;  in free_cell()
    183  atomic_dec(&pool->counter);  in free_cell()
    189  struct snd_seq_pool *pool;  in snd_seq_cell_free() local
    193  pool = cell->pool;  in snd_seq_cell_free()
    [all …]
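free_cell() above pushes a cell onto a simple LIFO free list. An illustrative reduction of the idea in plain C (not the ALSA API; the real code guards this with a spinlock and uses an atomic counter):

    struct cell { struct cell *next; /* event payload ... */ };

    struct cell_pool {
            struct cell *free;    /* head of the LIFO free list */
            int total_elements;   /* cells owned by the pool */
            int counter;          /* cells currently handed out */
    };

    static void free_cell(struct cell_pool *pool, struct cell *cell)
    {
            cell->next = pool->free;       /* push */
            pool->free = cell;
            pool->counter--;
    }

    static struct cell *alloc_cell(struct cell_pool *pool)
    {
            struct cell *cell = pool->free;

            if (cell) {
                    pool->free = cell->next;   /* pop */
                    pool->counter++;
            }
            return cell;                       /* NULL when exhausted */
    }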

D | seq_memory.h
    32  struct snd_seq_pool *pool; /* used pool */  member
    68  int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
    72  static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)  in snd_seq_unused_cells() argument
    74  return pool ? pool->total_elements - atomic_read(&pool->counter) : 0;  in snd_seq_unused_cells()
    78  static inline int snd_seq_total_cells(struct snd_seq_pool *pool)  in snd_seq_total_cells() argument
    80  return pool ? pool->total_elements : 0;  in snd_seq_total_cells()
    84  int snd_seq_pool_init(struct snd_seq_pool *pool);
    87  int snd_seq_pool_done(struct snd_seq_pool *pool);
    93  int snd_seq_pool_delete(struct snd_seq_pool **pool);
    102  int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait);
    [all …]

D | seq_fifo.c
    39  f->pool = snd_seq_pool_new(poolsize);  in snd_seq_fifo_new()
    40  if (f->pool == NULL) {  in snd_seq_fifo_new()
    44  if (snd_seq_pool_init(f->pool) < 0) {  in snd_seq_fifo_new()
    45  snd_seq_pool_delete(&f->pool);  in snd_seq_fifo_new()
    82  if (f->pool) {  in snd_seq_fifo_delete()
    83  snd_seq_pool_done(f->pool);  in snd_seq_fifo_delete()
    84  snd_seq_pool_delete(&f->pool);  in snd_seq_fifo_delete()
    123  err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */  in snd_seq_fifo_event_in()
    238  if (snd_BUG_ON(!f || !f->pool))  in snd_seq_fifo_resize()
    252  oldpool = f->pool;  in snd_seq_fifo_resize()
    [all …]

D | seq_clientmgr.c
    119  return snd_seq_total_cells(client->pool) > 0;  in snd_seq_write_pool_allocated()
    229  client->pool = snd_seq_pool_new(poolsize);  in seq_create_client1()
    230  if (client->pool == NULL) {  in seq_create_client1()
    260  snd_seq_pool_delete(&client->pool);  in seq_create_client1()
    280  if (client->pool)  in seq_free_client1()
    281  snd_seq_pool_delete(&client->pool);  in seq_free_client1()
    959  err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);  in snd_seq_client_enqueue_event()
    1024  if (!client->accept_output || client->pool == NULL)  in snd_seq_write()
    1028  if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {  in snd_seq_write()
    1029  if (snd_seq_pool_init(client->pool) < 0)  in snd_seq_write()
    [all …]

D | seq_fifo.h
    31  struct snd_seq_pool *pool; /* FIFO pool */  member

D | seq_clientmgr.h
    65  struct snd_seq_pool *pool; /* memory pool for this client */  member

/linux-4.1.27/drivers/md/

D | dm-thin.c
    215  struct pool {  struct
    267  static enum pool_mode get_pool_mode(struct pool *pool);  argument
    268  static void metadata_operation_failed(struct pool *pool, const char *op, int r);
    275  struct pool *pool;  member
    295  struct pool *pool;  member
    320  static void wake_worker(struct pool *pool)  in wake_worker() argument
    322  queue_work(pool->wq, &pool->worker);  in wake_worker()
    327  static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,  in bio_detain() argument
    337  cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);  in bio_detain()
    339  r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);  in bio_detain()
    [all …]

D | dm-io.c
    25  mempool_t *pool;  member
    57  client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);  in dm_io_client_create()
    58  if (!client->pool)  in dm_io_client_create()
    68  if (client->pool)  in dm_io_client_create()
    69  mempool_destroy(client->pool);  in dm_io_client_create()
    77  mempool_destroy(client->pool);  in dm_io_client_destroy()
    124  mempool_free(io, io->client->pool);  in complete_io()
    417  io = mempool_alloc(client->pool, GFP_NOIO);  in sync_io()
    449  io = mempool_alloc(client->pool, GFP_NOIO);  in async_io()

D | multipath.c
    81  mempool_free(mp_bh, conf->pool);  in multipath_end_bh_io()
    119  mp_bh = mempool_alloc(conf->pool, GFP_NOIO);  in multipath_make_request()
    127  mempool_free(mp_bh, conf->pool);  in multipath_make_request()
    462  conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,  in multipath_run()
    464  if (conf->pool == NULL) {  in multipath_run()
    496  if (conf->pool)  in multipath_run()
    497  mempool_destroy(conf->pool);  in multipath_run()
    509  mempool_destroy(conf->pool);  in multipath_free()

D | multipath.h
    15  mempool_t *pool;  member

D | Makefile
    13  dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
    53  obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o

/linux-4.1.27/drivers/infiniband/core/

D | fmr_pool.c
    95  void (*flush_function)(struct ib_fmr_pool *pool,
    114  static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,  in ib_fmr_cache_lookup() argument
    122  if (!pool->cache_bucket)  in ib_fmr_cache_lookup()
    125  bucket = pool->cache_bucket + ib_fmr_hash(*page_list);  in ib_fmr_cache_lookup()
    137  static void ib_fmr_batch_release(struct ib_fmr_pool *pool)  in ib_fmr_batch_release() argument
    144  spin_lock_irq(&pool->pool_lock);  in ib_fmr_batch_release()
    146  list_for_each_entry(fmr, &pool->dirty_list, list) {  in ib_fmr_batch_release()
    159  list_splice_init(&pool->dirty_list, &unmap_list);  in ib_fmr_batch_release()
    160  pool->dirty_len = 0;  in ib_fmr_batch_release()
    162  spin_unlock_irq(&pool->pool_lock);  in ib_fmr_batch_release()
    [all …]

/linux-4.1.27/net/ceph/

D | msgpool.c
    12  struct ceph_msgpool *pool = arg;  in msgpool_alloc() local
    15  msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);  in msgpool_alloc()
    17  dout("msgpool_alloc %s failed\n", pool->name);  in msgpool_alloc()
    19  dout("msgpool_alloc %s %p\n", pool->name, msg);  in msgpool_alloc()
    20  msg->pool = pool;  in msgpool_alloc()
    27  struct ceph_msgpool *pool = arg;  in msgpool_free() local
    30  dout("msgpool_release %s %p\n", pool->name, msg);  in msgpool_free()
    31  msg->pool = NULL;  in msgpool_free()
    35  int ceph_msgpool_init(struct ceph_msgpool *pool, int type,  in ceph_msgpool_init() argument
    39  pool->type = type;  in ceph_msgpool_init()
    [all …]

D | osdmap.c
    380  if (l.pool < r.pool)  in pgid_cmp()
    382  if (l.pool > r.pool)  in pgid_cmp()
    433  pgid.pool, pgid.seed, pg);  in __lookup_pg_mapping()
    445  dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,  in __remove_pg_mapping()
    451  dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);  in __remove_pg_mapping()
    624  u64 pool;  in decode_pool_names() local
    629  ceph_decode_64_safe(p, end, pool, bad);  in decode_pool_names()
    631  dout(" pool %llu len %d\n", pool, len);  in decode_pool_names()
    633  pi = __lookup_pg_pool(&map->pg_pools, pool);  in decode_pool_names()
    803  u64 pool;  in __decode_pools() local
    [all …]

D | debugfs.c
    69  struct ceph_pg_pool_info *pool =  in osdmap_show() local
    73  pool->id, pool->pg_num, pool->pg_num_mask,  in osdmap_show()
    74  pool->read_tier, pool->write_tier);  in osdmap_show()
    91  seq_printf(s, "pg_temp %llu.%x [", pg->pgid.pool,  in osdmap_show()
    102  seq_printf(s, "primary_temp %llu.%x %d\n", pg->pgid.pool,  in osdmap_show()
    157  req->r_pgid.pool, req->r_pgid.seed);  in osdc_show()

D | osd_client.c
    402  req->r_base_oloc.pool = -1;  in ceph_osdc_alloc_request()
    403  req->r_target_oloc.pool = -1;  in ceph_osdc_alloc_request()
    806  req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);  in ceph_osdc_new_request()
    1348  if (req->r_target_oloc.pool == -1) {  in __calc_request_pg()
    1361  pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool);  in __calc_request_pg()
    1365  req->r_target_oloc.pool = pi->read_tier;  in __calc_request_pg()
    1368  req->r_target_oloc.pool = pi->write_tier;  in __calc_request_pg()
    1440  req->r_tid, pgid.pool, pgid.seed, o,  in __map_request()
    1488  (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);  in __send_request()
    1493  put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool);  in __send_request()
    [all …]

/linux-4.1.27/lib/

D | percpu_ida.c
    60  static inline void steal_tags(struct percpu_ida *pool,  in steal_tags() argument
    63  unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;  in steal_tags()
    66  for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);  in steal_tags()
    68  cpu = cpumask_next(cpu, &pool->cpus_have_tags);  in steal_tags()
    71  cpu = cpumask_first(&pool->cpus_have_tags);  in steal_tags()
    76  pool->cpu_last_stolen = cpu;  in steal_tags()
    77  remote = per_cpu_ptr(pool->tag_cpu, cpu);  in steal_tags()
    79  cpumask_clear_cpu(cpu, &pool->cpus_have_tags);  in steal_tags()
    106  static inline void alloc_global_tags(struct percpu_ida *pool,  in alloc_global_tags() argument
    110  pool->freelist, &pool->nr_free,  in alloc_global_tags()
    [all …]

D | genalloc.c
    154  struct gen_pool *pool;  in gen_pool_create() local
    156  pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);  in gen_pool_create()
    157  if (pool != NULL) {  in gen_pool_create()
    158  spin_lock_init(&pool->lock);  in gen_pool_create()
    159  INIT_LIST_HEAD(&pool->chunks);  in gen_pool_create()
    160  pool->min_alloc_order = min_alloc_order;  in gen_pool_create()
    161  pool->algo = gen_pool_first_fit;  in gen_pool_create()
    162  pool->data = NULL;  in gen_pool_create()
    164  return pool;  in gen_pool_create()
    181  int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,  in gen_pool_add_virt() argument
    [all …]
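A sketch of typical gen_pool usage, the pattern behind the ion, tcm and uncached.c hits elsewhere in this listing; the SRAM window, its size and the 32-byte granule order are assumptions:

    #include <linux/genalloc.h>
    #include <linux/sizes.h>

    static struct gen_pool *sram_pool;

    /* carve a (hypothetical) 64 KiB SRAM window into 2^5 = 32-byte granules */
    static int sram_pool_setup(unsigned long virt, phys_addr_t phys)
    {
            int ret;

            sram_pool = gen_pool_create(5, -1);    /* order 5, any NUMA node */
            if (!sram_pool)
                    return -ENOMEM;
            ret = gen_pool_add_virt(sram_pool, virt, phys, SZ_64K, -1);
            if (ret) {
                    gen_pool_destroy(sram_pool);
                    return ret;
            }
            return 0;
    }

    static void sram_pool_use(void)
    {
            unsigned long vaddr = gen_pool_alloc(sram_pool, 128);

            if (vaddr) {
                    /* translate back when a device needs the phys address */
                    phys_addr_t p = gen_pool_virt_to_phys(sram_pool, vaddr);

                    (void)p;
                    gen_pool_free(sram_pool, vaddr, 128);
            }
    }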

D | iommu-common.c
    110  struct iommu_pool *pool;  in iommu_tbl_range_alloc() local
    130  pool = &(iommu->large_pool);  in iommu_tbl_range_alloc()
    135  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
    137  spin_lock_irqsave(&pool->lock, flags);  in iommu_tbl_range_alloc()
    141  (*handle >= pool->start) && (*handle < pool->end))  in iommu_tbl_range_alloc()
    144  start = pool->hint;  in iommu_tbl_range_alloc()
    146  limit = pool->end;  in iommu_tbl_range_alloc()
    155  start = pool->start;  in iommu_tbl_range_alloc()
    164  spin_unlock(&(pool->lock));  in iommu_tbl_range_alloc()
    165  pool = &(iommu->pools[0]);  in iommu_tbl_range_alloc()
    [all …]

/linux-4.1.27/arch/metag/kernel/

D | tcm.c
    21  struct gen_pool *pool;  member
    29  struct tcm_pool *pool;  in find_pool() local
    32  pool = list_entry(lh, struct tcm_pool, list);  in find_pool()
    33  if (pool->tag == tag)  in find_pool()
    34  return pool;  in find_pool()
    52  struct tcm_pool *pool;  in tcm_alloc() local
    54  pool = find_pool(tag);  in tcm_alloc()
    55  if (!pool)  in tcm_alloc()
    58  vaddr = gen_pool_alloc(pool->pool, len);  in tcm_alloc()
    76  struct tcm_pool *pool;  in tcm_free() local
    [all …]

/linux-4.1.27/drivers/gpu/drm/ttm/

D | ttm_page_alloc_dma.c
    157  struct dma_pool *pool;  member
    306  static int ttm_set_pages_caching(struct dma_pool *pool,  in ttm_set_pages_caching() argument
    311  if (pool->type & IS_UC) {  in ttm_set_pages_caching()
    315  pool->dev_name, cpages);  in ttm_set_pages_caching()
    317  if (pool->type & IS_WC) {  in ttm_set_pages_caching()
    321  pool->dev_name, cpages);  in ttm_set_pages_caching()
    326  static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)  in __ttm_dma_free_page() argument
    329  dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);  in __ttm_dma_free_page()
    334  static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)  in __ttm_dma_alloc_page() argument
    342  d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,  in __ttm_dma_alloc_page()
    [all …]

D | ttm_page_alloc.c
    285  static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,  in ttm_pool_update_free_locked() argument
    288  pool->npages -= freed_pages;  in ttm_pool_update_free_locked()
    289  pool->nfrees += freed_pages;  in ttm_pool_update_free_locked()
    302  static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,  in ttm_page_pool_free() argument
    326  spin_lock_irqsave(&pool->lock, irq_flags);  in ttm_page_pool_free()
    328  list_for_each_entry_reverse(p, &pool->list, lru) {  in ttm_page_pool_free()
    336  __list_del(p->lru.prev, &pool->list);  in ttm_page_pool_free()
    338  ttm_pool_update_free_locked(pool, freed_pages);  in ttm_page_pool_free()
    343  spin_unlock_irqrestore(&pool->lock, irq_flags);  in ttm_page_pool_free()
    371  __list_del(&p->lru, &pool->list);  in ttm_page_pool_free()
    [all …]

/linux-4.1.27/drivers/staging/lustre/lustre/lov/

D | lov_pool.c
    55  static void lov_pool_getref(struct pool_desc *pool)  in lov_pool_getref() argument
    57  CDEBUG(D_INFO, "pool %p\n", pool);  in lov_pool_getref()
    58  atomic_inc(&pool->pool_refcount);  in lov_pool_getref()
    61  void lov_pool_putref(struct pool_desc *pool)  in lov_pool_putref() argument
    63  CDEBUG(D_INFO, "pool %p\n", pool);  in lov_pool_putref()
    64  if (atomic_dec_and_test(&pool->pool_refcount)) {  in lov_pool_putref()
    65  LASSERT(hlist_unhashed(&pool->pool_hash));  in lov_pool_putref()
    66  LASSERT(list_empty(&pool->pool_list));  in lov_pool_putref()
    67  LASSERT(pool->pool_proc_entry == NULL);  in lov_pool_putref()
    68  lov_ost_pool_free(&(pool->pool_rr.lqr_pool));  in lov_pool_putref()
    [all …]

D | lov_internal.h
    295  void lov_dump_pool(int level, struct pool_desc *pool);
    297  int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
    298  void lov_pool_putref(struct pool_desc *pool);

/linux-4.1.27/kernel/

D | workqueue.c
    199  struct worker_pool *pool; /* I: the associated pool */  member
    361  #define for_each_cpu_worker_pool(pool, cpu) \  argument
    362  for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
    363  (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
    364  (pool)++)
    378  #define for_each_pool(pool, pi) \  argument
    379  idr_for_each_entry(&worker_pool_idr, pool, pi) \
    393  #define for_each_pool_worker(worker, pool) \  argument
    394  list_for_each_entry((worker), &(pool)->workers, node) \
    395  if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
    [all …]

D | workqueue_internal.h
    38  struct worker_pool *pool; /* I: the associated pool */  member

/linux-4.1.27/net/rds/

D | ib_rdma.c
    49  struct rds_ib_mr_pool *pool;  member
    86  static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
    217  struct rds_ib_mr_pool *pool;  in rds_ib_create_mr_pool() local
    219  pool = kzalloc(sizeof(*pool), GFP_KERNEL);  in rds_ib_create_mr_pool()
    220  if (!pool)  in rds_ib_create_mr_pool()
    223  init_llist_head(&pool->free_list);  in rds_ib_create_mr_pool()
    224  init_llist_head(&pool->drop_list);  in rds_ib_create_mr_pool()
    225  init_llist_head(&pool->clean_list);  in rds_ib_create_mr_pool()
    226  mutex_init(&pool->flush_lock);  in rds_ib_create_mr_pool()
    227  init_waitqueue_head(&pool->flush_wait);  in rds_ib_create_mr_pool()
    [all …]

D | iw_rdma.c
    46  struct rds_iw_mr_pool *pool;  member
    78  static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
    80  static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
    81  static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
    84  static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
    85  static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
    89  static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
    336  struct rds_iw_mr_pool *pool;  local
    338  pool = kzalloc(sizeof(*pool), GFP_KERNEL);
    339  if (!pool) {
    [all …]

/linux-4.1.27/drivers/staging/octeon/

D | ethernet-mem.c
    46  static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)  in cvm_oct_fill_hw_skbuff() argument
    57  cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));  in cvm_oct_fill_hw_skbuff()
    69  static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)  in cvm_oct_free_hw_skbuff() argument
    74  memory = cvmx_fpa_alloc(pool);  in cvm_oct_free_hw_skbuff()
    85  pool, elements);  in cvm_oct_free_hw_skbuff()
    88  pool, elements);  in cvm_oct_free_hw_skbuff()
    99  static int cvm_oct_fill_hw_memory(int pool, int size, int elements)  in cvm_oct_fill_hw_memory() argument
    119  elements * size, pool);  in cvm_oct_fill_hw_memory()
    124  cvmx_fpa_free(fpa, pool, 0);  in cvm_oct_fill_hw_memory()
    136  static void cvm_oct_free_hw_memory(int pool, int size, int elements)  in cvm_oct_free_hw_memory() argument
    [all …]

D | ethernet-mem.h
    28  int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
    29  void cvm_oct_mem_empty_fpa(int pool, int size, int elements);

D | ethernet-tx.c
    292  hw_buffer.s.pool = 0;  in cvm_oct_xmit()
    296  hw_buffer.s.pool = 0;  in cvm_oct_xmit()
    620  work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;  in cvm_oct_xmit_pow()

/linux-4.1.27/drivers/dma/

D | coh901318_lli.c
    19  #define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)  argument
    20  #define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)  argument
    22  #define DEBUGFS_POOL_COUNTER_RESET(pool)  argument
    23  #define DEBUGFS_POOL_COUNTER_ADD(pool, add)  argument
    35  int coh901318_pool_create(struct coh901318_pool *pool,  in coh901318_pool_create() argument
    39  spin_lock_init(&pool->lock);  in coh901318_pool_create()
    40  pool->dev = dev;  in coh901318_pool_create()
    41  pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);  in coh901318_pool_create()
    43  DEBUGFS_POOL_COUNTER_RESET(pool);  in coh901318_pool_create()
    47  int coh901318_pool_destroy(struct coh901318_pool *pool)  in coh901318_pool_destroy() argument
    [all …]

D | coh901318.h
    53  int coh901318_pool_create(struct coh901318_pool *pool,
    62  int coh901318_pool_destroy(struct coh901318_pool *pool);
    72  coh901318_lli_alloc(struct coh901318_pool *pool,
    80  void coh901318_lli_free(struct coh901318_pool *pool,
    95  coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
    113  coh901318_lli_fill_single(struct coh901318_pool *pool,
    134  coh901318_lli_fill_sg(struct coh901318_pool *pool,

D | mmp_tdma.c
    130  struct gen_pool *pool;  member
    363  gpool = tdmac->pool;  in mmp_tdma_free_descriptor()
    413  gpool = tdmac->pool;  in mmp_tdma_alloc_descriptor()
    544  int type, struct gen_pool *pool)  in mmp_tdma_chan_init() argument
    566  tdmac->pool = pool;  in mmp_tdma_chan_init()
    632  struct gen_pool *pool = NULL;  in mmp_tdma_probe() local
    660  pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);  in mmp_tdma_probe()
    662  pool = sram_get_gpool("asram");  in mmp_tdma_probe()
    663  if (!pool) {  in mmp_tdma_probe()
    679  ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);  in mmp_tdma_probe()

D | dmaengine.c
    984  mempool_t *pool;  member
    1039  mempool_free(unmap, __get_unmap_pool(cnt)->pool);  in dmaengine_unmap()
    1056  if (p->pool)  in dmaengine_destroy_unmap_pool()
    1057  mempool_destroy(p->pool);  in dmaengine_destroy_unmap_pool()
    1058  p->pool = NULL;  in dmaengine_destroy_unmap_pool()
    1080  p->pool = mempool_create_slab_pool(1, p->cache);  in dmaengine_init_unmap_pool()
    1081  if (!p->pool)  in dmaengine_init_unmap_pool()
    1097  unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);  in dmaengine_get_unmap_data()

D | sun6i-dma.c
    161  struct dma_pool *pool;  member
    349  dma_pool_free(sdev->pool, v_lli, p_lli);  in sun6i_dma_free_desc()
    527  v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);  in sun6i_dma_prep_dma_memcpy()
    587  v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);  in sun6i_dma_prep_slave_sg()
    638  dma_pool_free(sdev->pool, v_lli, p_lli);  in sun6i_dma_prep_slave_sg()
    641  dma_pool_free(sdev->pool, prev, virt_to_phys(prev));  in sun6i_dma_prep_slave_sg()
    939  sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,  in sun6i_dma_probe()
    941  if (!sdc->pool) {  in sun6i_dma_probe()

D | pch_dma.c
    130  struct pci_pool *pool;  member
    447  desc = pci_pool_alloc(pd->pool, flags, &addr);  in pdc_alloc_desc()
    560  pci_pool_free(pd->pool, desc, desc->txd.phys);  in pd_free_chan_resources()
    891  pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,  in pch_dma_probe()
    893  if (!pd->pool) {  in pch_dma_probe()
    942  pci_pool_destroy(pd->pool);  in pch_dma_probe()
    974  pci_pool_destroy(pd->pool);  in pch_dma_remove()

D | coh901318.c
    1283  struct coh901318_pool pool;  member
    1341  int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;  in coh901318_debugfs_read()
    1914  coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);  in dma_tasklet()
    2145  coh901318_lli_free(&cohc->base->pool, &cohd->lli);  in coh901318_terminate_all()
    2154  coh901318_lli_free(&cohc->base->pool, &cohd->lli);  in coh901318_terminate_all()
    2261  lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);  in coh901318_prep_memcpy()
    2267  &cohc->base->pool, lli, src, size, dest,  in coh901318_prep_memcpy()
    2374  lli = coh901318_lli_alloc(&cohc->base->pool, len);  in coh901318_prep_slave_sg()
    2380  ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,  in coh901318_prep_slave_sg()
    2683  err = coh901318_pool_create(&base->pool, &pdev->dev,  in coh901318_probe()
    [all …]

D | amba-pl08x.c
    269  struct dma_pool *pool;  member
    937  txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);  in pl08x_fill_llis_for_desc()
    1160  dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);  in pl08x_free_txd()
    2120  pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,  in pl08x_probe()
    2122  if (!pl08x->pool) {  in pl08x_probe()
    2242  dma_pool_destroy(pl08x->pool);  in pl08x_probe()

D | ste_dma40.c
    822  struct d40_lcla_pool *pool = &chan->base->lcla_pool;  in d40_log_lli_to_lcxa() local
    882  struct d40_log_lli *lcla = pool->base + lcla_offset;  in d40_log_lli_to_lcxa()
    917  pool->dma_addr, lcla_offset,  in d40_log_lli_to_lcxa()
    3406  struct d40_lcla_pool *pool = &base->lcla_pool;  in d40_lcla_allocate() local
    3472  pool->dma_addr = dma_map_single(base->dev, pool->base,  in d40_lcla_allocate()
    3475  if (dma_mapping_error(base->dev, pool->dma_addr)) {  in d40_lcla_allocate()
    3476  pool->dma_addr = 0;  in d40_lcla_allocate()

/linux-4.1.27/include/linux/

D | zpool.h
    17  int (*evict)(struct zpool *pool, unsigned long handle);
    42  char *zpool_get_type(struct zpool *pool);
    44  void zpool_destroy_pool(struct zpool *pool);
    46  int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
    49  void zpool_free(struct zpool *pool, unsigned long handle);
    51  int zpool_shrink(struct zpool *pool, unsigned int pages,
    54  void *zpool_map_handle(struct zpool *pool, unsigned long handle,
    57  void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
    59  u64 zpool_get_total_size(struct zpool *pool);
    85  void (*destroy)(void *pool);
    [all …]

D | zbud.h
    9  int (*evict)(struct zbud_pool *pool, unsigned long handle);
    13  void zbud_destroy_pool(struct zbud_pool *pool);
    14  int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
    16  void zbud_free(struct zbud_pool *pool, unsigned long handle);
    17  int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
    18  void *zbud_map(struct zbud_pool *pool, unsigned long handle);
    19  void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
    20  u64 zbud_get_pool_size(struct zbud_pool *pool);
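A hedged sketch built from the zbud declarations above; the evict callback and the 200-byte size are placeholders. zbud packs at most two compressed objects per page (hence the name), which bounds worst-case fragmentation:

    #include <linux/zbud.h>
    #include <linux/string.h>

    static int demo_zbud_evict(struct zbud_pool *pool, unsigned long handle)
    {
            return -EINVAL;          /* sketch: eviction not supported */
    }

    static struct zbud_ops demo_zbud_ops = { .evict = demo_zbud_evict };

    static void zbud_demo(void)
    {
            struct zbud_pool *pool;
            unsigned long handle;
            void *buf;

            pool = zbud_create_pool(GFP_KERNEL, &demo_zbud_ops);
            if (!pool)
                    return;
            if (zbud_alloc(pool, 200, GFP_KERNEL, &handle) == 0) {
                    buf = zbud_map(pool, handle);
                    memset(buf, 0, 200);
                    zbud_unmap(pool, handle);
                    zbud_free(pool, handle);
            }
            zbud_destroy_pool(pool);
    }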

D | percpu_ida.h
    65  int percpu_ida_alloc(struct percpu_ida *pool, int state);
    66  void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
    68  void percpu_ida_destroy(struct percpu_ida *pool);
    69  int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
    71  static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)  in percpu_ida_init() argument
    73  return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,  in percpu_ida_init()
    78  int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
    81  unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
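A minimal sketch of this tag allocator; the 256-tag capacity is arbitrary. Tags are cached per CPU, with steal_tags() in percpu_ida.c above rebalancing when a CPU runs dry:

    #include <linux/percpu_ida.h>
    #include <linux/sched.h>

    static struct percpu_ida tag_pool;

    static int tags_setup(void)
    {
            /* 256 tags shared across CPUs, with per-cpu caching */
            return percpu_ida_init(&tag_pool, 256);
    }

    static void tags_use(void)
    {
            /* TASK_RUNNING: do not sleep; returns -ENOSPC when exhausted.
             * TASK_UNINTERRUPTIBLE would wait for a tag to be freed. */
            int tag = percpu_ida_alloc(&tag_pool, TASK_RUNNING);

            if (tag >= 0)
                    percpu_ida_free(&tag_pool, tag);
    }

    static void tags_teardown(void)
    {
            percpu_ida_destroy(&tag_pool);
    }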

D | zsmalloc.h
    40  void zs_destroy_pool(struct zs_pool *pool);
    42  unsigned long zs_malloc(struct zs_pool *pool, size_t size);
    43  void zs_free(struct zs_pool *pool, unsigned long obj);
    45  void *zs_map_object(struct zs_pool *pool, unsigned long handle,
    47  void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
    49  unsigned long zs_get_total_pages(struct zs_pool *pool);
    50  unsigned long zs_compact(struct zs_pool *pool);
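A usage sketch for the declarations above, assuming the 4.1-era zs_create_pool(name, flags) signature (not shown in these hits); the pool name and 37-byte object are placeholders. Unlike kmalloc(), zs_malloc() returns an opaque handle that must be mapped before the data can be touched:

    #include <linux/zsmalloc.h>
    #include <linux/string.h>

    static void zsmalloc_demo(void)
    {
            struct zs_pool *pool;
            unsigned long handle;
            void *buf;

            pool = zs_create_pool("demo", GFP_KERNEL);
            if (!pool)
                    return;
            handle = zs_malloc(pool, 37);     /* 0 means failure */
            if (handle) {
                    buf = zs_map_object(pool, handle, ZS_MM_RW);
                    memset(buf, 0, 37);
                    zs_unmap_object(pool, handle);  /* keep mappings short */
                    zs_free(pool, handle);
            }
            zs_destroy_pool(pool);
    }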

D | genalloc.h
    77  extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
    92  static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,  in gen_pool_add() argument
    95  return gen_pool_add_virt(pool, addr, -1, size, nid);  in gen_pool_add()
    99  extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
    107  extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
    124  bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,

D | dmapool.h
    22  void dma_pool_destroy(struct dma_pool *pool);
    24  void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    27  void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
    34  void dmam_pool_destroy(struct dma_pool *pool);

D | mempool.h
    32  extern int mempool_resize(mempool_t *pool, int new_min_nr);
    33  extern void mempool_destroy(mempool_t *pool);
    34  extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
    35  extern void mempool_free(void *element, mempool_t *pool);

D | agpgart.h
    104  struct agp_memory *pool;  member

D | pci.h
    1202  #define pci_pool_destroy(pool) dma_pool_destroy(pool)  argument
    1203  #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)  argument
    1204  #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)  argument

/linux-4.1.27/arch/mips/include/asm/octeon/

D | cvmx-fpa.h
    104  static inline const char *cvmx_fpa_get_name(uint64_t pool)  in cvmx_fpa_get_name() argument
    106  return cvmx_fpa_pool_info[pool].name;  in cvmx_fpa_get_name()
    115  static inline void *cvmx_fpa_get_base(uint64_t pool)  in cvmx_fpa_get_base() argument
    117  return cvmx_fpa_pool_info[pool].base;  in cvmx_fpa_get_base()
    129  static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)  in cvmx_fpa_is_member() argument
    131  return ((ptr >= cvmx_fpa_pool_info[pool].base) &&  in cvmx_fpa_is_member()
    133  ((char *)(cvmx_fpa_pool_info[pool].base)) +  in cvmx_fpa_is_member()
    134  cvmx_fpa_pool_info[pool].size *  in cvmx_fpa_is_member()
    135  cvmx_fpa_pool_info[pool].starting_element_count));  in cvmx_fpa_is_member()
    183  static inline void *cvmx_fpa_alloc(uint64_t pool)  in cvmx_fpa_alloc() argument
    [all …]

D | cvmx-packet.h
    54  uint64_t pool:3;  member
    62  uint64_t pool:3;

D | cvmx-helper-util.h
    191  buffer_ptr.s.pool, 0);  in cvmx_helper_free_packet_data()

D | cvmx-pko-defs.h
    192  uint64_t pool:3;  member
    198  uint64_t pool:3;
    282  uint64_t pool:3;  member
    288  uint64_t pool:3;
    297  uint64_t pool:3;  member
    303  uint64_t pool:3;
    429  uint64_t pool:3;  member
    435  uint64_t pool:3;
    505  uint64_t pool:3;  member
    511  uint64_t pool:3;
    [all …]

/linux-4.1.27/drivers/net/ethernet/ibm/

D | ibmveth.c
    143  static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,  in ibmveth_init_buffer_pool() argument
    147  pool->size = pool_size;  in ibmveth_init_buffer_pool()
    148  pool->index = pool_index;  in ibmveth_init_buffer_pool()
    149  pool->buff_size = buff_size;  in ibmveth_init_buffer_pool()
    150  pool->threshold = pool_size * 7 / 8;  in ibmveth_init_buffer_pool()
    151  pool->active = pool_active;  in ibmveth_init_buffer_pool()
    155  static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)  in ibmveth_alloc_buffer_pool() argument
    159  pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);  in ibmveth_alloc_buffer_pool()
    161  if (!pool->free_map)  in ibmveth_alloc_buffer_pool()
    164  pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);  in ibmveth_alloc_buffer_pool()
    [all …]

/linux-4.1.27/drivers/gpu/drm/i915/

D | i915_gem_batch_pool.c
    47  struct i915_gem_batch_pool *pool)  in i915_gem_batch_pool_init() argument
    49  pool->dev = dev;  in i915_gem_batch_pool_init()
    50  INIT_LIST_HEAD(&pool->cache_list);  in i915_gem_batch_pool_init()
    59  void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)  in i915_gem_batch_pool_fini() argument
    61  WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));  in i915_gem_batch_pool_fini()
    63  while (!list_empty(&pool->cache_list)) {  in i915_gem_batch_pool_fini()
    65  list_first_entry(&pool->cache_list,  in i915_gem_batch_pool_fini()
    90  i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,  in i915_gem_batch_pool_get() argument
    96  WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));  in i915_gem_batch_pool_get()
    99  &pool->cache_list, batch_pool_list) {  in i915_gem_batch_pool_get()
    [all …]

/linux-4.1.27/drivers/net/ethernet/ti/

D | davinci_cpdma.c
    107  struct cpdma_desc_pool *pool;  member
    159  struct cpdma_desc_pool *pool;  in cpdma_desc_pool_create() local
    161  pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);  in cpdma_desc_pool_create()
    162  if (!pool)  in cpdma_desc_pool_create()
    165  spin_lock_init(&pool->lock);  in cpdma_desc_pool_create()
    167  pool->dev = dev;  in cpdma_desc_pool_create()
    168  pool->mem_size = size;  in cpdma_desc_pool_create()
    169  pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);  in cpdma_desc_pool_create()
    170  pool->num_desc = size / pool->desc_size;  in cpdma_desc_pool_create()
    172  bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);  in cpdma_desc_pool_create()
    [all …]

D | cpts.c
    74  if (list_empty(&cpts->pool)) {  in cpts_fifo_read()
    78  event = list_first_entry(&cpts->pool, struct cpts_event, list);  in cpts_fifo_read()
    119  list_add(&event->list, &cpts->pool);  in cpts_systim_read()
    305  list_add(&event->list, &cpts->pool);  in cpts_find_ts()
    314  list_add(&event->list, &cpts->pool);  in cpts_find_ts()
    378  INIT_LIST_HEAD(&cpts->pool);  in cpts_register()
    380  list_add(&cpts->pool_data[i].list, &cpts->pool);  in cpts_register()

D | cpts.h
    125  struct list_head pool;  member

/linux-4.1.27/drivers/mtd/ubi/

D | fastmap-wl.c
    57  struct ubi_fm_pool *pool)  in return_unused_pool_pebs() argument
    62  for (i = pool->used; i < pool->size; i++) {  in return_unused_pool_pebs()
    63  e = ubi->lookuptbl[pool->pebs[i]];  in return_unused_pool_pebs()
    122  struct ubi_fm_pool *pool = &ubi->fm_pool;  in ubi_refill_pools() local
    129  return_unused_pool_pebs(ubi, pool);  in ubi_refill_pools()
    132  pool->size = 0;  in ubi_refill_pools()
    136  if (pool->size < pool->max_size) {  in ubi_refill_pools()
    144  pool->pebs[pool->size] = e->pnum;  in ubi_refill_pools()
    145  pool->size++;  in ubi_refill_pools()
    169  pool->used = 0;  in ubi_refill_pools()
    [all …]

/linux-4.1.27/sound/core/seq/oss/

D | seq_oss_writeq.c
    40  struct snd_seq_client_pool pool;  in snd_seq_oss_writeq_new() local
    51  memset(&pool, 0, sizeof(pool));  in snd_seq_oss_writeq_new()
    52  pool.client = dp->cseq;  in snd_seq_oss_writeq_new()
    53  pool.output_pool = maxlen;  in snd_seq_oss_writeq_new()
    54  pool.output_room = maxlen / 2;  in snd_seq_oss_writeq_new()
    56  snd_seq_oss_control(dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);  in snd_seq_oss_writeq_new()
    154  struct snd_seq_client_pool pool;  in snd_seq_oss_writeq_get_free_size() local
    155  pool.client = q->dp->cseq;  in snd_seq_oss_writeq_get_free_size()
    156  snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);  in snd_seq_oss_writeq_get_free_size()
    157  return pool.output_free;  in snd_seq_oss_writeq_get_free_size()
    [all …]

/linux-4.1.27/drivers/scsi/lpfc/

D | lpfc_mem.c
    83  struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;  in lpfc_mem_alloc() local
    115  pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *  in lpfc_mem_alloc()
    117  if (!pool->elements)  in lpfc_mem_alloc()
    120  pool->max_count = 0;  in lpfc_mem_alloc()
    121  pool->current_count = 0;  in lpfc_mem_alloc()
    123  pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,  in lpfc_mem_alloc()
    124  GFP_KERNEL, &pool->elements[i].phys);  in lpfc_mem_alloc()
    125  if (!pool->elements[i].virt)  in lpfc_mem_alloc()
    127  pool->max_count++;  in lpfc_mem_alloc()
    128  pool->current_count++;  in lpfc_mem_alloc()
    [all …]

/linux-4.1.27/drivers/scsi/

D | scsi.c
    183  struct scsi_host_cmd_pool *pool = shost->cmd_pool;  in scsi_host_free_command() local
    187  kmem_cache_free(pool->sense_slab, cmd->sense_buffer);  in scsi_host_free_command()
    188  kmem_cache_free(pool->cmd_slab, cmd);  in scsi_host_free_command()
    202  struct scsi_host_cmd_pool *pool = shost->cmd_pool;  in scsi_host_alloc_command() local
    205  cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);  in scsi_host_alloc_command()
    209  cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,  in scsi_host_alloc_command()
    210  gfp_mask | pool->gfp_mask);  in scsi_host_alloc_command()
    223  kmem_cache_free(pool->sense_slab, cmd->sense_buffer);  in scsi_host_alloc_command()
    225  kmem_cache_free(pool->cmd_slab, cmd);  in scsi_host_alloc_command()
    351  scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)  in scsi_free_host_cmd_pool() argument
    [all …]

D | scsi_lib.c
    48  mempool_t *pool;  member
    573  mempool_free(sgl, sgp->pool);  in scsi_sg_free()
    581  return mempool_alloc(sgp->pool, gfp_mask);  in scsi_sg_alloc()
    2290  sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,  in scsi_init_queue()
    2292  if (!sgp->pool) {  in scsi_init_queue()
    2304  if (sgp->pool)  in scsi_init_queue()
    2305  mempool_destroy(sgp->pool);  in scsi_init_queue()
    2322  mempool_destroy(sgp->pool);  in scsi_exit_queue()

D | libiscsi.c
    2535  q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);  in iscsi_pool_init()
    2536  if (q->pool == NULL)  in iscsi_pool_init()
    2539  kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));  in iscsi_pool_init()
    2542  q->pool[i] = kzalloc(item_size, GFP_KERNEL);  in iscsi_pool_init()
    2543  if (q->pool[i] == NULL) {  in iscsi_pool_init()
    2547  kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));  in iscsi_pool_init()
    2551  *items = q->pool + max;  in iscsi_pool_init()
    2552  memcpy(*items, q->pool, max * sizeof(void *));  in iscsi_pool_init()
    2568  kfree(q->pool[i]);  in iscsi_pool_free()
    2569  kfree(q->pool);  in iscsi_pool_free()

/linux-4.1.27/drivers/xen/

D | tmem.c
    171  static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,  in tmem_cleancache_put_page() argument
    178  if (pool < 0)  in tmem_cleancache_put_page()
    183  (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);  in tmem_cleancache_put_page()
    186  static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,  in tmem_cleancache_get_page() argument
    195  if (pool < 0)  in tmem_cleancache_get_page()
    199  ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);  in tmem_cleancache_get_page()
    206  static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,  in tmem_cleancache_flush_page() argument
    212  if (pool < 0)  in tmem_cleancache_flush_page()
    216  (void)xen_tmem_flush_page((u32)pool, oid, ind);  in tmem_cleancache_flush_page()
    219  static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)  in tmem_cleancache_flush_inode() argument
    [all …]

/linux-4.1.27/drivers/atm/

D | ambassador.c
    687  static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {  in rx_give() argument
    688  amb_rxq * rxq = &dev->rxq[pool];  in rx_give()
    691  PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);  in rx_give()
    702  wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));  in rx_give()
    712  static int rx_take (amb_dev * dev, unsigned char pool) {  in rx_take() argument
    713  amb_rxq * rxq = &dev->rxq[pool];  in rx_take()
    716  PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);  in rx_take()
    745  static void drain_rx_pool (amb_dev * dev, unsigned char pool) {  in drain_rx_pool() argument
    746  amb_rxq * rxq = &dev->rxq[pool];  in drain_rx_pool()
    748  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);  in drain_rx_pool()
    [all …]

D | zatm.c
    178  static void refill_pool(struct atm_dev *dev,int pool)  in refill_pool() argument
    188  size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 :  in refill_pool()
    189  pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head);  in refill_pool()
    196  offset = zatm_dev->pool_info[pool].offset+  in refill_pool()
    201  free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) &  in refill_pool()
    204  if (free >= zatm_dev->pool_info[pool].low_water) return;  in refill_pool()
    206  zpeekl(zatm_dev,zatm_dev->pool_base+2*pool),  in refill_pool()
    207  zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1));  in refill_pool()
    211  while (free < zatm_dev->pool_info[pool].high_water) {  in refill_pool()
    233  if (zatm_dev->last_free[pool])  in refill_pool()
    [all …]

D | zatm.h
    45  int pool; /* free buffer pool */  member
    67  struct sk_buff_head pool[NR_POOLS];/* free buffer pools */  member

D | idt77252.h
    788  u32 pool; /* sb_pool handle */  member
    796  (((struct idt77252_skb_prv *)(ATM_SKB(skb)+1))->pool)

D | ambassador.h
    617  unsigned char pool;  member

/linux-4.1.27/drivers/s390/scsi/

D | zfcp_aux.c
    204  adapter->pool.erp_req =  in zfcp_allocate_low_mem_buffers()
    206  if (!adapter->pool.erp_req)  in zfcp_allocate_low_mem_buffers()
    209  adapter->pool.gid_pn_req =  in zfcp_allocate_low_mem_buffers()
    211  if (!adapter->pool.gid_pn_req)  in zfcp_allocate_low_mem_buffers()
    214  adapter->pool.scsi_req =  in zfcp_allocate_low_mem_buffers()
    216  if (!adapter->pool.scsi_req)  in zfcp_allocate_low_mem_buffers()
    219  adapter->pool.scsi_abort =  in zfcp_allocate_low_mem_buffers()
    221  if (!adapter->pool.scsi_abort)  in zfcp_allocate_low_mem_buffers()
    224  adapter->pool.status_read_req =  in zfcp_allocate_low_mem_buffers()
    227  if (!adapter->pool.status_read_req)  in zfcp_allocate_low_mem_buffers()
    [all …]

D | zfcp_fsf.c
    80  if (likely(req->pool)) {  in zfcp_fsf_req_free()
    82  mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);  in zfcp_fsf_req_free()
    83  mempool_free(req, req->pool);  in zfcp_fsf_req_free()
    217  mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);  in zfcp_fsf_status_read_handler()
    265  mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);  in zfcp_fsf_status_read_handler()
    647  static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)  in zfcp_fsf_alloc() argument
    651  if (likely(pool))  in zfcp_fsf_alloc()
    652  req = mempool_alloc(pool, GFP_ATOMIC);  in zfcp_fsf_alloc()
    660  req->pool = pool;  in zfcp_fsf_alloc()
    664  static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)  in zfcp_qtcb_alloc() argument
    [all …]

D | zfcp_def.h
    185  struct zfcp_adapter_mempool pool; /* Adapter memory pools */  member
    312  mempool_t *pool;  member

D | zfcp_fc.c
    381  adapter->pool.gid_pn_req,  in zfcp_fc_ns_gid_pn_request()
    401  fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);  in zfcp_fc_ns_gid_pn()
    415  mempool_free(fc_req, adapter->pool.gid_pn);  in zfcp_fc_ns_gid_pn()

/linux-4.1.27/drivers/staging/i2o/

D | memory.c
    270  int i2o_pool_alloc(struct i2o_pool *pool, const char *name,  in i2o_pool_alloc() argument
    273  pool->name = kstrdup(name, GFP_KERNEL);  in i2o_pool_alloc()
    274  if (!pool->name)  in i2o_pool_alloc()
    277  pool->slab =  in i2o_pool_alloc()
    278  kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);  in i2o_pool_alloc()
    279  if (!pool->slab)  in i2o_pool_alloc()
    282  pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);  in i2o_pool_alloc()
    283  if (!pool->mempool)  in i2o_pool_alloc()
    289  kmem_cache_destroy(pool->slab);  in i2o_pool_alloc()
    292  kfree(pool->name);  in i2o_pool_alloc()
    [all …]

D | i2o_block.h
    68  mempool_t *pool;  member

D | i2o_block.c
    285  ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);  in i2o_block_request_alloc()
    303  mempool_free(ireq, i2o_blk_req_pool.pool);  in i2o_block_request_free()
    1162  i2o_blk_req_pool.pool =  in i2o_block_init()
    1165  if (!i2o_blk_req_pool.pool) {  in i2o_block_init()
    1194  mempool_destroy(i2o_blk_req_pool.pool);  in i2o_block_init()
    1218  mempool_destroy(i2o_blk_req_pool.pool);  in i2o_block_exit()

D | i2o.h
    707  extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
    709  extern void i2o_pool_free(struct i2o_pool *pool);

/linux-4.1.27/tools/hv/

D | hv_kvp_daemon.c
    123  static void kvp_acquire_lock(int pool)  in kvp_acquire_lock() argument
    128  if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) {  in kvp_acquire_lock()
    129  syslog(LOG_ERR, "Failed to acquire the lock pool: %d; error: %d %s", pool,  in kvp_acquire_lock()
    135  static void kvp_release_lock(int pool)  in kvp_release_lock() argument
    140  if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) {  in kvp_release_lock()
    141  syslog(LOG_ERR, "Failed to release the lock pool: %d; error: %d %s", pool,  in kvp_release_lock()
    147  static void kvp_update_file(int pool)  in kvp_update_file() argument
    155  kvp_acquire_lock(pool);  in kvp_update_file()
    157  filep = fopen(kvp_file_info[pool].fname, "we");  in kvp_update_file()
    159  syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool,  in kvp_update_file()
    [all …]
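kvp_acquire_lock()/kvp_release_lock() above use POSIX advisory record locks. A stripped-down user-space sketch of the same pattern (the fd is assumed open; error handling and syslog reporting trimmed):

    #include <fcntl.h>
    #include <unistd.h>

    static void acquire_lock(int fd)
    {
            /* l_start/l_len of 0 lock the whole file */
            struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

            /* F_SETLKW blocks until the lock can be taken */
            fcntl(fd, F_SETLKW, &fl);
    }

    static void release_lock(int fd)
    {
            struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET };

            /* F_SETLK suffices here: unlocking never blocks */
            fcntl(fd, F_SETLK, &fl);
    }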

/linux-4.1.27/arch/ia64/kernel/

D | uncached.c
    35  struct gen_pool *pool;  member
    156  status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);  in uncached_add_chunk()
    202  if (uc_pool->pool == NULL)  in uncached_alloc_page()
    205  uc_addr = gen_pool_alloc(uc_pool->pool,  in uncached_alloc_page()
    229  struct gen_pool *pool = uncached_pools[nid].pool;  in uncached_free_page() local
    231  if (unlikely(pool == NULL))  in uncached_free_page()
    237  gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);  in uncached_free_page()
    255  struct gen_pool *pool = uncached_pools[nid].pool;  in uncached_build_memmap() local
    260  if (pool != NULL) {  in uncached_build_memmap()
    262  (void) gen_pool_add(pool, uc_start, size, nid);  in uncached_build_memmap()
    [all …]

/linux-4.1.27/drivers/scsi/megaraid/

D | megaraid_mm.c
    514  mm_dmapool_t *pool;  in mraid_mm_attach_buf() local
    531  pool = &adp->dma_pool_list[i];  in mraid_mm_attach_buf()
    533  if (xferlen > pool->buf_size)  in mraid_mm_attach_buf()
    539  spin_lock_irqsave(&pool->lock, flags);  in mraid_mm_attach_buf()
    541  if (!pool->in_use) {  in mraid_mm_attach_buf()
    543  pool->in_use = 1;  in mraid_mm_attach_buf()
    545  kioc->buf_vaddr = pool->vaddr;  in mraid_mm_attach_buf()
    546  kioc->buf_paddr = pool->paddr;  in mraid_mm_attach_buf()
    548  spin_unlock_irqrestore(&pool->lock, flags);  in mraid_mm_attach_buf()
    552  spin_unlock_irqrestore(&pool->lock, flags);  in mraid_mm_attach_buf()
    [all …]

/linux-4.1.27/drivers/usb/core/

D | buffer.c
    74  hcd->pool[i] = dma_pool_create(name, hcd->self.controller,  in hcd_buffer_create()
    76  if (!hcd->pool[i]) {  in hcd_buffer_create()
    97  struct dma_pool *pool = hcd->pool[i];  in hcd_buffer_destroy() local
    98  if (pool) {  in hcd_buffer_destroy()
    99  dma_pool_destroy(pool);  in hcd_buffer_destroy()
    100  hcd->pool[i] = NULL;  in hcd_buffer_destroy()
    129  return dma_pool_alloc(hcd->pool[i], mem_flags, dma);  in hcd_buffer_alloc()
    155  dma_pool_free(hcd->pool[i], addr, dma);  in hcd_buffer_free()

/linux-4.1.27/drivers/soc/ti/

D | knav_qmss_queue.c
    670  static void kdesc_fill_pool(struct knav_pool *pool)  in kdesc_fill_pool() argument
    675  region = pool->region;  in kdesc_fill_pool()
    676  pool->desc_size = region->desc_size;  in kdesc_fill_pool()
    677  for (i = 0; i < pool->num_desc; i++) {  in kdesc_fill_pool()
    678  int index = pool->region_offset + i;  in kdesc_fill_pool()
    682  dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);  in kdesc_fill_pool()
    683  dma_sync_single_for_device(pool->dev, dma_addr, dma_size,  in kdesc_fill_pool()
    685  knav_queue_push(pool->queue, dma_addr, dma_size, 0);  in kdesc_fill_pool()
    690  static void kdesc_empty_pool(struct knav_pool *pool)  in kdesc_empty_pool() argument
    697  if (!pool->queue)  in kdesc_empty_pool()
    [all …]

D | knav_qmss.h
    361  #define for_each_pool(kdev, pool) \  argument
    362  list_for_each_entry(pool, &kdev->pools, list)

/linux-4.1.27/arch/arm/common/

D | dmabounce.c
    56  struct dmabounce_pool *pool;  member
    63  struct dma_pool *pool;  member
    111  struct dmabounce_pool *pool;  in alloc_safe_buffer() local
    119  pool = &device_info->small;  in alloc_safe_buffer()
    121  pool = &device_info->large;  in alloc_safe_buffer()
    123  pool = NULL;  in alloc_safe_buffer()
    135  buf->pool = pool;  in alloc_safe_buffer()
    137  if (pool) {  in alloc_safe_buffer()
    138  buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,  in alloc_safe_buffer()
    154  if (pool)  in alloc_safe_buffer()
    [all …]

/linux-4.1.27/drivers/infiniband/hw/ehca/

D | ehca_irq.c
    75  static struct ehca_comp_pool *pool;  variable
    656  static int find_next_online_cpu(struct ehca_comp_pool *pool)  in find_next_online_cpu() argument
    665  spin_lock_irqsave(&pool->last_cpu_lock, flags);  in find_next_online_cpu()
    667  cpu = cpumask_next(pool->last_cpu, cpu_online_mask);  in find_next_online_cpu()
    670  pool->last_cpu = cpu;  in find_next_online_cpu()
    671  } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);  in find_next_online_cpu()
    672  spin_unlock_irqrestore(&pool->last_cpu_lock, flags);  in find_next_online_cpu()
    706  cpu_id = find_next_online_cpu(pool);  in queue_comp_task()
    709  cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);  in queue_comp_task()
    710  thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);  in queue_comp_task()
    [all …]

/linux-4.1.27/Documentation/device-mapper/

D | thin-provisioning.txt
    54  The pool device ties together the metadata volume and the data volume.
    63  Setting up a fresh pool device
    66  Setting up a pool device requires a valid metadata device, and a
    85  Reloading a pool table
    88  You may reload a pool's table, indeed this is how the pool is resized
    94  Using an existing pool device
    97  dmsetup create pool \
    98  --table "0 20971520 thin-pool $metadata_dev $data_dev \
    105  thin-pool is created. People primarily interested in thin provisioning
    114  extend the pool device. Only one such event will be sent.
    [all …]

/linux-4.1.27/net/sunrpc/

D | svc_xprt.c
    325  struct svc_pool *pool;  in svc_xprt_do_enqueue() local
    345  pool = svc_pool_for_cpu(xprt->xpt_server, cpu);  in svc_xprt_do_enqueue()
    347  atomic_long_inc(&pool->sp_stats.packets);  in svc_xprt_do_enqueue()
    352  list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {  in svc_xprt_do_enqueue()
    378  atomic_long_inc(&pool->sp_stats.threads_woken);  in svc_xprt_do_enqueue()
    394  spin_lock_bh(&pool->sp_lock);  in svc_xprt_do_enqueue()
    395  list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);  in svc_xprt_do_enqueue()
    396  pool->sp_stats.sockets_queued++;  in svc_xprt_do_enqueue()
    397  spin_unlock_bh(&pool->sp_lock);  in svc_xprt_do_enqueue()
    422  static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)  in svc_xprt_dequeue() argument
    [all …]

D | svc.c
    473  struct svc_pool *pool = &serv->sv_pools[i];  in __svc_create() local
    478  pool->sp_id = i;  in __svc_create()
    479  INIT_LIST_HEAD(&pool->sp_sockets);  in __svc_create()
    480  INIT_LIST_HEAD(&pool->sp_all_threads);  in __svc_create()
    481  spin_lock_init(&pool->sp_lock);  in __svc_create()
    607  svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)  in svc_prepare_thread() argument
    619  rqstp->rq_pool = pool;  in svc_prepare_thread()
    620  spin_lock_bh(&pool->sp_lock);  in svc_prepare_thread()
    621  pool->sp_nrthreads++;  in svc_prepare_thread()
    622  list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);  in svc_prepare_thread()
    [all …]

/linux-4.1.27/net/9p/

D | util.c
    45  struct idr pool;  member
    62  idr_init(&p->pool);  in p9_idpool_create()
    75  idr_destroy(&p->pool);  in p9_idpool_destroy()
    97  i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);  in p9_idpool_get()
    125  idr_remove(&p->pool, id);  in p9_idpool_put()
    138  return idr_find(&p->pool, id) != NULL;  in p9_idpool_check()

/linux-4.1.27/drivers/scsi/libfc/

D | fc_exch.c
    95  struct fc_exch_pool __percpu *pool;  member
    418  static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,  in fc_exch_ptr_get() argument
    421  struct fc_exch **exches = (struct fc_exch **)(pool + 1);  in fc_exch_ptr_get()
    431  static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,  in fc_exch_ptr_set() argument
    434  ((struct fc_exch **)(pool + 1))[index] = ep;  in fc_exch_ptr_set()
    443  struct fc_exch_pool *pool;  in fc_exch_delete() local
    446  pool = ep->pool;  in fc_exch_delete()
    447  spin_lock_bh(&pool->lock);  in fc_exch_delete()
    448  WARN_ON(pool->total_exches <= 0);  in fc_exch_delete()
    449  pool->total_exches--;  in fc_exch_delete()
    [all …]

/linux-4.1.27/arch/powerpc/kernel/

D | iommu.c
    191  struct iommu_pool *pool;  in iommu_range_alloc() local
    214  pool = &(tbl->large_pool);  in iommu_range_alloc()
    216  pool = &(tbl->pools[pool_nr]);  in iommu_range_alloc()
    218  spin_lock_irqsave(&(pool->lock), flags);  in iommu_range_alloc()
    222  (*handle >= pool->start) && (*handle < pool->end))  in iommu_range_alloc()
    225  start = pool->hint;  in iommu_range_alloc()
    227  limit = pool->end;  in iommu_range_alloc()
    234  start = pool->start;  in iommu_range_alloc()
    243  spin_unlock(&(pool->lock));  in iommu_range_alloc()
    244  pool = &(tbl->pools[0]);  in iommu_range_alloc()
    [all …]

/linux-4.1.27/block/

D | bounce.c
    125  static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)  in bounce_end_io() argument
    143  mempool_free(bvec->bv_page, pool);  in bounce_end_io()
    161  static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)  in __bounce_end_io_read() argument
    168  bounce_end_io(bio, pool, err);  in __bounce_end_io_read()
    200  mempool_t *pool, int force)  in __blk_queue_bounce() argument
    224  to->bv_page = mempool_alloc(pool, q->bounce_gfp);  in __blk_queue_bounce()
    243  if (pool == page_pool) {  in __blk_queue_bounce()
    260  mempool_t *pool;  in blk_queue_bounce() local
    278  pool = page_pool;  in blk_queue_bounce()
    281  pool = isa_page_pool;  in blk_queue_bounce()
    [all …]

D | bio.c
    161  void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)  in bvec_free() argument
    166  mempool_free(bv, pool);  in bvec_free()
    175  mempool_t *pool)  in bvec_alloc() argument
    211  bvl = mempool_alloc(pool, gfp_mask);  in bvec_alloc()

/linux-4.1.27/drivers/scsi/ibmvscsi/

D | ibmvscsi.c
    453  static int initialize_event_pool(struct event_pool *pool,  in initialize_event_pool() argument
    458  pool->size = size;  in initialize_event_pool()
    459  pool->next = 0;  in initialize_event_pool()
    460  pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);  in initialize_event_pool()
    461  if (!pool->events)  in initialize_event_pool()
    464  pool->iu_storage =  in initialize_event_pool()
    466  pool->size * sizeof(*pool->iu_storage),  in initialize_event_pool()
    467  &pool->iu_token, 0);  in initialize_event_pool()
    468  if (!pool->iu_storage) {  in initialize_event_pool()
    469  kfree(pool->events);  in initialize_event_pool()
    [all …]

D | ibmvfc.c
    748  static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,  in ibmvfc_valid_event() argument
    751  int index = evt - pool->events;  in ibmvfc_valid_event()
    752  if (index < 0 || index >= pool->size) /* outside of bounds */  in ibmvfc_valid_event()
    754  if (evt != pool->events + index) /* unaligned */  in ibmvfc_valid_event()
    767  struct ibmvfc_event_pool *pool = &vhost->pool;  in ibmvfc_free_event() local
    769  BUG_ON(!ibmvfc_valid_event(pool, evt));  in ibmvfc_free_event()
    1207  struct ibmvfc_event_pool *pool = &vhost->pool;  in ibmvfc_init_event_pool() local
    1210  pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;  in ibmvfc_init_event_pool()
    1211  pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);  in ibmvfc_init_event_pool()
    1212  if (!pool->events)  in ibmvfc_init_event_pool()
    [all …]

D | ibmvscsi.h
    97  struct event_pool pool;  member

/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/

D | o2iblnd.c
    1244  kib_pool_t *pool = &tpo->tpo_pool;  in kiblnd_map_tx_pool() local
    1245  kib_net_t *net = pool->po_owner->ps_net;  in kiblnd_map_tx_pool()
    1265  for (ipage = page_offset = i = 0; i < pool->po_size; i++) {  in kiblnd_map_tx_pool()
    1279  list_add(&tx->tx_list, &pool->po_free_list);  in kiblnd_map_tx_pool()
    1343  static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)  in kiblnd_destroy_fmr_pool() argument
    1345  LASSERT(pool->fpo_map_count == 0);  in kiblnd_destroy_fmr_pool()
    1347  if (pool->fpo_fmr_pool != NULL)  in kiblnd_destroy_fmr_pool()
    1348  ib_destroy_fmr_pool(pool->fpo_fmr_pool);  in kiblnd_destroy_fmr_pool()
    1350  if (pool->fpo_hdev != NULL)  in kiblnd_destroy_fmr_pool()
    1351  kiblnd_hdev_decref(pool->fpo_hdev);  in kiblnd_destroy_fmr_pool()
    [all …]

/linux-4.1.27/sound/core/

D | memalloc.c
    121  struct gen_pool *pool = NULL;  in snd_malloc_dev_iram() local
    127  pool = of_get_named_gen_pool(dev->of_node, "iram", 0);  in snd_malloc_dev_iram()
    129  if (!pool)  in snd_malloc_dev_iram()
    133  dmab->private_data = pool;  in snd_malloc_dev_iram()
    135  dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr);  in snd_malloc_dev_iram()
    144  struct gen_pool *pool = dmab->private_data;  in snd_free_dev_iram() local
    146  if (pool && dmab->area)  in snd_free_dev_iram()
    147  gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);  in snd_free_dev_iram()

/linux-4.1.27/tools/usb/usbip/libsrc/

D | names.c
    160  struct pool {  struct
    161  struct pool *next;  argument
    165  static struct pool *pool_head;  argument
    169  struct pool *p;  in my_malloc()
    171  p = calloc(1, sizeof(struct pool));  in my_malloc()
    189  struct pool *pool;  in names_free() local
    194  for (pool = pool_head; pool != NULL; ) {  in names_free()
    195  struct pool *tmp;  in names_free()
    197  if (pool->mem)  in names_free()
    198  free(pool->mem);  in names_free()
    [all …]

/linux-4.1.27/include/rdma/

D | ib_fmr_pool.h
    61  void (*flush_function)(struct ib_fmr_pool *pool,
    69  struct ib_fmr_pool *pool;  member
    82  void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
    84  int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
|
/linux-4.1.27/include/linux/ceph/ |
D | msgpool.h | 13 mempool_t *pool; member 18 extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type, 21 extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
|
D | osdmap.h | 23 uint64_t pool; member 44 static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool) in ceph_can_shift_osds() argument 46 switch (pool->type) { in ceph_can_shift_osds() 57 s64 pool; member 188 pgid->pool = ceph_decode_64(p); in ceph_decode_pgid()
|
D | messenger.h | 170 struct ceph_msgpool *pool; member
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
D | client.c | 425 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) in ptlrpc_free_rq_pool() argument 430 LASSERT(pool != NULL); in ptlrpc_free_rq_pool() 432 spin_lock(&pool->prp_lock); in ptlrpc_free_rq_pool() 433 list_for_each_safe(l, tmp, &pool->prp_req_list) { in ptlrpc_free_rq_pool() 437 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); in ptlrpc_free_rq_pool() 438 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size); in ptlrpc_free_rq_pool() 441 spin_unlock(&pool->prp_lock); in ptlrpc_free_rq_pool() 442 OBD_FREE(pool, sizeof(*pool)); in ptlrpc_free_rq_pool() 449 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq) in ptlrpc_add_rqs_to_pool() argument 454 while (size < pool->prp_rq_size) in ptlrpc_add_rqs_to_pool() [all …]
|
/linux-4.1.27/drivers/char/ |
D | random.c | 423 __u32 *pool; member 451 .pool = input_pool_data 460 .pool = blocking_pool_data, 470 .pool = nonblocking_pool_data, 513 w ^= r->pool[i]; in _mix_pool_bytes() 514 w ^= r->pool[(i + tap1) & wordmask]; in _mix_pool_bytes() 515 w ^= r->pool[(i + tap2) & wordmask]; in _mix_pool_bytes() 516 w ^= r->pool[(i + tap3) & wordmask]; in _mix_pool_bytes() 517 w ^= r->pool[(i + tap4) & wordmask]; in _mix_pool_bytes() 518 w ^= r->pool[(i + tap5) & wordmask]; in _mix_pool_bytes() [all …]
|
/linux-4.1.27/Documentation/vm/ |
D | zswap.txt | 5 dynamically allocated RAM-based memory pool. zswap basically trades CPU cycles 26 device when the compressed pool reaches its size limit. This requirement had 35 evict pages from its own compressed pool on an LRU basis and write them back to 36 the backing swap device in the case that the compressed pool is full. 38 Zswap makes use of zbud for managing the compressed memory pool. Each 41 accessed. The compressed memory pool grows on demand and shrinks as compressed 42 pages are freed. The pool is not preallocated. 61 pool can occupy. 67 A debugfs interface is provided for various statistics about pool size, number
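Since zswap delegates pool management to zbud, the allocation cycle the text describes can be sketched directly against the zbud API. This is a hedged illustration, not zswap's actual code path; my_evict and zbud_demo are made-up names, and a real evict callback would write the page back to the backing swap device.

	#include <linux/zbud.h>
	#include <linux/errno.h>

	/* illustrative evict callback; zswap's real one does swap writeback */
	static int my_evict(struct zbud_pool *pool, unsigned long handle)
	{
		return -EINVAL;
	}

	static struct zbud_ops my_zbud_ops = { .evict = my_evict };

	static int zbud_demo(size_t compressed_len)
	{
		struct zbud_pool *pool;
		unsigned long handle;
		void *addr;

		pool = zbud_create_pool(GFP_KERNEL, &my_zbud_ops);
		if (!pool)
			return -ENOMEM;
		if (zbud_alloc(pool, compressed_len, GFP_KERNEL, &handle)) {
			zbud_destroy_pool(pool);
			return -ENOMEM;
		}
		addr = zbud_map(pool, handle);	/* copy compressed data in here */
		zbud_unmap(pool, handle);
		zbud_free(pool, handle);
		zbud_destroy_pool(pool);
		return 0;
	}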
|
D | hugetlbpage.txt | 22 persistent hugetlb pages in the kernel's huge page pool. It also displays 38 HugePages_Total is the size of the pool of huge pages. 39 HugePages_Free is the number of huge pages in the pool that are not yet 42 which a commitment to allocate from the pool has been made, 45 huge page from the pool of huge pages at fault time. 47 the pool above the value in /proc/sys/vm/nr_hugepages. The 55 pages in the kernel's huge page pool. "Persistent" huge pages will be 56 returned to the huge page pool when freed by a task. A user with root 65 pool, a user with appropriate privilege can use either the mmap system call 88 huge page pool to 20, allocating or freeing huge pages, as required. [all …]
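For the mmap path mentioned above, a minimal userspace sketch that draws one page from the huge page pool; it assumes a 2 MB huge page size and a non-empty pool, and the LENGTH macro is an illustrative placeholder.

	#include <stdio.h>
	#include <sys/mman.h>

	#define LENGTH (2UL * 1024 * 1024)	/* one 2MB huge page (assumed size) */

	int main(void)
	{
		void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		if (addr == MAP_FAILED) {
			perror("mmap");	/* commonly means the huge page pool is empty */
			return 1;
		}
		((char *)addr)[0] = 1;	/* touch the mapping to fault in the huge page */
		munmap(addr, LENGTH);
		return 0;
	}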
|
D | cleancache.txt | 41 pool id which, if positive, must be saved in the filesystem's superblock; 44 the pool id, a file key, and a page index into the file. (The combination 45 of a pool id, a file key, and an index is sometimes called a "handle".) 50 all pages in all files specified by the given pool id and also surrender 51 the pool id. 53 An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache 54 to treat the pool as shared using a 128-bit UUID as a key. On systems 58 same UUID will receive the same pool id, thus allowing the pages to 64 If a get_page is successful on a non-shared pool, the page is invalidated 65 (thus making cleancache an "exclusive" cache). On a shared pool, the page [all …]
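The "handle" terminology above can be made concrete with a small illustrative struct; the field names and key width here are my own shorthand, not the kernel's cleancache types.

	/* a cleancache "handle": pool id + file key + page index (illustrative) */
	struct my_cleancache_handle {
		int pool_id;		/* returned by init_fs / init_shared_fs */
		unsigned char key[16];	/* per-file key, e.g. derived from the inode */
		unsigned long index;	/* the page's offset within the file */
	};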
|
D | balance | 19 mapped pages from the direct mapped pool, instead of falling back on 20 the dma pool, so as to keep the dma pool filled for dma requests (atomic 23 regular memory requests by allocating one from the dma pool, instead
|
D | overcommit-accounting | 62 shmfs memory drawn from the same pool
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-bus-rbd | 9 Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name] 71 pool 73 The name of the storage pool where this rbd image resides. 74 An rbd image name is unique within its pool. 78 The unique identifier for the rbd image's pool. This is 79 a permanent attribute of the pool. A pool's id will never
|
D | sysfs-fs-ext4 | 46 block group specific preallocation pool, so that small 49 preallocation pool.
|
/linux-4.1.27/drivers/misc/ |
D | sram.c | 38 struct gen_pool *pool; member 99 sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1); in sram_probe() 100 if (!sram->pool) in sram_probe() 179 ret = gen_pool_add_virt(sram->pool, in sram_probe() 209 if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) in sram_remove()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_fcoe.c | 137 if (ddp->pool) { in ixgbe_fcoe_ddp_put() 138 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); in ixgbe_fcoe_ddp_put() 139 ddp->pool = NULL; in ixgbe_fcoe_ddp_put() 206 if (!ddp_pool->pool) { in ixgbe_fcoe_ddp_setup() 219 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); in ixgbe_fcoe_ddp_setup() 224 ddp->pool = ddp_pool->pool; in ixgbe_fcoe_ddp_setup() 343 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); in ixgbe_fcoe_ddp_setup() 623 if (ddp_pool->pool) in ixgbe_fcoe_dma_pool_free() 624 dma_pool_destroy(ddp_pool->pool); in ixgbe_fcoe_dma_pool_free() 625 ddp_pool->pool = NULL; in ixgbe_fcoe_dma_pool_free() [all …]
|
D | ixgbe_fcoe.h | 67 struct dma_pool *pool; member 72 struct dma_pool *pool; member
|
D | ixgbe_x550.c | 1373 unsigned int pool) in ixgbe_set_source_address_pruning_X550() argument 1378 if (pool > 63) in ixgbe_set_source_address_pruning_X550() 1385 pfflp |= (1ULL << pool); in ixgbe_set_source_address_pruning_X550() 1387 pfflp &= ~(1ULL << pool); in ixgbe_set_source_address_pruning_X550()
|
D | ixgbe_main.c | 3569 u16 pool; in ixgbe_setup_psrtype() local 3586 for_each_set_bit(pool, &adapter->fwd_bitmask, 32) in ixgbe_setup_psrtype() 3587 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); in ixgbe_setup_psrtype() 4443 static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, in ixgbe_macvlan_set_rx_mode() argument 4450 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); in ixgbe_macvlan_set_rx_mode() 4462 ixgbe_write_uc_addr_list(adapter->netdev, pool); in ixgbe_macvlan_set_rx_mode() 4463 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); in ixgbe_macvlan_set_rx_mode() 4471 u16 pool = vadapter->pool; in ixgbe_fwd_psrtype() local 4486 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); in ixgbe_fwd_psrtype() 4584 if (!test_bit(accel->pool, &adapter->fwd_bitmask)) in ixgbe_fwd_ring_up() [all …]
|
/linux-4.1.27/Documentation/filesystems/nfs/ |
D | knfsd-stats.txt | 29 for each NFS thread pool. 35 pool 36 The id number of the NFS thread pool to which this line applies. 39 Thread pool ids are a contiguous set of small integers starting 40 at zero. The maximum value depends on the thread pool mode, but 42 Note that in the default case there will be a single thread pool 44 and thus this file will have a single line with a pool id of "0". 72 thread pool for the NFS workload (the workload is thread-limited), 74 the thread pool (the workload is CPU-limited). In the former case, 102 on all the CPUs in the nfsd thread pool. [all …]
|
/linux-4.1.27/drivers/net/wireless/cw1200/ |
D | queue.c | 184 queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, in cw1200_queue_init() 186 if (!queue->pool) in cw1200_queue_init() 192 kfree(queue->pool); in cw1200_queue_init() 193 queue->pool = NULL; in cw1200_queue_init() 198 list_add_tail(&queue->pool[i].head, &queue->free_pool); in cw1200_queue_init() 250 kfree(queue->pool); in cw1200_queue_deinit() 252 queue->pool = NULL; in cw1200_queue_deinit() 305 item - queue->pool); in cw1200_queue_put() 384 item = &queue->pool[item_id]; in cw1200_queue_requeue() 435 item - queue->pool); in cw1200_queue_requeue_all() [all …]
|
D | queue.h | 35 struct cw1200_queue_item *pool; member
|
/linux-4.1.27/Documentation/ |
D | java.txt | 178 /* From Sun's Java VM Specification, as tag entries in the constant pool. */ 204 long *pool; 234 /* Reads in a value from the constant pool. */ 239 pool[*cur] = ftell(classfile); 301 pool = calloc(cp_count, sizeof(long)); 302 if(!pool) 303 error("%s: Out of memory for constant pool\n", program); 313 if(!pool[this_class] || pool[this_class] == -1) 315 if(fseek(classfile, pool[this_class] + 1, SEEK_SET)) 321 if(!pool[classinfo_ptr] || pool[classinfo_ptr] == -1) [all …]
|
D | workqueue.txt | 48 worker pool. A MT wq could provide only one execution context per CPU 59 their own thread pool. 70 * Automatically regulate worker pool and level of concurrency so that 107 When a work item is queued to a workqueue, the target worker-pool is 109 and appended on the shared worklist of the worker-pool. For example, 111 be queued on the worklist of either normal or highpri worker-pool that 114 For any worker pool implementation, managing the concurrency level 120 Each worker-pool bound to an actual CPU implements concurrency 121 management by hooking into the scheduler. The worker-pool is notified 127 workers on the CPU, the worker-pool doesn't start execution of a new [all …]
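A minimal sketch of handing work to these shared worker pools; my_wq, my_work_fn and my_wq_demo are illustrative names, and WQ_HIGHPRI is used only to show how a queue selects the highpri worker-pool.

	#include <linux/workqueue.h>
	#include <linux/printk.h>

	static void my_work_fn(struct work_struct *work)
	{
		pr_info("executed by a worker-pool thread\n");
	}

	static DECLARE_WORK(my_work, my_work_fn);

	static int my_wq_demo(void)
	{
		struct workqueue_struct *my_wq;

		my_wq = alloc_workqueue("my_wq", WQ_HIGHPRI, 0);
		if (!my_wq)
			return -ENOMEM;
		queue_work(my_wq, &my_work);	/* appended to a highpri pool worklist */
		flush_workqueue(my_wq);		/* wait for the work item to finish */
		destroy_workqueue(my_wq);
		return 0;
	}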
|
D | DMA-API.txt | 95 dma_pool_create() initializes a pool of DMA-coherent buffers 104 from this pool must not cross 4KByte boundaries. 107 void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags, 110 This allocates memory from the pool; the returned memory will meet the 115 address usable by the CPU, and the DMA address usable by the pool's 119 void dma_pool_free(struct dma_pool *pool, void *vaddr, 122 This puts memory back into the pool. The pool is what was passed to 127 void dma_pool_destroy(struct dma_pool *pool); 129 dma_pool_destroy() frees the resources of the pool. It must be 131 memory back to the pool before you destroy it.
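Putting the four dma_pool calls documented above together, a hedged sketch for a driver allocating one 64-byte descriptor; dev, the sizes, and dma_pool_demo are placeholders, not values from any real driver.

	#include <linux/dmapool.h>

	static int dma_pool_demo(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *vaddr;

		/* 64-byte blocks, 64-byte aligned, never crossing a 4K boundary */
		pool = dma_pool_create("demo", dev, 64, 64, 4096);
		if (!pool)
			return -ENOMEM;

		vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
		if (!vaddr) {
			dma_pool_destroy(pool);
			return -ENOMEM;
		}
		/* ... hand "dma" to the device, touch the block via "vaddr" ... */
		dma_pool_free(pool, vaddr, dma);
		dma_pool_destroy(pool);	/* all blocks must be freed back first */
		return 0;
	}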
|
D | DMA-API-HOWTO.txt | 435 struct dma_pool *pool; 437 pool = dma_pool_create(name, dev, size, align, boundary); 443 pass 0 for boundary; passing 4096 says memory allocated from this pool 447 Allocate memory from a DMA pool like this: 449 cpu_addr = dma_pool_alloc(pool, flags, &dma_handle); 457 dma_pool_free(pool, cpu_addr, dma_handle); 459 where pool is what you passed to dma_pool_alloc(), and cpu_addr and 465 dma_pool_destroy(pool); 468 from a pool before you destroy the pool. This function may not
|
D | hw_random.txt | 17 Those tools use /dev/hw_random to fill the kernel entropy pool,
|
/linux-4.1.27/Documentation/devicetree/bindings/reserved-memory/ |
D | reserved-memory.txt | 27 reflect the purpose of the node (i.e. "framebuffer" or "dma-pool"). Unit 50 - shared-dma-pool: This indicates a region of memory meant to be 51 used as a shared pool of DMA buffers for a set of devices. It can 52 be used by an operating system to instantiate the necessary pool 69 region for the default pool of the contiguous memory allocator. 100 compatible = "shared-dma-pool";
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_av.c | 189 ah->av = pci_pool_alloc(dev->av_table.pool, in mthca_create_ah() 250 pci_pool_free(dev->av_table.pool, ah->av, ah->avdma); in mthca_destroy_ah() 338 dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev, in mthca_init_av_table() 341 if (!dev->av_table.pool) in mthca_init_av_table() 358 pci_pool_destroy(dev->av_table.pool); in mthca_init_av_table() 372 pci_pool_destroy(dev->av_table.pool); in mthca_cleanup_av_table()
|
D | mthca_dev.h | 121 struct pci_pool *pool; member 266 struct pci_pool *pool; member
|
D | mthca_cmd.c | 533 dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev, in mthca_cmd_init() 536 if (!dev->cmd.pool) { in mthca_cmd_init() 546 pci_pool_destroy(dev->cmd.pool); in mthca_cmd_cleanup() 616 mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); in mthca_alloc_mailbox() 630 pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); in mthca_free_mailbox()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_fcoe.c | 173 if (ddp->pool) { in i40e_fcoe_ddp_unmap() 174 dma_pool_free(ddp->pool, ddp->udl, ddp->udp); in i40e_fcoe_ddp_unmap() 175 ddp->pool = NULL; in i40e_fcoe_ddp_unmap() 488 if (!ddp_pool->pool) { in i40e_fcoe_dma_pool_free() 492 dma_pool_destroy(ddp_pool->pool); in i40e_fcoe_dma_pool_free() 493 ddp_pool->pool = NULL; in i40e_fcoe_dma_pool_free() 510 struct dma_pool *pool; in i40e_fcoe_dma_pool_create() local 514 if (ddp_pool && ddp_pool->pool) { in i40e_fcoe_dma_pool_create() 519 pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX, in i40e_fcoe_dma_pool_create() 521 if (!pool) { in i40e_fcoe_dma_pool_create() [all …]
|
D | i40e_fcoe.h | 112 struct dma_pool *pool; member 117 struct dma_pool *pool; member
|
/linux-4.1.27/Documentation/devicetree/bindings/net/ |
D | keystone-netcp.txt | 120 - rx-pool: specifies the number of descriptors to be used & the region-id 121 for creating the rx descriptor pool. 122 - tx-pool: specifies the number of descriptors to be used & the region-id 123 for creating the tx descriptor pool. 194 rx-pool = <1024 12>; 195 tx-pool = <1024 12>; 206 rx-pool = <1024 12>; 207 tx-pool = <1024 12>;
|
/linux-4.1.27/drivers/gpu/drm/sis/ |
D | sis_mm.c | 83 void *data, int pool) in sis_drm_alloc() argument 94 if (0 == ((pool == 0) ? dev_priv->vram_initialized : in sis_drm_alloc() 109 if (pool == AGP_TYPE) { in sis_drm_alloc() 141 mem->offset = ((pool == 0) ? in sis_drm_alloc() 159 DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size, in sis_drm_alloc()
|
/linux-4.1.27/drivers/video/fbdev/ |
D | sh_mobile_meram.c | 160 struct gen_pool *pool; member 203 return gen_pool_alloc(priv->pool, size); in meram_alloc() 209 gen_pool_free(priv->pool, mem, size); in meram_free() 686 priv->pool = gen_pool_create(ilog2(MERAM_GRANULARITY), -1); in sh_mobile_meram_probe() 687 if (priv->pool == NULL) { in sh_mobile_meram_probe() 692 error = gen_pool_add(priv->pool, meram->start, resource_size(meram), in sh_mobile_meram_probe() 709 if (priv->pool) in sh_mobile_meram_probe() 710 gen_pool_destroy(priv->pool); in sh_mobile_meram_probe() 732 gen_pool_destroy(priv->pool); in sh_mobile_meram_remove()
|
/linux-4.1.27/drivers/net/ethernet/hisilicon/ |
D | hix5hd2_gmac.c | 206 struct hix5hd2_desc_sw pool[QUEUE_NUMS]; member 207 #define rx_fq pool[0] 208 #define rx_bq pool[1] 209 #define tx_bq pool[2] 210 #define tx_rq pool[3] 847 if (priv->pool[i].desc) { in hix5hd2_destroy_hw_desc_queue() 848 dma_free_coherent(priv->dev, priv->pool[i].size, in hix5hd2_destroy_hw_desc_queue() 849 priv->pool[i].desc, in hix5hd2_destroy_hw_desc_queue() 850 priv->pool[i].phys_addr); in hix5hd2_destroy_hw_desc_queue() 851 priv->pool[i].desc = NULL; in hix5hd2_destroy_hw_desc_queue() [all …]
|
/linux-4.1.27/drivers/infiniband/ulp/iser/ |
D | iser_verbs.c | 240 ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, ¶ms); in iser_create_fmr_pool() 241 if (!IS_ERR(ib_conn->fmr.pool)) in iser_create_fmr_pool() 248 ret = PTR_ERR(ib_conn->fmr.pool); in iser_create_fmr_pool() 249 ib_conn->fmr.pool = NULL; in iser_create_fmr_pool() 265 ib_conn, ib_conn->fmr.pool); in iser_free_fmr_pool() 267 if (ib_conn->fmr.pool != NULL) in iser_free_fmr_pool() 268 ib_destroy_fmr_pool(ib_conn->fmr.pool); in iser_free_fmr_pool() 270 ib_conn->fmr.pool = NULL; in iser_free_fmr_pool() 384 INIT_LIST_HEAD(&ib_conn->fastreg.pool); in iser_create_fastreg_pool() 403 list_add_tail(&desc->list, &ib_conn->fastreg.pool); in iser_create_fastreg_pool() [all …]
|
D | iscsi_iser.h | 444 struct ib_fmr_pool *pool; member 448 struct list_head pool; member
|
D | iser_memory.c | 156 desc = list_first_entry(&ib_conn->fastreg.pool, in iser_reg_desc_get() 171 list_add(&desc->list, &ib_conn->fastreg.pool); in iser_reg_desc_put() 464 fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool, in iser_reg_page_vec()
|
/linux-4.1.27/drivers/hid/usbhid/ |
D | hid-pidff.c | 170 struct pidff_usage pool[sizeof(pidff_pool)]; member 1149 PIDFF_FIND_FIELDS(pool, PID_POOL, 0); in pidff_init_fields() 1181 if (pidff->pool[PID_SIMULTANEOUS_MAX].value) { in pidff_reset() 1182 while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) { in pidff_reset() 1186 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); in pidff_reset() 1297 if (pidff->pool[PID_SIMULTANEOUS_MAX].value) in hid_pidff_init() 1299 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); in hid_pidff_init() 1301 if (pidff->pool[PID_RAM_POOL_SIZE].value) in hid_pidff_init() 1303 pidff->pool[PID_RAM_POOL_SIZE].value[0]); in hid_pidff_init() 1305 if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && in hid_pidff_init() [all …]
|
/linux-4.1.27/include/uapi/linux/ |
D | hyperv.h | 333 __u8 pool; member 388 __u8 pool; member
|
/linux-4.1.27/Documentation/devicetree/bindings/soc/ti/ |
D | keystone-navigator-qmss.txt | 15 queue pool management (allocation, push, pop and notify) and descriptor 16 pool management. 44 - qpend : pool of qpend (interruptible) queues 45 - general-purpose : pool of general queues, primarily used 48 - accumulator : pool of queues on PDSP accumulator channel
|
/linux-4.1.27/drivers/usb/gadget/function/ |
D | u_serial.c | 362 struct list_head *pool = &port->write_pool; in gs_start_tx() local 367 while (!port->write_busy && !list_empty(pool)) { in gs_start_tx() 374 req = list_entry(pool->next, struct usb_request, list); in gs_start_tx() 406 list_add(&req->list, pool); in gs_start_tx() 431 struct list_head *pool = &port->read_pool; in gs_start_rx() local 434 while (!list_empty(pool)) { in gs_start_rx() 447 req = list_entry(pool->next, struct usb_request, list); in gs_start_rx() 461 list_add(&req->list, pool); in gs_start_rx()
|
/linux-4.1.27/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 335 static void srp_destroy_fr_pool(struct srp_fr_pool *pool) in srp_destroy_fr_pool() argument 340 if (!pool) in srp_destroy_fr_pool() 343 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_destroy_fr_pool() 349 kfree(pool); in srp_destroy_fr_pool() 363 struct srp_fr_pool *pool; in srp_create_fr_pool() local 372 pool = kzalloc(sizeof(struct srp_fr_pool) + in srp_create_fr_pool() 374 if (!pool) in srp_create_fr_pool() 376 pool->size = pool_size; in srp_create_fr_pool() 377 pool->max_page_list_len = max_page_list_len; in srp_create_fr_pool() 378 spin_lock_init(&pool->lock); in srp_create_fr_pool() [all …]
|
/linux-4.1.27/drivers/staging/unisys/visorchipset/ |
D | visorchipset.h | 231 void *visorchipset_cache_alloc(struct kmem_cache *pool, 233 void visorchipset_cache_free(struct kmem_cache *pool, void *p,
|
/linux-4.1.27/drivers/net/ethernet/marvell/ |
D | mvpp2.c | 44 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) argument 176 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) argument 178 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) argument 180 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) argument 182 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) argument 184 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) argument 185 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) argument 188 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) argument 200 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) argument 206 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) argument [all …]
|
/linux-4.1.27/include/uapi/linux/netfilter_bridge/ |
D | ebt_among.h | 42 struct ebt_mac_wormhash_tuple pool[0]; member
|
/linux-4.1.27/drivers/iio/ |
D | industrialio-trigger.c | 173 ret = bitmap_find_free_region(trig->pool, in iio_trigger_get_irq() 186 clear_bit(irq - trig->subirq_base, trig->pool); in iio_trigger_put_irq() 202 = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_attach_poll_func() 229 = (bitmap_weight(trig->pool, in iio_trigger_detach_poll_func()
|
/linux-4.1.27/drivers/hv/ |
D | hv_kvp.c | 350 __u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool; in kvp_send_key() local 364 message->kvp_hdr.pool = pool; in kvp_send_key()
|
/linux-4.1.27/net/bridge/netfilter/ |
D | ebt_among.c | 36 p = &wh->pool[i]; in ebt_mac_wormhash_contains() 43 p = &wh->pool[i]; in ebt_mac_wormhash_contains()
|
/linux-4.1.27/fs/ceph/ |
D | xattr.c | 73 s64 pool = ceph_file_layout_pg_pool(ci->i_layout); in ceph_vxattrcb_layout() local 79 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); in ceph_vxattrcb_layout() 102 (unsigned long long)pool); in ceph_vxattrcb_layout() 141 s64 pool = ceph_file_layout_pg_pool(ci->i_layout); in ceph_vxattrcb_layout_pool() local 145 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); in ceph_vxattrcb_layout_pool() 149 ret = snprintf(val, size, "%lld", (unsigned long long)pool); in ceph_vxattrcb_layout_pool() 241 XATTR_LAYOUT_FIELD(dir, layout, pool), 268 XATTR_LAYOUT_FIELD(file, layout, pool),
|
D | addr.c | 779 mempool_t *pool = NULL; /* Becomes non-null if mempool used */ in ceph_writepages_start() local 896 pool = fsc->wb_pagevec_pool; in ceph_writepages_start() 897 pages = mempool_alloc(pool, GFP_NOFS); in ceph_writepages_start() 955 !!pool, false); in ceph_writepages_start() 958 pool = NULL; in ceph_writepages_start()
|
D | ioctl.c | 215 oloc.pool = ceph_file_layout_pg_pool(ci->i_layout); in ceph_ioctl_get_dataloc()
|
/linux-4.1.27/drivers/scsi/fnic/ |
D | fnic_main.c | 547 mempool_t *pool; in fnic_probe() local 750 pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); in fnic_probe() 751 if (!pool) in fnic_probe() 753 fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; in fnic_probe() 755 pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); in fnic_probe() 756 if (!pool) in fnic_probe() 758 fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; in fnic_probe()
|
/linux-4.1.27/include/trace/events/ |
D | workqueue.h | 57 __entry->cpu = pwq->pool->cpu;
|
/linux-4.1.27/include/linux/iio/ |
D | trigger.h | 71 unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)]; member
|
/linux-4.1.27/drivers/usb/musb/ |
D | cppi_dma.h | 126 struct dma_pool *pool; member
|
D | cppi_dma.c | 126 bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); in cppi_pool_init() 147 dma_pool_free(cppi->pool, bd, bd->dma); in cppi_pool_free() 1325 controller->pool = dma_pool_create("cppi", in dma_controller_create() 1329 if (!controller->pool) { in dma_controller_create() 1362 dma_pool_destroy(cppi->pool); in dma_controller_destroy()
|
/linux-4.1.27/arch/mips/cavium-octeon/executive/ |
D | cvmx-helper-util.c | 107 buffer_ptr.s.pool = wqe_pool.s.wqe_pool; in cvmx_helper_dump_packet() 139 cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool); in cvmx_helper_dump_packet()
|
D | cvmx-helper.c | 843 g_buffer.s.pool = CVMX_FPA_WQE_POOL; in __cvmx_helper_errata_fix_ipd_ptr_alignment() 855 pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL; in __cvmx_helper_errata_fix_ipd_ptr_alignment()
|
D | cvmx-pko.c | 60 config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL; in cvmx_pko_initialize_global()
|
/linux-4.1.27/drivers/char/agp/ |
D | frontend.c | 54 curr = agp_fe.current_controller->pool; in agp_find_mem_by_key() 88 agp_fe.current_controller->pool = next; in agp_remove_from_pool() 203 prev = agp_fe.current_controller->pool; in agp_insert_into_pool() 209 agp_fe.current_controller->pool = temp; in agp_insert_into_pool() 363 memory = controller->pool; in agp_remove_all_memory()
|
/linux-4.1.27/Documentation/devicetree/bindings/soc/fsl/ |
D | qman-portals.txt | 94 Definition: Must include "fsl,qman-pool-channel" 95 May include "fsl,<SoC>-qman-pool-channel"
|
/linux-4.1.27/scripts/kconfig/ |
D | zconf.gperf | 4 %define string-pool-name kconf_id_strings
|
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/ |
D | dxe.c | 234 struct wcn36xx_dxe_mem_pool *pool) in wcn36xx_dxe_init_tx_bd() argument 236 int i, chunk_size = pool->chunk_size; in wcn36xx_dxe_init_tx_bd() 237 dma_addr_t bd_phy_addr = pool->phy_addr; in wcn36xx_dxe_init_tx_bd() 238 void *bd_cpu_addr = pool->virt_addr; in wcn36xx_dxe_init_tx_bd()
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/pcie/ |
D | rx.c | 359 if (!rxq->pool[i].page) in iwl_pcie_rxq_free_rbs() 361 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, in iwl_pcie_rxq_free_rbs() 364 __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); in iwl_pcie_rxq_free_rbs() 365 rxq->pool[i].page = NULL; in iwl_pcie_rxq_free_rbs() 492 list_add(&rxq->pool[i].list, &rxq->rx_used); in iwl_pcie_rx_init_rxb_lists()
|
D | internal.h | 98 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; member
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
D | lproc_osc.c | 97 struct ptlrpc_request_pool *pool = cli->cl_import->imp_rq_pool; in osc_max_rpcs_in_flight_seq_write() local 108 if (pool && val > cli->cl_max_rpcs_in_flight) in osc_max_rpcs_in_flight_seq_write() 109 pool->prp_populate(pool, val-cli->cl_max_rpcs_in_flight); in osc_max_rpcs_in_flight_seq_write()
|
/linux-4.1.27/drivers/block/ |
D | cciss_scsi.c | 111 struct cciss_scsi_cmd_stack_elem_t *pool; member 214 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); in scsi_cmd_stack_setup() 216 stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) in scsi_cmd_stack_setup() 219 if (stk->pool == NULL) { in scsi_cmd_stack_setup() 226 pci_free_consistent(h->pdev, size, stk->pool, in scsi_cmd_stack_setup() 231 stk->elem[i] = &stk->pool[i]; in scsi_cmd_stack_setup() 256 pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); in scsi_cmd_stack_free() 257 stk->pool = NULL; in scsi_cmd_stack_free()
|
/linux-4.1.27/drivers/md/bcache/ |
D | bset.c | 1119 if (state->pool) in bch_bset_sort_state_free() 1120 mempool_destroy(state->pool); in bch_bset_sort_state_free() 1130 state->pool = mempool_create_page_pool(1, page_order); in bch_bset_sort_state_init() 1131 if (!state->pool) in bch_bset_sort_state_init() 1192 outp = mempool_alloc(state->pool, GFP_NOIO); in __btree_sort() 1221 mempool_free(virt_to_page(out), state->pool); in __btree_sort()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/ |
D | cmd.c | 767 mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, in alloc_cmd_box() 783 pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); in free_cmd_box() 1384 cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); in mlx5_cmd_init() 1385 if (!cmd->pool) in mlx5_cmd_init() 1478 pci_pool_destroy(cmd->pool); in mlx5_cmd_init() 1492 pci_pool_destroy(cmd->pool); in mlx5_cmd_cleanup()
|
/linux-4.1.27/Documentation/block/ |
D | queue-sysfs.txt | 93 queue maintains a separate request pool for each cgroup when 95 per-block-cgroup request pool. IOW, if there are N block cgroups,
|
D | biodoc.txt | 444 used as index into pool */ 596 where it cannot allocate through normal means. If the pool is empty and it 598 replenish the pool (without deadlocking) and wait for availability in the pool. 600 could fail if the pool is empty. In general mempool always first tries to 602 pool as long as it is not less than 50% full. 604 On a free, memory is released to the pool or directly freed depending on 605 the current availability in the pool. The mempool interface lets the 610 deadlocks, e.g. avoid trying to allocate more memory from the pool while 611 already holding memory obtained from the pool. 614 it ends up allocating a second bio from the same pool while [all …]
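The mempool fallback behaviour described above, as a short sketch; MY_MIN_NR and mempool_demo are illustrative, and the slab-backed helper is only one of several mempool constructors.

	#include <linux/mempool.h>
	#include <linux/slab.h>

	#define MY_MIN_NR 4	/* illustrative size of the emergency reserve */

	static int mempool_demo(struct kmem_cache *cachep)
	{
		mempool_t *pool;
		void *elem;

		pool = mempool_create_slab_pool(MY_MIN_NR, cachep);
		if (!pool)
			return -ENOMEM;

		/* tries kmem_cache_alloc first, then dips into the reserve */
		elem = mempool_alloc(pool, GFP_NOIO);
		if (elem)
			mempool_free(elem, pool);	/* refills the reserve if depleted */
		mempool_destroy(pool);
		return 0;
	}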
|
/linux-4.1.27/arch/sparc/kernel/ |
D | pci_sun4v.c | 531 struct iommu_pool *pool; in probe_existing_entries() local 537 pool = &(iommu->pools[pool_nr]); in probe_existing_entries() 538 for (i = pool->start; i <= pool->end; i++) { in probe_existing_entries()
|
/linux-4.1.27/Documentation/filesystems/caching/ |
D | backend-api.txt | 82 (2) that of one of the processes in the FS-Cache thread pool. 165 FS-Cache has a pool of threads that it uses to give CPU time to the 194 submit it to the thread pool. CacheFiles, for example, uses this to queue 384 This operation is run asynchronously from FS-Cache's thread pool, and 435 pool. If this is desired, the op->op.processor should be set to point to 505 This method is called asynchronously from the FS-Cache thread pool. It is 693 pool. One of the threads in the pool will invoke the retrieval record's
|
/linux-4.1.27/Documentation/power/ |
D | apm-acpi.txt | 31 apmd: http://ftp.debian.org/pool/main/a/apmd/
|
/linux-4.1.27/fs/logfs/ |
D | logfs.h | 730 static inline void logfs_mempool_destroy(mempool_t *pool) in logfs_mempool_destroy() argument 732 if (pool) in logfs_mempool_destroy() 733 mempool_destroy(pool); in logfs_mempool_destroy()
|
/linux-4.1.27/Documentation/arm/ |
D | tcm.txt | 71 allocation pool with gen_pool_create() and gen_pool_add() 142 /* Allocate some TCM memory from the pool */
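The gen_pool sequence tcm.txt walks through, sketched end to end; the base address, sizes, and tcm_pool_demo are placeholders, not a real TCM layout.

	#include <linux/genalloc.h>

	static int tcm_pool_demo(void)
	{
		struct gen_pool *pool;
		unsigned long addr;

		pool = gen_pool_create(2, -1);	/* 4-byte minimum allocation, any node */
		if (!pool)
			return -ENOMEM;
		/* 0x20000000/4096 is a hypothetical TCM region, not a real address */
		if (gen_pool_add(pool, 0x20000000, 4096, -1)) {
			gen_pool_destroy(pool);
			return -ENOMEM;
		}

		addr = gen_pool_alloc(pool, 64);	/* carve 64 bytes out of the pool */
		if (addr)
			gen_pool_free(pool, addr, 64);
		gen_pool_destroy(pool);	/* all allocations must be freed first */
		return 0;
	}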
|
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/ |
D | cpm.txt | 55 all of which contribute to the allocatable muram pool.
|
/linux-4.1.27/arch/avr32/mach-at32ap/ |
D | at32ap700x.c | 2357 struct gen_pool *pool; in sram_init() local 2360 pool = gen_pool_create(10, -1); in sram_init() 2361 if (!pool) in sram_init() 2364 if (gen_pool_add(pool, 0x24000000, 0x8000, -1)) in sram_init() 2367 sram_pool = pool; in sram_init() 2371 gen_pool_destroy(pool); in sram_init()
|
/linux-4.1.27/arch/powerpc/boot/dts/fsl/ |
D | qoriq-qman1-portals.dtsi | 2 * QorIQ QMan Portal device tree stub for 10 portals & 15 pool channels
|
/linux-4.1.27/drivers/net/wireless/iwlegacy/ |
D | 3945-mac.c | 1091 if (rxq->pool[i].page != NULL) { in il3945_rx_queue_reset() 1092 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, in il3945_rx_queue_reset() 1095 __il_free_pages(il, rxq->pool[i].page); in il3945_rx_queue_reset() 1096 rxq->pool[i].page = NULL; in il3945_rx_queue_reset() 1098 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); in il3945_rx_queue_reset() 1140 if (rxq->pool[i].page != NULL) { in il3945_rx_queue_free() 1141 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, in il3945_rx_queue_free() 1144 __il_free_pages(il, rxq->pool[i].page); in il3945_rx_queue_free() 1145 rxq->pool[i].page = NULL; in il3945_rx_queue_free()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
D | cmd.c | 2470 if (!priv->cmd.pool) { in mlx4_cmd_init() 2471 priv->cmd.pool = pci_pool_create("mlx4_cmd", in mlx4_cmd_init() 2475 if (!priv->cmd.pool) in mlx4_cmd_init() 2534 if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) { in mlx4_cmd_cleanup() 2535 pci_pool_destroy(priv->cmd.pool); in mlx4_cmd_cleanup() 2536 priv->cmd.pool = NULL; in mlx4_cmd_cleanup() 2624 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, in mlx4_alloc_cmd_mailbox() 2643 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); in mlx4_free_cmd_mailbox()
|
/linux-4.1.27/drivers/crypto/caam/ |
D | Kconfig | 110 the hw_random API for supplying the kernel entropy pool.
|
/linux-4.1.27/Documentation/filesystems/ |
D | xfs.txt | 93 space pool. 261 pool. 268 the unused space back to the free pool.
|
/linux-4.1.27/include/linux/sunrpc/ |
D | svc.h | 434 struct svc_pool *pool, int node);
|
/linux-4.1.27/include/scsi/ |
D | libiscsi.h | 258 void **pool; /* Pool of elements */ member
|
/linux-4.1.27/drivers/media/platform/coda/ |
D | coda-common.c | 2092 struct gen_pool *pool; in coda_probe() local 2163 pool = of_get_named_gen_pool(np, "iram", 0); in coda_probe() 2164 if (!pool && pdata) in coda_probe() 2165 pool = dev_get_gen_pool(pdata->iram_dev); in coda_probe() 2166 if (!pool) { in coda_probe() 2170 dev->iram_pool = pool; in coda_probe()
|