
Searched refs:pools (Results 1 – 39 of 39) sorted by relevance

/linux-4.4.14/drivers/gpu/drm/ttm/
ttm_page_alloc_dma.c 104 struct list_head pools; /* The 'struct device->dma_pools link */ member
155 struct list_head pools; member
171 struct list_head pools; member
527 list_for_each_entry_reverse(p, &_manager->pools, pools) { in ttm_dma_free_pool()
534 list_del(&p->pools); in ttm_dma_free_pool()
539 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { in ttm_dma_free_pool()
550 list_del(&pool->pools); in ttm_dma_free_pool()
604 INIT_LIST_HEAD(&sec_pool->pools); in ttm_dma_pool_init()
610 INIT_LIST_HEAD(&pool->pools); in ttm_dma_pool_init()
633 list_add(&sec_pool->pools, &_manager->pools); in ttm_dma_pool_init()
[all …]
ttm_page_alloc.c 116 struct ttm_page_pool pools[NUM_POOLS]; member
272 return &_manager->pools[pool_index]; in ttm_get_pool()
413 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; in ttm_pool_shrink_scan()
430 count += _manager->pools[i].npages; in ttm_pool_shrink_count()
858 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); in ttm_page_alloc_fini()
932 p = &_manager->pools[i]; in ttm_page_alloc_debugfs()
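The ttm_page_alloc.c hits show the other recurring shape of this symbol: a fixed array of page pools (pools[NUM_POOLS] in the pool manager) that a memory shrinker walks to count and release cached pages. The sketch below reproduces that pattern against the stock shrinker interface; the my_pool names, counters and sizes are invented for illustration, and only the struct shrinker callbacks plus register_shrinker()/unregister_shrinker() are the real kernel API.

#include <linux/kernel.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

#define MY_NUM_POOLS 4

struct my_pool {
        spinlock_t lock;        /* spin_lock_init() each lock before use */
        unsigned long npages;   /* pages currently cached in this pool */
};

static struct my_pool my_pools[MY_NUM_POOLS];

/* Report how many cached pages could be reclaimed across all pools. */
static unsigned long my_pool_shrink_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
{
        unsigned long count = 0;
        int i;

        for (i = 0; i < MY_NUM_POOLS; i++)
                count += my_pools[i].npages;
        return count;
}

/* Release up to sc->nr_to_scan pages, rotating the starting pool so that
 * repeated shrink calls do not always drain the same pool first. */
static unsigned long my_pool_shrink_scan(struct shrinker *shrink,
                                         struct shrink_control *sc)
{
        static unsigned int pool_offset;
        unsigned long freed = 0;
        int i;

        for (i = 0; i < MY_NUM_POOLS && freed < sc->nr_to_scan; i++) {
                struct my_pool *pool =
                        &my_pools[(i + pool_offset) % MY_NUM_POOLS];
                unsigned long nr;

                spin_lock(&pool->lock);
                nr = min(pool->npages, sc->nr_to_scan - freed);
                pool->npages -= nr;     /* real code frees the pages here */
                spin_unlock(&pool->lock);
                freed += nr;
        }
        pool_offset++;
        return freed;
}

static struct shrinker my_pool_shrinker = {
        .count_objects = my_pool_shrink_count,
        .scan_objects  = my_pool_shrink_scan,
        .seeks         = DEFAULT_SEEKS,
};

/* register_shrinker(&my_pool_shrinker) at init,
 * unregister_shrinker(&my_pool_shrinker) at teardown. */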
/linux-4.4.14/lib/
iommu-common.c 81 spin_lock_init(&(iommu->pools[i].lock)); in iommu_tbl_pool_init()
82 iommu->pools[i].start = start; in iommu_tbl_pool_init()
83 iommu->pools[i].hint = start; in iommu_tbl_pool_init()
85 iommu->pools[i].end = start - 1; in iommu_tbl_pool_init()
131 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
161 pool = &(iommu->pools[0]); in iommu_tbl_range_alloc()
197 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
242 p = &tbl->pools[pool_nr]; in get_pool()
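lib/iommu-common.c (and the powerpc and sparc IOMMU code further down) splits one large DMA mapping table into several pools, each with its own spinlock, start/end range and search hint, so that concurrent map/unmap calls from different CPUs rarely fight over a single lock. Below is a simplified userspace illustration of that layout only; it is not the kernel's iommu_tbl_* API, and every name in it is made up.

#include <pthread.h>

#define NR_POOLS 4

struct pool {
        pthread_mutex_t lock;
        unsigned long start;    /* first entry owned by this pool */
        unsigned long end;      /* one past the last entry */
        unsigned long hint;     /* where the next search begins */
};

struct map_table {
        unsigned long table_size;
        struct pool pools[NR_POOLS];
};

static void table_init(struct map_table *tbl, unsigned long num_entries)
{
        unsigned long per_pool = num_entries / NR_POOLS;
        unsigned long start = 0;
        int i;

        tbl->table_size = num_entries;
        for (i = 0; i < NR_POOLS; i++) {
                pthread_mutex_init(&tbl->pools[i].lock, NULL);
                tbl->pools[i].start = start;
                tbl->pools[i].hint  = start;
                start += per_pool;
                tbl->pools[i].end   = start;
        }
}

/* A caller picks a pool (for example by CPU number modulo NR_POOLS), takes
 * only that pool's lock and searches from hint to end; on failure it falls
 * back to the other pools, which is what iommu_tbl_range_alloc() does with
 * iommu->pools[pool_nr] and iommu->pools[0] above. */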
/linux-4.4.14/drivers/staging/android/ion/
ion_system_heap.c 52 struct ion_page_pool *pools[0]; member
60 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; in alloc_buffer_page()
87 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; in free_buffer_page()
227 struct ion_page_pool *pool = sys_heap->pools[i]; in ion_system_heap_shrink()
264 struct ion_page_pool *pool = sys_heap->pools[i]; in ion_system_heap_debug_show()
299 heap->pools[i] = pool; in ion_system_heap_create()
307 ion_page_pool_destroy(heap->pools[i]); in ion_system_heap_create()
320 ion_page_pool_destroy(sys_heap->pools[i]); in ion_system_heap_destroy()
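ion_system_heap keeps one page pool per supported allocation order and converts an order into a slot with order_to_index(); pools[0] at the end of the struct is the usual trailing-array idiom. A stripped-down illustration of the indexing follows; the {8, 4, 0} order table mirrors ion_system_heap.c, the rest is placeholder and the pool type is left opaque.

static const unsigned int orders[] = { 8, 4, 0 };       /* largest first */
#define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

struct page_pool;                       /* opaque pool type for the sketch */
static struct page_pool *pools[NUM_ORDERS];

/* Map an allocation order back to its slot in pools[]. */
static int order_to_index(unsigned int order)
{
        unsigned int i;

        for (i = 0; i < NUM_ORDERS; i++)
                if (order == orders[i])
                        return i;
        return -1;                      /* the real code treats this as a bug */
}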
/linux-4.4.14/mm/
dmapool.c 53 struct list_head pools; member
84 list_for_each_entry(pool, &dev->dma_pools, pools) { in show_pools()
108 static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
172 INIT_LIST_HEAD(&retval->pools); in dma_pool_create()
186 list_add(&retval->pools, &dev->dma_pools); in dma_pool_create()
194 list_del(&retval->pools); in dma_pool_create()
279 list_del(&pool->pools); in dma_pool_destroy()
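mm/dmapool.c is the implementation behind the dma_pool API: per-device pools of small, equal-sized DMA-coherent blocks, chained on dev->dma_pools and exposed through the "pools" sysfs attribute created by DEVICE_ATTR above. A minimal usage sketch of that API follows; the pool name, block size and surrounding driver code are placeholders rather than anything taken from dmapool.c itself.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static struct dma_pool *desc_pool;

static int setup_desc_pool(struct device *dev)
{
        dma_addr_t dma;
        void *vaddr;

        /* 64-byte blocks, 64-byte aligned, no boundary restriction */
        desc_pool = dma_pool_create("my_descriptors", dev, 64, 64, 0);
        if (!desc_pool)
                return -ENOMEM;

        /* Each allocation returns a CPU pointer plus the bus address. */
        vaddr = dma_pool_alloc(desc_pool, GFP_KERNEL, &dma);
        if (!vaddr) {
                dma_pool_destroy(desc_pool);
                return -ENOMEM;
        }

        /* ... hand 'dma' to the hardware, touch the block via 'vaddr' ... */

        dma_pool_free(desc_pool, vaddr, dma);
        return 0;
}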
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
sec_bulk.c 283 static unsigned long enc_pools_cleanup(struct page ***pools, int npools) in enc_pools_cleanup() argument
289 if (pools[i]) { in enc_pools_cleanup()
291 if (pools[i][j]) { in enc_pools_cleanup()
292 __free_page(pools[i][j]); in enc_pools_cleanup()
296 kfree(pools[i]); in enc_pools_cleanup()
297 pools[i] = NULL; in enc_pools_cleanup()
/linux-4.4.14/drivers/atm/
zatm.c 1034 unsigned long pools; in zatm_int() local
1037 pools = zin(RQA); in zatm_int()
1038 EVENT("RQA (0x%08x)\n",pools,0); in zatm_int()
1039 for (i = 0; pools; i++) { in zatm_int()
1040 if (pools & 1) { in zatm_int()
1044 pools >>= 1; in zatm_int()
1048 unsigned long pools; in zatm_int() local
1050 pools = zin(RQU); in zatm_int()
1052 dev->number,pools); in zatm_int()
1054 for (i = 0; pools; i++) { in zatm_int()
[all …]
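The zatm.c interrupt handler reads a status word (RQA/RQU) in which each set bit names a receive free-buffer pool needing service, then walks the bits lowest-first. The idiom in isolation, with a placeholder per-pool handler:

static void refill_pool(int pool)
{
        /* service free-buffer pool 'pool' (placeholder) */
}

static void service_pools(unsigned long pools)
{
        int i;

        for (i = 0; pools; i++) {       /* stop once no bits remain */
                if (pools & 1)
                        refill_pool(i); /* bit i set => pool i needs work */
                pools >>= 1;
        }
}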
/linux-4.4.14/Documentation/devicetree/bindings/misc/
fsl,qoriq-mc.txt 6 block is enabled, pools of hardware resources are available, such as
7 queues, buffer pools, I/O interfaces. These resources are building
/linux-4.4.14/Documentation/
workqueue.txt 66 * Use per-CPU unified worker pools shared by all wq to provide
88 called worker-pools.
92 which manages worker-pools and processes the queued work items.
94 There are two worker-pools, one for normal work items and the other
96 worker-pools to serve work items queued on unbound workqueues - the
97 number of these backing pools is dynamic.
137 For unbound workqueues, the number of backing pools is dynamic.
140 backing worker pools matching the attributes. The responsibility of
172 worker-pools which host workers which are not bound to any
175 worker-pools try to start execution of work items as soon as
[all …]
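The workqueue.txt excerpts describe the server side of the API: queued work items are executed by shared worker-pools (per-CPU pools for normal and high-priority work, dynamically created backing pools for unbound workqueues) rather than by per-workqueue threads. For context, a minimal sketch of the client side; the workqueue name and the work function are placeholders.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
        /* runs in process context on a worker borrowed from a worker-pool */
}

static int my_init(void)
{
        /* WQ_UNBOUND work is served by the dynamic backing pools described
         * above; drop the flag to use the per-CPU worker-pools instead. */
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
        if (!my_wq)
                return -ENOMEM;

        INIT_WORK(&my_work, my_work_fn);
        queue_work(my_wq, &my_work);
        return 0;
}

static void my_exit(void)
{
        flush_workqueue(my_wq);
        destroy_workqueue(my_wq);
}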
oops-tracing.txt 167 kernel's dynamic memory pools there are no fixed locations for either
DMA-API.txt 85 or more using dma_alloc_coherent(), you can use DMA pools. These work
kernel-parameters.txt 3611 service thread pools. Depending on how many NICs
/linux-4.4.14/arch/powerpc/kernel/
iommu.c 216 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
244 pool = &(tbl->pools[0]); in iommu_range_alloc()
272 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
390 p = &tbl->pools[pool_nr]; in get_pool()
689 p = &tbl->pools[i]; in iommu_init_table()
1022 spin_lock(&tbl->pools[i].lock); in iommu_take_ownership()
1038 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
1051 spin_lock(&tbl->pools[i].lock); in iommu_release_ownership()
1060 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
/linux-4.4.14/drivers/soc/ti/
knav_qmss.h 209 struct list_head pools; member
305 struct list_head pools; member
363 list_for_each_entry(pool, &kdev->pools, list)
knav_qmss_queue.c 807 node = &region->pools; in knav_pool_create()
808 list_for_each_entry(pi, &region->pools, region_inst) { in knav_pool_create()
822 list_add_tail(&pool->list, &kdev->pools); in knav_pool_create()
1023 list_add(&pool->region_inst, &region->pools); in knav_queue_setup_region()
1106 INIT_LIST_HEAD(&region->pools); in knav_queue_setup_regions()
1331 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) in knav_queue_free_regions()
1713 INIT_LIST_HEAD(&kdev->pools); in knav_queue_probe()
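The knav_qmss hits are ordinary <linux/list.h> usage: each descriptor pool is linked onto the device's kdev->pools list and onto its region's region->pools list. A minimal sketch of that intrusive-list pattern, with made-up structure names:

#include <linux/list.h>
#include <linux/slab.h>

struct my_region {
        struct list_head pools;         /* every pool carved from this region */
};

struct my_pool {
        struct list_head region_inst;   /* link on my_region.pools */
};

static struct my_pool *add_pool(struct my_region *region)
{
        struct my_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return NULL;
        list_add_tail(&pool->region_inst, &region->pools);
        return pool;
}

static void free_region_pools(struct my_region *region)
{
        struct my_pool *pool, *tmp;

        /* _safe variant because entries are deleted while walking */
        list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) {
                list_del(&pool->region_inst);
                kfree(pool);
        }
}

/* region->pools must be set up first with INIT_LIST_HEAD(&region->pools),
 * exactly as knav_queue_setup_regions() does above. */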
/linux-4.4.14/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt 6 processors(PDSP), linking RAM, descriptor pools and infrastructure
42 - queue-pools : child node classifying the queue ranges into pools.
43 Queue ranges are grouped into 3 type of pools:
144 queue-pools {
/linux-4.4.14/include/linux/
iommu-common.h 25 struct iommu_pool pools[IOMMU_NR_POOLS]; member
/linux-4.4.14/drivers/md/
dm.c 3486 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); in dm_alloc_md_mempools() local
3491 if (!pools) in dm_alloc_md_mempools()
3505 pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); in dm_alloc_md_mempools()
3506 if (!pools->rq_pool) in dm_alloc_md_mempools()
3521 pools->io_pool = mempool_create_slab_pool(pool_size, cachep); in dm_alloc_md_mempools()
3522 if (!pools->io_pool) in dm_alloc_md_mempools()
3526 pools->bs = bioset_create_nobvec(pool_size, front_pad); in dm_alloc_md_mempools()
3527 if (!pools->bs) in dm_alloc_md_mempools()
3530 if (integrity && bioset_integrity_create(pools->bs, pool_size)) in dm_alloc_md_mempools()
3533 return pools; in dm_alloc_md_mempools()
[all …]
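dm_alloc_md_mempools() builds its I/O bookkeeping pools from mempool_create_slab_pool() and bioset_create_nobvec() so that requests can still be allocated under memory pressure. A minimal sketch of the generic mempool part only; the cache name, element type and reserve count are placeholders.

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct my_io { int dummy; };            /* per-request bookkeeping */

static struct kmem_cache *my_io_cache;
static mempool_t *my_io_pool;

static int my_pools_init(void)
{
        my_io_cache = kmem_cache_create("my_io", sizeof(struct my_io),
                                        0, 0, NULL);
        if (!my_io_cache)
                return -ENOMEM;

        /* Pre-reserve 16 elements so allocation cannot fail completely
         * even when the slab allocator is under pressure. */
        my_io_pool = mempool_create_slab_pool(16, my_io_cache);
        if (!my_io_pool) {
                kmem_cache_destroy(my_io_cache);
                return -ENOMEM;
        }
        return 0;
}

static void my_pools_exit(void)
{
        mempool_destroy(my_io_pool);
        kmem_cache_destroy(my_io_cache);
}

/* In the I/O path, mempool_alloc(my_io_pool, GFP_NOIO) draws from the slab
 * or falls back to the reserve; mempool_free() refills the reserve first. */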
dm.h 226 void dm_free_md_mempools(struct dm_md_mempools *pools);
dm-thin.c 486 struct list_head pools; member
492 INIT_LIST_HEAD(&dm_thin_pool_table.pools); in pool_table_init()
498 list_add(&pool->list, &dm_thin_pool_table.pools); in __pool_table_insert()
513 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) { in __pool_table_lookup()
529 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) { in __pool_table_lookup_metadata_dev()
/linux-4.4.14/Documentation/arm/keystone/
knav-qmss.txt 10 processors(PDSP), linking RAM, descriptor pools and infrastructure
23 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
/linux-4.4.14/drivers/staging/fsl-mc/
README.txt 33 The MC uses DPAA2 hardware resources such as queues, buffer pools, and
62 | -buffer pools -DPMCP |
327 -DPBPs for network buffer pools
/linux-4.4.14/arch/powerpc/include/asm/
iommu.h 111 struct iommu_pool pools[IOMMU_NR_POOLS]; member
/linux-4.4.14/arch/arm/boot/dts/
k2l-netcp.dtsi 38 queue-pools {
k2e-netcp.dtsi 38 queue-pools {
k2hk-netcp.dtsi 51 queue-pools {
/linux-4.4.14/Documentation/devicetree/bindings/soc/fsl/
bman.txt 14 BMan supports hardware allocation and deallocation of buffers belonging to pools
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
ixgbe_lib.c 504 bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); in ixgbe_set_sriov_queues() local
517 if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) { in ixgbe_set_sriov_queues()
ixgbe_main.c 7904 bool pools; in ixgbe_setup_tc() local
7913 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); in ixgbe_setup_tc()
7914 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) in ixgbe_setup_tc()
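Both ixgbe hits derive a boolean from the adapter's forwarding bitmask: bit 0 appears to track the default pool, so a first clear bit above index 1 means at least one additional pool has been claimed. The same test against a local bitmap, with placeholder names:

#include <linux/bitops.h>
#include <linux/types.h>

static bool extra_pools_in_use(const unsigned long *fwd_bitmask)
{
        /* find_first_zero_bit() returns the index of the lowest clear bit
         * within the first 32 bits; an index above 1 means bits 0 and 1
         * are both set, i.e. something beyond the default pool is active. */
        return find_first_zero_bit(fwd_bitmask, 32) > 1;
}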
/linux-4.4.14/Documentation/block/
queue-sysfs.txt 104 each request queue may have up to N request pools, each independently
biodoc.txt 591 subsystems like bio to maintain their own reserve memory pools for guaranteed
621 is over. If allocating from multiple pools in the same code path, the order
626 for a non-clone bio. There are the 6 pools setup for different size biovecs,
/linux-4.4.14/Documentation/filesystems/caching/
operations.txt 192 pools.
/linux-4.4.14/Documentation/scsi/
osd.txt 67 and initializes some global pools. This should be done once per scsi_device
ChangeLog.lpfc 1574 * Added code for safety pools for following objects: mbuf/bpl,
1796 * Removed usage of all memory pools.
/linux-4.4.14/arch/sparc/kernel/
pci_sun4v.c 537 pool = &(iommu->pools[pool_nr]); in probe_existing_entries()
/linux-4.4.14/Documentation/vm/
hugetlbpage.txt 143 With support for multiple huge page pools at run-time available, much of
/linux-4.4.14/Documentation/filesystems/cifs/
CHANGES 400 do not get marked delete on close. Display sizes of cifs buffer pools in
442 minimum number of large and small network buffers in the buffer pools,
/linux-4.4.14/drivers/message/fusion/lsi/
mpi_history.txt 309 * Added generic defines for hot spare pools and RAID
/linux-4.4.14/Documentation/filesystems/
proc.txt 753 Linux uses slab pools for memory management above page level in version 2.2.