/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
D | sec_bulk.c
      284  static unsigned long enc_pools_cleanup(struct page ***pools, int npools)  in enc_pools_cleanup() argument
      290  if (pools[i]) {  in enc_pools_cleanup()
      292  if (pools[i][j]) {  in enc_pools_cleanup()
      293  __free_page(pools[i][j]);  in enc_pools_cleanup()
      297  OBD_FREE(pools[i], PAGE_CACHE_SIZE);  in enc_pools_cleanup()
      298  pools[i] = NULL;  in enc_pools_cleanup()
      312  static void enc_pools_insert(struct page ***pools, int npools, int npages)  in enc_pools_insert() argument
      342  LASSERT(pools[np_idx][ng_idx] != NULL);  in enc_pools_insert()
      344  page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];  in enc_pools_insert()
      345  pools[np_idx][ng_idx] = NULL;  in enc_pools_insert()
      [all …]
|
/linux-4.1.27/drivers/gpu/drm/ttm/ |
D | ttm_page_alloc_dma.c
      104  struct list_head pools; /* The 'struct device->dma_pools link */  member
      155  struct list_head pools;  member
      171  struct list_head pools;  member
      524  list_for_each_entry_reverse(p, &_manager->pools, pools) {  in ttm_dma_free_pool()
      531  list_del(&p->pools);  in ttm_dma_free_pool()
      536  list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {  in ttm_dma_free_pool()
      547  list_del(&pool->pools);  in ttm_dma_free_pool()
      601  INIT_LIST_HEAD(&sec_pool->pools);  in ttm_dma_pool_init()
      607  INIT_LIST_HEAD(&pool->pools);  in ttm_dma_pool_init()
      630  list_add(&sec_pool->pools, &_manager->pools);  in ttm_dma_pool_init()
      [all …]
|
D | ttm_page_alloc.c
      116  struct ttm_page_pool pools[NUM_POOLS];  member
      272  return &_manager->pools[pool_index];  in ttm_get_pool()
      413  pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];  in ttm_pool_shrink_scan()
      430  count += _manager->pools[i].npages;  in ttm_pool_shrink_count()
      858  ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);  in ttm_page_alloc_fini()
      932  p = &_manager->pools[i];  in ttm_page_alloc_debugfs()
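The ttm_pool_shrink_scan()/ttm_pool_shrink_count() hits above are the hooks that let memory reclaim drain pages cached in these pools. Below is a minimal sketch of that registration pattern against the 4.1-era shrinker interface; the atomic page counter and the my_pool_* names are placeholders standing in for TTM's real pool bookkeeping, not code from the file.

/* Sketch of registering a shrinker over a set of page pools; the atomic
 * counter stands in for real per-pool accounting. */
#include <linux/kernel.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t my_pooled_pages;	/* pages currently cached in the pools */

static unsigned long my_pool_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return atomic_long_read(&my_pooled_pages);
}

static unsigned long my_pool_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	unsigned long freed = min_t(unsigned long, sc->nr_to_scan,
				    atomic_long_read(&my_pooled_pages));

	/* A real implementation would walk its pools[] array here and
	 * give 'freed' pages back to the page allocator. */
	atomic_long_sub(freed, &my_pooled_pages);
	return freed;
}

static struct shrinker my_pool_shrinker = {
	.count_objects	= my_pool_shrink_count,
	.scan_objects	= my_pool_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int my_pools_register(void)
{
	return register_shrinker(&my_pool_shrinker);
}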
|
/linux-4.1.27/lib/ |
D | iommu-common.c
       85  spin_lock_init(&(iommu->pools[i].lock));  in iommu_tbl_pool_init()
       86  iommu->pools[i].start = start;  in iommu_tbl_pool_init()
       87  iommu->pools[i].hint = start;  in iommu_tbl_pool_init()
       89  iommu->pools[i].end = start - 1;  in iommu_tbl_pool_init()
      135  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
      165  pool = &(iommu->pools[0]);  in iommu_tbl_range_alloc()
      201  pool = &(iommu->pools[pool_nr]);  in iommu_tbl_range_alloc()
      246  p = &tbl->pools[pool_nr];  in get_pool()
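lib/iommu-common.c, together with the powerpc iommu.c/iommu.h entries further down, splits one allocation table into IOMMU_NR_POOLS sub-ranges, each with its own spinlock and search hint, so concurrent DMA-range allocations rarely contend on a single lock. The following is a generic, hedged sketch of that layout; NR_POOLS, struct range_pool, and pick_pool() are illustrative names, not the kernel's structures.

/* Generic sketch of the per-pool locking layout used by the iommu code. */
#include <linux/spinlock.h>
#include <linux/smp.h>

#define NR_POOLS 4

struct range_pool {
	spinlock_t	lock;
	unsigned long	start;
	unsigned long	end;
	unsigned long	hint;	/* next place to start searching */
};

struct range_table {
	unsigned long		size;
	struct range_pool	pools[NR_POOLS];
};

static void range_table_init(struct range_table *tbl, unsigned long size)
{
	unsigned long chunk = size / NR_POOLS;
	unsigned long start = 0;
	int i;

	tbl->size = size;
	for (i = 0; i < NR_POOLS; i++) {
		spin_lock_init(&tbl->pools[i].lock);
		tbl->pools[i].start = start;
		tbl->pools[i].hint = start;
		start += chunk;
		tbl->pools[i].end = start - 1;
	}
}

/* Spread CPUs across pools so concurrent allocations take different locks. */
static struct range_pool *pick_pool(struct range_table *tbl)
{
	return &tbl->pools[raw_smp_processor_id() % NR_POOLS];
}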
|
/linux-4.1.27/drivers/staging/android/ion/ |
D | ion_system_heap.c
       52  struct ion_page_pool *pools[0];  member
       60  struct ion_page_pool *pool = heap->pools[order_to_index(order)];  in alloc_buffer_page()
       87  struct ion_page_pool *pool = heap->pools[order_to_index(order)];  in free_buffer_page()
      220  struct ion_page_pool *pool = sys_heap->pools[i];  in ion_system_heap_shrink()
      249  struct ion_page_pool *pool = sys_heap->pools[i];  in ion_system_heap_debug_show()
      284  heap->pools[i] = pool;  in ion_system_heap_create()
      292  ion_page_pool_destroy(heap->pools[i]);  in ion_system_heap_create()
      305  ion_page_pool_destroy(sys_heap->pools[i]);  in ion_system_heap_destroy()
|
/linux-4.1.27/mm/ |
D | dmapool.c
       53  struct list_head pools;  member
       84  list_for_each_entry(pool, &dev->dma_pools, pools) {  in show_pools()
      108  static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
      172  INIT_LIST_HEAD(&retval->pools);  in dma_pool_create()
      186  list_add(&retval->pools, &dev->dma_pools);  in dma_pool_create()
      194  list_del(&retval->pools);  in dma_pool_create()
      276  list_del(&pool->pools);  in dma_pool_destroy()
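The mm/dmapool.c entry above is the implementation behind the DMA pool API mentioned in DMA-API.txt further down. A hedged usage sketch follows; the device pointer, pool name, and 64-byte descriptor size are invented for illustration, while the dma_pool_* calls themselves are the documented interface.

/* Illustrative sketch only: "my_dev" and the 64-byte descriptor size are
 * made up for this example. */
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

static struct dma_pool *desc_pool;

static int my_setup(struct device *my_dev)
{
	void *desc;
	dma_addr_t desc_dma;

	/* Pool of 64-byte descriptors, 16-byte aligned, no boundary restriction. */
	desc_pool = dma_pool_create("my-descs", my_dev, 64, 16, 0);
	if (!desc_pool)
		return -ENOMEM;

	/* Allocate one descriptor; desc_dma is the bus address for the device. */
	desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &desc_dma);
	if (!desc) {
		dma_pool_destroy(desc_pool);
		return -ENOMEM;
	}

	/* ... program desc_dma into the hardware ... */

	dma_pool_free(desc_pool, desc, desc_dma);
	dma_pool_destroy(desc_pool);
	return 0;
}

The pool hands back both the kernel virtual address and the bus address, which is why drivers with many small, fixed-size descriptors prefer it over repeated dma_alloc_coherent() calls.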
|
/linux-4.1.27/drivers/atm/ |
D | zatm.c
     1034  unsigned long pools;  in zatm_int() local
     1037  pools = zin(RQA);  in zatm_int()
     1038  EVENT("RQA (0x%08x)\n",pools,0);  in zatm_int()
     1039  for (i = 0; pools; i++) {  in zatm_int()
     1040  if (pools & 1) {  in zatm_int()
     1044  pools >>= 1;  in zatm_int()
     1048  unsigned long pools;  in zatm_int() local
     1050  pools = zin(RQU);  in zatm_int()
     1052  dev->number,pools);  in zatm_int()
     1054  for (i = 0; pools; i++) {  in zatm_int()
     [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/misc/ |
D | fsl,qoriq-mc.txt
        6  block is enabled, pools of hardware resources are available, such as
        7  queues, buffer pools, I/O interfaces. These resources are building
|
/linux-4.1.27/Documentation/ |
D | workqueue.txt
       66  * Use per-CPU unified worker pools shared by all wq to provide
       88  called worker-pools.
       92  which manages worker-pools and processes the queued work items.
       94  There are two worker-pools, one for normal work items and the other
       96  worker-pools to serve work items queued on unbound workqueues - the
       97  number of these backing pools is dynamic.
      137  For unbound workqueues, the number of backing pools is dynamic.
      140  backing worker pools matching the attributes. The responsibility of
      172  woker-pools which host workers which are not bound to any
      175  worker-pools try to start execution of work items as soon as
      [all …]
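The worker-pool text above describes what backs both per-CPU and unbound workqueues. A minimal usage sketch follows; the queue name, work item, and handler are invented, while alloc_workqueue(), INIT_WORK(), queue_work(), and destroy_workqueue() are the documented interface. Work queued on a WQ_UNBOUND queue is served by the dynamically created unbound worker-pools described above.

/* Minimal workqueue usage sketch; "my_wq", "my_work" and the handler are
 * placeholders, only the workqueue calls are the real interface. */
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* Runs in process context on a worker borrowed from a worker-pool. */
}

static int my_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}

static void my_exit(void)
{
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}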
|
D | oops-tracing.txt | 167 kernel's dynamic memory pools there are no fixed locations for either
|
D | DMA-API.txt | 85 or more using dma_alloc_coherent(), you can use DMA pools. These work
|
D | kernel-parameters.txt | 3491 service thread pools. Depending on how many NICs
|
/linux-4.1.27/drivers/soc/ti/ |
D | knav_qmss.h
      208  struct list_head pools;  member
      304  struct list_head pools;  member
      362  list_for_each_entry(pool, &kdev->pools, list)
|
D | knav_qmss_queue.c
      801  node = &region->pools;  in knav_pool_create()
      802  list_for_each_entry(pi, &region->pools, region_inst) {  in knav_pool_create()
      816  list_add_tail(&pool->list, &kdev->pools);  in knav_pool_create()
     1017  list_add(&pool->region_inst, &region->pools);  in knav_queue_setup_region()
     1100  INIT_LIST_HEAD(&region->pools);  in knav_queue_setup_regions()
     1325  list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)  in knav_queue_free_regions()
     1694  INIT_LIST_HEAD(&kdev->pools);  in knav_queue_probe()
|
/linux-4.1.27/Documentation/devicetree/bindings/soc/ti/ |
D | keystone-navigator-qmss.txt
        6  processors(PDSP), linking RAM, descriptor pools and infrastructure
       42  - queue-pools : child node classifying the queue ranges into pools.
       43  Queue ranges are grouped into 3 type of pools:
      144  queue-pools {
|
/linux-4.1.27/drivers/md/ |
D | dm.c
     3538  struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);  in dm_alloc_md_mempools() local
     3543  if (!pools)  in dm_alloc_md_mempools()
     3557  pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);  in dm_alloc_md_mempools()
     3558  if (!pools->rq_pool)  in dm_alloc_md_mempools()
     3573  pools->io_pool = mempool_create_slab_pool(pool_size, cachep);  in dm_alloc_md_mempools()
     3574  if (!pools->io_pool)  in dm_alloc_md_mempools()
     3578  pools->bs = bioset_create_nobvec(pool_size, front_pad);  in dm_alloc_md_mempools()
     3579  if (!pools->bs)  in dm_alloc_md_mempools()
     3582  if (integrity && bioset_integrity_create(pools->bs, pool_size))  in dm_alloc_md_mempools()
     3585  return pools;  in dm_alloc_md_mempools()
     [all …]
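dm_alloc_md_mempools() above builds its I/O pools with mempool_create_slab_pool() on top of slab caches so allocations have a guaranteed reserve. A hedged sketch of that pattern follows; struct my_io, the cache name, and MY_POOL_SIZE are placeholders, not device-mapper's.

/* Sketch of the mempool-over-slab pattern; names and sizes are invented. */
#include <linux/slab.h>
#include <linux/mempool.h>

#define MY_POOL_SIZE 16

struct my_io {
	int flags;
	void *private;
};

static struct kmem_cache *my_io_cache;
static mempool_t *my_io_pool;

static int my_pools_init(void)
{
	my_io_cache = kmem_cache_create("my_io", sizeof(struct my_io), 0, 0, NULL);
	if (!my_io_cache)
		return -ENOMEM;

	/* Guarantees at least MY_POOL_SIZE objects even under memory pressure. */
	my_io_pool = mempool_create_slab_pool(MY_POOL_SIZE, my_io_cache);
	if (!my_io_pool) {
		kmem_cache_destroy(my_io_cache);
		return -ENOMEM;
	}
	return 0;
}

static void my_pools_exit(void)
{
	mempool_destroy(my_io_pool);
	kmem_cache_destroy(my_io_cache);
}

mempool_alloc() falls back to the reserved objects when the slab allocator cannot satisfy the request, which is what makes the pattern safe on the I/O path.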
|
D | dm.h | 227 void dm_free_md_mempools(struct dm_md_mempools *pools);
|
D | dm-thin.c
      404  struct list_head pools;  member
      410  INIT_LIST_HEAD(&dm_thin_pool_table.pools);  in pool_table_init()
      416  list_add(&pool->list, &dm_thin_pool_table.pools);  in __pool_table_insert()
      431  list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {  in __pool_table_lookup()
      447  list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {  in __pool_table_lookup_metadata_dev()
|
/linux-4.1.27/include/linux/ |
D | iommu-common.h | 24 struct iommu_pool pools[IOMMU_NR_POOLS]; member
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | iommu.c
      216  pool = &(tbl->pools[pool_nr]);  in iommu_range_alloc()
      244  pool = &(tbl->pools[0]);  in iommu_range_alloc()
      272  pool = &tbl->pools[pool_nr];  in iommu_range_alloc()
      390  p = &tbl->pools[pool_nr];  in get_pool()
      687  p = &tbl->pools[i];  in iommu_init_table()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | iommu.h | 75 struct iommu_pool pools[IOMMU_NR_POOLS]; member
|
/linux-4.1.27/Documentation/devicetree/bindings/soc/fsl/ |
D | bman.txt | 14 BMan supports hardware allocation and deallocation of buffers belonging to pools
|
/linux-4.1.27/Documentation/block/ |
D | queue-sysfs.txt | 96 each request queue may have up to N request pools, each independently
|
D | biodoc.txt
      591  subsystems like bio to maintain their own reserve memory pools for guaranteed
      621  is over. If allocating from multiple pools in the same code path, the order
      626  for a non-clone bio. There are the 6 pools setup for different size biovecs,
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_lib.c
      504  bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);  in ixgbe_set_sriov_queues() local
      517  if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {  in ixgbe_set_sriov_queues()
|
D | ixgbe_main.c
     7694  bool pools;  in ixgbe_setup_tc() local
     7702  pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);  in ixgbe_setup_tc()
     7703  if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)  in ixgbe_setup_tc()
|
/linux-4.1.27/Documentation/filesystems/caching/ |
D | operations.txt | 192 pools.
|
/linux-4.1.27/Documentation/scsi/ |
D | osd.txt | 67 and initializes some global pools. This should be done once per scsi_device
|
D | ChangeLog.lpfc
     1574  * Added code for safety pools for following objects: mbuf/bpl,
     1796  * Removed usage of all memory pools.
|
/linux-4.1.27/arch/sparc/kernel/ |
D | pci_sun4v.c | 537 pool = &(iommu->pools[pool_nr]); in probe_existing_entries()
|
/linux-4.1.27/Documentation/vm/ |
D | hugetlbpage.txt | 143 With support for multiple huge page pools at run-time available, much of
|
/linux-4.1.27/Documentation/filesystems/cifs/ |
D | CHANGES
      400  do not get marked delete on close. Display sizes of cifs buffer pools in
      442  minimum number of large and small network buffers in the buffer pools,
|
/linux-4.1.27/drivers/message/fusion/lsi/ |
D | mpi_history.txt | 309 * Added generic defines for hot spare pools and RAID
|
/linux-4.1.27/Documentation/filesystems/ |
D | proc.txt | 735 Linux uses slab pools for memory management above page level in version 2.2.
|