uc_pool            46 arch/ia64/kernel/uncached.c 	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
uc_pool            51 arch/ia64/kernel/uncached.c 		atomic_inc(&uc_pool->status);
uc_pool            58 arch/ia64/kernel/uncached.c 	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
uc_pool            62 arch/ia64/kernel/uncached.c 		atomic_inc(&uc_pool->status);
uc_pool            75 arch/ia64/kernel/uncached.c static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
uc_pool            78 arch/ia64/kernel/uncached.c 	int status, i, nchunks_added = uc_pool->nchunks_added;
uc_pool            81 arch/ia64/kernel/uncached.c 	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
uc_pool            84 arch/ia64/kernel/uncached.c 	if (uc_pool->nchunks_added > nchunks_added) {
uc_pool            86 arch/ia64/kernel/uncached.c 		mutex_unlock(&uc_pool->add_chunk_mutex);
uc_pool            90 arch/ia64/kernel/uncached.c 	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
uc_pool            91 arch/ia64/kernel/uncached.c 		mutex_unlock(&uc_pool->add_chunk_mutex);
uc_pool           101 arch/ia64/kernel/uncached.c 		mutex_unlock(&uc_pool->add_chunk_mutex);
uc_pool           122 arch/ia64/kernel/uncached.c 		atomic_set(&uc_pool->status, 0);
uc_pool           123 arch/ia64/kernel/uncached.c 		smp_call_function(uncached_ipi_visibility, uc_pool, 1);
uc_pool           124 arch/ia64/kernel/uncached.c 		if (atomic_read(&uc_pool->status))
uc_pool           141 arch/ia64/kernel/uncached.c 	atomic_set(&uc_pool->status, 0);
uc_pool           142 arch/ia64/kernel/uncached.c 	smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
uc_pool           143 arch/ia64/kernel/uncached.c 	if (atomic_read(&uc_pool->status))
uc_pool           150 arch/ia64/kernel/uncached.c 	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
uc_pool           154 arch/ia64/kernel/uncached.c 	uc_pool->nchunks_added++;
uc_pool           155 arch/ia64/kernel/uncached.c 	mutex_unlock(&uc_pool->add_chunk_mutex);
uc_pool           164 arch/ia64/kernel/uncached.c 	mutex_unlock(&uc_pool->add_chunk_mutex);
uc_pool           182 arch/ia64/kernel/uncached.c 	struct uncached_pool *uc_pool;
uc_pool           195 arch/ia64/kernel/uncached.c 		uc_pool = &uncached_pools[nid];
uc_pool           196 arch/ia64/kernel/uncached.c 		if (uc_pool->pool == NULL)
uc_pool           199 arch/ia64/kernel/uncached.c 			uc_addr = gen_pool_alloc(uc_pool->pool,
uc_pool           203 arch/ia64/kernel/uncached.c 		} while (uncached_add_chunk(uc_pool, nid) == 0);
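
The arch/ia64/kernel/uncached.c hits above cover the whole IA64 uncached allocator: per-node struct uncached_pool instances wrap a genalloc pool, uncached_add_chunk() converts one IA64 granule from cached to uncached under add_chunk_mutex (re-checking nchunks_added after taking the lock and capping at MAX_CONVERTED_CHUNKS_PER_NODE), and the smp_call_function() IPIs (uncached_ipi_visibility, uncached_ipi_mc_drain) run on every other CPU, atomically bumping uc_pool->status when a CPU fails so the initiator can detect the error. Below is a minimal sketch of the caller-side pattern visible at lines 195-203, with the surrounding node-iteration logic elided; uc_alloc_from_node is a hypothetical wrapper name, not a kernel symbol:

	/* Try the per-node pool first; when it is empty, ask
	 * uncached_add_chunk() to convert one more granule to uncached
	 * and retry, until conversion itself fails. */
	static unsigned long uc_alloc_from_node(int nid, int n_pages)
	{
		struct uncached_pool *uc_pool = &uncached_pools[nid];
		unsigned long uc_addr;

		if (uc_pool->pool == NULL)	/* node never got a pool */
			return 0;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;	/* chunk already uncached */
		} while (uncached_add_chunk(uc_pool, nid) == 0);
		return 0;	/* conversion failed; caller tries another node */
	}
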
uc_pool           117 drivers/gpu/drm/ttm/ttm_page_alloc.c 			struct ttm_page_pool	uc_pool;
uc_pool           973 drivers/gpu/drm/ttm/ttm_page_alloc.c 	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
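
In drivers/gpu/drm/ttm/ttm_page_alloc.c, uc_pool is one of the four struct ttm_page_pool members of the global pool manager (declared around line 117 alongside wc_pool and the *_dma32 variants) and is initialized at line 973 with GFP_HIGHUSER and the debug name "uc". A hedged sketch of how TTM selects the uncached pool, modeled on the driver's pool-selection helper; the name uc_pool_pick and the exact index arithmetic are assumptions (the real logic lives in ttm_get_pool()):

	/* Map caching state plus the DMA32 flag onto one of the manager's
	 * four pools; cached allocations bypass the pools entirely. */
	static struct ttm_page_pool *uc_pool_pick(int flags,
						  enum ttm_caching_state cstate)
	{
		int idx;

		if (cstate == tt_cached)
			return NULL;			/* no pooling needed */
		idx = (cstate == tt_wc) ? 0 : 1;	/* wc_pool vs. uc_pool */
		if (flags & TTM_PAGE_FLAG_DMA32)
			idx += 2;			/* *_pool_dma32 variants */
		return &_manager->pools[idx];
	}
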
uc_pool            58 drivers/infiniband/sw/rxe/rxe.c 	rxe_pool_cleanup(&rxe->uc_pool);
uc_pool           183 drivers/infiniband/sw/rxe/rxe.c 	err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
uc_pool           252 drivers/infiniband/sw/rxe/rxe.c 	rxe_pool_cleanup(&rxe->uc_pool);
uc_pool           150 drivers/infiniband/sw/rxe/rxe_verbs.c 	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
uc_pool           395 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct rxe_pool		uc_pool;
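
The rxe hits trace the lifecycle of the soft-RoCE user-context pool: struct rxe_dev embeds the pool (rxe_verbs.h:395), rxe_pool_init() sizes it at device setup (rxe.c:183), rxe_add_to_pool() claims one element per allocated ucontext (rxe_verbs.c:150), and rxe_pool_cleanup() runs on both the init error path (rxe.c:252) and device teardown (rxe.c:58). A condensed sketch of that sequence with error handling trimmed; rxe->max_ucontext as the pool limit is taken from this era's rxe_init_pools() and should be treated as an assumption:

	/* Pool lives for the whole device lifetime. */
	err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
			    rxe->max_ucontext);		/* rxe.c:183 */
	...
	/* One pool element per ib ucontext allocation. */
	err = rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);	/* rxe_verbs.c:150 */
	...
	rxe_drop_ref(uc);			/* element released via kref */
	...
	rxe_pool_cleanup(&rxe->uc_pool);	/* rxe.c:58 and :252 */
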