fence_array      1356 drivers/gpu/drm/drm_gem.c int drm_gem_fence_array_add(struct xarray *fence_array,
fence_array      1371 drivers/gpu/drm/drm_gem.c 	xa_for_each(fence_array, index, entry) {
fence_array      1377 drivers/gpu/drm/drm_gem.c 			xa_store(fence_array, index, fence, GFP_KERNEL);
fence_array      1384 drivers/gpu/drm/drm_gem.c 	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
fence_array      1406 drivers/gpu/drm/drm_gem.c int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
fence_array      1418 drivers/gpu/drm/drm_gem.c 		return drm_gem_fence_array_add(fence_array, fence);
fence_array      1427 drivers/gpu/drm/drm_gem.c 		ret = drm_gem_fence_array_add(fence_array, fences[i]);
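The _add_implicit hits show the read path forwarding a single fence straight to drm_gem_fence_array_add() and the write path adding each reservation fence in a loop. On the caller side, a submit path typically just walks its BO list and lets the helper do that work. A sketch under the assumption that the helper takes the GEM object and a write flag after the xarray (the prototype hit at drm_gem.h:401 above is truncated, so treat the parameter order as an assumption):

#include <drm/drm_gem.h>
#include <linux/xarray.h>

/* Illustrative submit-side loop: collect the implicit dependencies of
 * every GEM object a job touches into one xarray. Function and variable
 * names are made up for the example.
 */
static int job_add_implicit_deps(struct xarray *deps,
				 struct drm_gem_object **bos,
				 unsigned int bo_count, bool write)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bo_count; i++) {
		ret = drm_gem_fence_array_add_implicit(deps, bos[i], write);
		if (ret)
			return ret;
	}

	return 0;
}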
fence_array       682 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 	struct dma_fence_array *fence_array;
fence_array       701 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 	fence_array = to_dma_fence_array(fence);
fence_array       702 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 	for (i = 0; i < fence_array->num_fences; i++) {
fence_array       703 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 		struct dma_fence *child = fence_array->fences[i];
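The vmwgfx hits iterate a dma_fence_array container by hand: to_dma_fence_array() exposes num_fences and the fences[] child array. A hedged sketch of the same decompose-and-wait idea, using only dma-fence calls from <linux/dma-fence.h> and <linux/dma-fence-array.h> (the helper name is illustrative, and decomposing like this is only meaningful for signal-on-all arrays):

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* Illustrative wait helper modelled on the vmwgfx loop above: when the
 * fence is a dma_fence_array container, wait on each child fence rather
 * than on the container itself.
 */
static int wait_fence_maybe_array(struct dma_fence *fence)
{
	struct dma_fence_array *array;
	unsigned int i;
	long ret;

	if (!dma_fence_is_array(fence)) {
		ret = dma_fence_wait(fence, true);
		return ret < 0 ? ret : 0;
	}

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		ret = dma_fence_wait(array->fences[i], true);
		if (ret < 0)
			return ret;
	}

	return 0;
}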
fence_array       399 include/drm/drm_gem.h int drm_gem_fence_array_add(struct xarray *fence_array,
fence_array       401 include/drm/drm_gem.h int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
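Both helpers declared here operate on a caller-owned xarray of dependencies. A sketch of the surrounding lifecycle, assuming the conventions the drm_gem.c hits imply: the xarray needs XA_FLAGS_ALLOC because the add helper uses xa_alloc(), it takes ownership of each fence reference added to it, and the caller drains it with xa_for_each() plus dma_fence_put() before xa_destroy(). The sync_file in-fence and all local names are illustrative:

#include <drm/drm_gem.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/sync_file.h>
#include <linux/xarray.h>

/* Illustrative lifecycle of the dependency xarray: initialise, add an
 * explicit in-fence from userspace, wait on everything collected, then
 * drop the references the array owns and free it.
 */
static int collect_and_wait_deps(struct xarray *deps, int in_fence_fd)
{
	struct dma_fence *fence;
	unsigned long index;
	int ret = 0;

	xa_init_flags(deps, XA_FLAGS_ALLOC);

	/* Explicit dependency from userspace: sync_file_get_fence() returns
	 * a reference, which drm_gem_fence_array_add() takes over.
	 */
	if (in_fence_fd >= 0) {
		struct dma_fence *in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto out;
		}

		ret = drm_gem_fence_array_add(deps, in_fence);
		if (ret)
			goto out;
	}

	/* Wait for every collected dependency. */
	xa_for_each(deps, index, fence) {
		long err = dma_fence_wait(fence, true);

		if (err < 0) {
			ret = err;
			break;
		}
	}

out:
	/* Drop the references the array owns and free its slots. */
	xa_for_each(deps, index, fence)
		dma_fence_put(fence);
	xa_destroy(deps);

	return ret;
}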