Lines Matching refs: adev

60 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)  in amdgpu_vm_num_pdes()  argument
62 return adev->vm_manager.max_pfn >> amdgpu_vm_block_size; in amdgpu_vm_num_pdes()
72 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) in amdgpu_vm_directory_size() argument
74 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8); in amdgpu_vm_directory_size()
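
The two matches above carry the whole page-directory sizing math: one PDE per 2^block_size pages, eight bytes per PDE, padded to a GPU page. A minimal standalone sketch of that arithmetic follows; the struct, the sample max_pfn and the block-size value are stand-ins for illustration, not the driver's types.

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096u
    #define GPU_PAGE_ALIGN(x) (((x) + GPU_PAGE_SIZE - 1) & ~(uint64_t)(GPU_PAGE_SIZE - 1))

    struct vm_manager_stub { uint64_t max_pfn; };

    static unsigned vm_num_pdes(const struct vm_manager_stub *mgr, unsigned block_size)
    {
        /* one PDE covers 2^block_size GPU pages (line 62 above) */
        return mgr->max_pfn >> block_size;
    }

    static uint64_t vm_directory_size(const struct vm_manager_stub *mgr, unsigned block_size)
    {
        /* 8 bytes per PDE, padded to a full GPU page (line 74 above) */
        return GPU_PAGE_ALIGN((uint64_t)vm_num_pdes(mgr, block_size) * 8);
    }

    int main(void)
    {
        struct vm_manager_stub mgr = { .max_pfn = 1ull << 20 }; /* stand-in: 4 GiB of 4 KiB pages */
        printf("%u PDEs, %llu byte directory\n",
               vm_num_pdes(&mgr, 9),
               (unsigned long long)vm_directory_size(&mgr, 9));
        return 0;
    }
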
86 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, in amdgpu_vm_get_bos() argument
140 struct amdgpu_device *adev = ring->adev; in amdgpu_vm_grab_id() local
150 owner = atomic_long_read(&adev->vm_manager.ids[id].owner); in amdgpu_vm_grab_id()
161 for (i = 1; i < adev->vm_manager.nvm; ++i) { in amdgpu_vm_grab_id()
162 struct fence *fence = adev->vm_manager.ids[i].active; in amdgpu_vm_grab_id()
184 fence = adev->vm_manager.ids[choices[i]].active; in amdgpu_vm_grab_id()
188 return amdgpu_sync_fence(ring->adev, sync, fence); in amdgpu_vm_grab_id()
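
The amdgpu_vm_grab_id() matches show the id-reuse pattern: check whether one of the manager's ids is still owned by this VM, otherwise scan the active fences for a free slot (note the loop starts at 1, so id 0 is never handed out). The sketch below illustrates that scan only; the structs, the fence_signaled() helper and NUM_IDS are stand-ins, not driver API, and the real function syncs to the chosen id's fence instead of returning an index.

    #include <stdbool.h>
    #include <stddef.h>

    #define NUM_IDS 16

    struct stub_fence { bool signaled; };

    struct stub_vm_id {
        void *owner;               /* which VM last used this id */
        struct stub_fence *active; /* last work submitted with this id */
    };

    static bool fence_signaled(const struct stub_fence *f)
    {
        return f == NULL || f->signaled;
    }

    /* returns an id index, or -1 if every id is still busy */
    static int grab_id(struct stub_vm_id ids[NUM_IDS], void *vm)
    {
        int i;

        /* fast path: the id we used last time still belongs to us */
        for (i = 1; i < NUM_IDS; ++i)
            if (ids[i].owner == vm)
                return i;

        /* otherwise take any id whose previous work has finished */
        for (i = 1; i < NUM_IDS; ++i)
            if (fence_signaled(ids[i].active)) {
                ids[i].owner = vm;
                return i;
            }

        return -1; /* caller would have to wait on one of the active fences */
    }

    int main(void)
    {
        struct stub_fence done = { .signaled = true };
        struct stub_vm_id ids[NUM_IDS] = { [3] = { .owner = NULL, .active = &done } };
        int vm;                       /* any address works as a VM token here */
        return grab_id(ids, &vm) < 0; /* id 1 is free (NULL fence counts as idle) */
    }
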
247 void amdgpu_vm_fence(struct amdgpu_device *adev, in amdgpu_vm_fence() argument
254 fence_put(adev->vm_manager.ids[vm_id].active); in amdgpu_vm_fence()
255 adev->vm_manager.ids[vm_id].active = fence_get(fence); in amdgpu_vm_fence()
256 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); in amdgpu_vm_fence()
299 static void amdgpu_vm_update_pages(struct amdgpu_device *adev, in amdgpu_vm_update_pages() argument
308 uint64_t src = adev->gart.table_addr + (addr >> 12) * 8; in amdgpu_vm_update_pages()
309 amdgpu_vm_copy_pte(adev, ib, pe, src, count); in amdgpu_vm_update_pages()
312 amdgpu_vm_write_pte(adev, ib, pe, addr, in amdgpu_vm_update_pages()
316 amdgpu_vm_set_pte_pde(adev, ib, pe, addr, in amdgpu_vm_update_pages()
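
The three calls above are the three ways amdgpu_vm_update_pages() can emit PTE updates: copy the entries out of the GART table (line 308 computes the source from gart.table_addr), write a few values inline into the IB, or program a whole run with one set_pte_pde packet. A rough dispatch sketch follows; the enum, the flag argument and the small-count threshold are stand-ins, the actual selection conditions live in the driver.

    #include <stdbool.h>

    enum pte_update_method { PTE_COPY_FROM_GART, PTE_WRITE_INLINE, PTE_SET_RANGE };

    static enum pte_update_method
    pick_update_method(bool system_pages, unsigned count)
    {
        if (system_pages)
            /* system pages must be resolved through the GART table, so
             * copy the already-translated entries from there */
            return PTE_COPY_FROM_GART;
        if (count < 3)
            /* tiny updates: cheaper to embed the values in the IB directly */
            return PTE_WRITE_INLINE;
        /* large linear runs: one packet programs count PTEs with a stride */
        return PTE_SET_RANGE;
    }

    int main(void)
    {
        /* a 16-PTE VRAM run would take the large-range path */
        return pick_update_method(false, 16) == PTE_SET_RANGE ? 0 : 1;
    }
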
325 amdgpu_ib_free(job->adev, &job->ibs[i]); in amdgpu_vm_free_job()
338 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, in amdgpu_vm_clear_bo() argument
341 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; in amdgpu_vm_clear_bo()
369 amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0); in amdgpu_vm_clear_bo()
370 amdgpu_vm_pad_ib(adev, ib); in amdgpu_vm_clear_bo()
372 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, in amdgpu_vm_clear_bo()
383 amdgpu_ib_free(adev, ib); in amdgpu_vm_clear_bo()
400 uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr) in amdgpu_vm_map_gart() argument
405 result = adev->gart.pages_addr[addr >> PAGE_SHIFT]; in amdgpu_vm_map_gart()
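
Line 405 is the core of amdgpu_vm_map_gart(): look up the DMA address of the backing system page, then OR the in-page offset back in (the real function does this right after the table lookup). A standalone sketch, with a stand-in table and page size:

    #include <stdint.h>
    #include <stdio.h>

    #define SYS_PAGE_SHIFT 12
    #define SYS_PAGE_MASK  ((1ull << SYS_PAGE_SHIFT) - 1)

    static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
    {
        uint64_t result = pages_addr[addr >> SYS_PAGE_SHIFT]; /* DMA address of the page */
        result |= addr & SYS_PAGE_MASK;                       /* keep the in-page offset */
        return result;
    }

    int main(void)
    {
        uint64_t pages_addr[2] = { 0x100000000ull, 0x200000000ull }; /* stand-in table */
        printf("0x%llx\n", (unsigned long long)map_gart(pages_addr, 0x1234)); /* 0x200000234 */
        return 0;
    }
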
427 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, in amdgpu_vm_update_page_directory() argument
430 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; in amdgpu_vm_update_page_directory()
480 amdgpu_vm_update_pages(adev, ib, last_pde, in amdgpu_vm_update_page_directory()
494 amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count, in amdgpu_vm_update_page_directory()
498 amdgpu_vm_pad_ib(adev, ib); in amdgpu_vm_update_page_directory()
499 amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); in amdgpu_vm_update_page_directory()
501 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, in amdgpu_vm_update_page_directory()
515 amdgpu_ib_free(adev, ib); in amdgpu_vm_update_page_directory()
522 amdgpu_ib_free(adev, ib); in amdgpu_vm_update_page_directory()
540 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, in amdgpu_vm_frag_ptes() argument
579 amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, in amdgpu_vm_frag_ptes()
587 amdgpu_vm_update_pages(adev, ib, pe_start, addr, count, in amdgpu_vm_frag_ptes()
594 amdgpu_vm_update_pages(adev, ib, frag_start, addr, count, in amdgpu_vm_frag_ptes()
602 amdgpu_vm_update_pages(adev, ib, frag_end, addr, count, in amdgpu_vm_frag_ptes()
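
The four update_pages() calls inside amdgpu_vm_frag_ptes() correspond to a head/middle/tail split of the PTE range: only the part aligned to the hardware fragment size gets the fragment flag, the unaligned ends are written plainly. The sketch below shows just that split; the alignment value assumes 64 KB fragments of 4 KB pages (16 PTEs of 8 bytes each) and the addresses are stand-ins.

    #include <stdint.h>
    #include <stdio.h>

    #define FRAG_ALIGN 0x80ull /* 16 PTEs * 8 bytes, i.e. a 64 KB fragment of 4 KB pages */

    int main(void)
    {
        uint64_t pe_start = 0x1050, pe_end = 0x1c30; /* PTE byte addresses, stand-ins */

        uint64_t frag_start = (pe_start + FRAG_ALIGN - 1) & ~(FRAG_ALIGN - 1);
        uint64_t frag_end   = pe_end & ~(FRAG_ALIGN - 1);

        if (frag_start >= frag_end) {
            printf("range too small, write it all without the fragment flag\n");
            return 0;
        }
        printf("head:   0x%llx-0x%llx (no fragment flag)\n",
               (unsigned long long)pe_start, (unsigned long long)frag_start);
        printf("middle: 0x%llx-0x%llx (fragment flag set)\n",
               (unsigned long long)frag_start, (unsigned long long)frag_end);
        printf("tail:   0x%llx-0x%llx (no fragment flag)\n",
               (unsigned long long)frag_end, (unsigned long long)pe_end);
        return 0;
    }
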
621 static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, in amdgpu_vm_update_ptes() argument
646 amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner); in amdgpu_vm_update_ptes()
662 amdgpu_vm_frag_ptes(adev, ib, last_pte, in amdgpu_vm_update_ptes()
680 amdgpu_vm_frag_ptes(adev, ib, last_pte, in amdgpu_vm_update_ptes()
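
amdgpu_vm_update_ptes() walks the requested GPU VA range and chops it at page-table boundaries, handing each chunk to the fragment writer above. A sketch of that walk, with a stand-in block size and range:

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE  9                  /* stand-in: 2^9 = 512 PTEs per page table */
    #define PTES_PER_PT (1u << BLOCK_SIZE)

    int main(void)
    {
        uint64_t addr = 500, end = 1600;   /* page numbers, exclusive end, stand-ins */

        while (addr < end) {
            uint64_t pt_idx = addr >> BLOCK_SIZE;       /* which page table */
            unsigned offset = addr & (PTES_PER_PT - 1); /* first PTE inside it */
            unsigned nptes  = PTES_PER_PT - offset;     /* room left in this PT */

            if (nptes > end - addr)
                nptes = end - addr;

            printf("PT %llu: write %u PTEs starting at entry %u\n",
                   (unsigned long long)pt_idx, nptes, offset);
            addr += nptes;
        }
        return 0;
    }
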
703 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, in amdgpu_vm_bo_update_mapping() argument
709 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring; in amdgpu_vm_bo_update_mapping()
772 r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, in amdgpu_vm_bo_update_mapping()
777 amdgpu_ib_free(adev, ib); in amdgpu_vm_bo_update_mapping()
782 amdgpu_vm_pad_ib(adev, ib); in amdgpu_vm_bo_update_mapping()
784 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, in amdgpu_vm_bo_update_mapping()
798 amdgpu_ib_free(adev, ib); in amdgpu_vm_bo_update_mapping()
804 amdgpu_ib_free(adev, ib); in amdgpu_vm_bo_update_mapping()
821 int amdgpu_vm_bo_update(struct amdgpu_device *adev, in amdgpu_vm_bo_update() argument
834 addr += adev->vm_manager.vram_base_offset; in amdgpu_vm_bo_update()
839 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); in amdgpu_vm_bo_update()
847 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, in amdgpu_vm_bo_update()
882 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, in amdgpu_vm_clear_freed() argument
894 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); in amdgpu_vm_clear_freed()
918 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, in amdgpu_vm_clear_invalids() argument
930 r = amdgpu_vm_bo_update(adev, bo_va, NULL); in amdgpu_vm_clear_invalids()
940 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update); in amdgpu_vm_clear_invalids()
958 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, in amdgpu_vm_bo_add() argument
995 int amdgpu_vm_bo_map(struct amdgpu_device *adev, in amdgpu_vm_bo_map() argument
1018 if (last_pfn >= adev->vm_manager.max_pfn) { in amdgpu_vm_bo_map()
1019 dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n", in amdgpu_vm_bo_map()
1020 last_pfn, adev->vm_manager.max_pfn); in amdgpu_vm_bo_map()
1034 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " in amdgpu_vm_bo_map()
1065 BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev)); in amdgpu_vm_bo_map()
1078 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, in amdgpu_vm_bo_map()
1091 r = amdgpu_vm_clear_bo(adev, pt); in amdgpu_vm_bo_map()
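
The amdgpu_vm_bo_map() matches include the range validation that produces the "va above limit" message on lines 1019-1020: the last page of the requested mapping must stay below vm_manager.max_pfn. A sketch of that check, with a stand-in page size and sample values chosen to trip the error path:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096ull

    static int check_va_range(uint64_t saddr, uint64_t size, uint64_t max_pfn)
    {
        uint64_t eaddr = saddr + size;
        uint64_t last_pfn = eaddr / GPU_PAGE_SIZE;

        if (saddr >= eaddr)            /* overflow or zero-sized mapping */
            return -1;
        if (last_pfn >= max_pfn) {     /* the "va above limit" case in the listing */
            fprintf(stderr, "va above limit (0x%llx >= 0x%llx)\n",
                    (unsigned long long)last_pfn, (unsigned long long)max_pfn);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        /* a 1 MiB mapping that runs past a 4 GiB VM space: rejected */
        return check_va_range(0xFFFFF000ull, 0x100000ull, 1ull << 20) ? 1 : 0;
    }
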
1127 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, in amdgpu_vm_bo_unmap() argument
1183 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, in amdgpu_vm_bo_rmv() argument
1226 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, in amdgpu_vm_bo_invalidate() argument
1247 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_init() argument
1265 pd_size = amdgpu_vm_directory_size(adev); in amdgpu_vm_init()
1266 pd_entries = amdgpu_vm_num_pdes(adev); in amdgpu_vm_init()
1277 r = amdgpu_bo_create(adev, pd_size, align, true, in amdgpu_vm_init()
1289 r = amdgpu_vm_clear_bo(adev, vm->page_directory); in amdgpu_vm_init()
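
In amdgpu_vm_init() the two sizing helpers from the top of the listing feed the per-VM setup: pd_size sizes the page-directory BO and pd_entries sizes an array with one slot per possible page table. The sketch below mirrors only that bookkeeping; plain calloc and the pt_slot struct stand in for the driver's allocators and types.

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct pt_slot { void *bo; uint64_t addr; }; /* stand-in for the driver's per-PDE entry */

    int main(void)
    {
        uint64_t max_pfn = 1ull << 20; /* stand-in: 4 GiB of 4 KiB pages */
        unsigned block_size = 9;

        unsigned pd_entries = max_pfn >> block_size;                     /* amdgpu_vm_num_pdes() */
        uint64_t pd_size = ((uint64_t)pd_entries * 8 + 4095) & ~4095ull; /* ..._directory_size() */

        struct pt_slot *page_tables = calloc(pd_entries, sizeof(*page_tables));
        if (!page_tables)
            return 1;

        printf("page directory: %llu bytes, %u page-table slots\n",
               (unsigned long long)pd_size, pd_entries);
        free(page_tables);
        return 0;
    }
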
1309 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
1315 dev_err(adev->dev, "still active bo inside vm\n"); in amdgpu_vm_fini()
1327 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) in amdgpu_vm_fini()
1336 atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, in amdgpu_vm_fini()
1350 void amdgpu_vm_manager_fini(struct amdgpu_device *adev) in amdgpu_vm_manager_fini() argument
1355 fence_put(adev->vm_manager.ids[i].active); in amdgpu_vm_manager_fini()