Lines Matching refs:vm

87 struct amdgpu_vm *vm, in amdgpu_vm_get_bos() argument
93 list = drm_malloc_ab(vm->max_pde_used + 2, in amdgpu_vm_get_bos()
100 list[0].robj = vm->page_directory; in amdgpu_vm_get_bos()
104 list[0].tv.bo = &vm->page_directory->tbo; in amdgpu_vm_get_bos()
108 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { in amdgpu_vm_get_bos()
109 if (!vm->page_tables[i].bo) in amdgpu_vm_get_bos()
112 list[idx].robj = vm->page_tables[i].bo; in amdgpu_vm_get_bos()
135 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, in amdgpu_vm_grab_id() argument
139 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; in amdgpu_vm_grab_id()
151 if (owner == (long)vm) { in amdgpu_vm_grab_id()
209 struct amdgpu_vm *vm, in amdgpu_vm_flush() argument
212 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); in amdgpu_vm_flush()
213 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; in amdgpu_vm_flush()
248 struct amdgpu_vm *vm, in amdgpu_vm_fence() argument
252 unsigned vm_id = vm->ids[ring->idx].id; in amdgpu_vm_fence()
256 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); in amdgpu_vm_fence()
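
The amdgpu_vm_grab_id()/amdgpu_vm_fence() references above show how a hardware VM id is tied to its last user: grab_id compares the id's stored owner against the vm pointer (line 151), and the fence path records the vm as the new owner via atomic_long_set (line 256). Below is a minimal standalone sketch of that compare-and-record pattern; the names (my_vmid and friends) are hypothetical, not the driver's structures.

#include <linux/atomic.h>
#include <linux/types.h>

struct my_vmid {
        atomic_long_t owner;    /* last vm that used this hardware id */
};

/* Only treat the id as reusable if the same vm still owns it,
 * mirroring the owner check in the listing above (sketch). */
static bool my_vmid_owned_by(struct my_vmid *id, void *vm)
{
        return atomic_long_read(&id->owner) == (long)vm;
}

/* Record vm as the id's current owner, as the fence path does above. */
static void my_vmid_claim(struct my_vmid *id, void *vm)
{
        atomic_long_set(&id->owner, (long)vm);
}
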
271 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
277 if (bo_va->vm == vm) { in amdgpu_vm_bo_find()
428 struct amdgpu_vm *vm) in amdgpu_vm_update_page_directory() argument
431 struct amdgpu_bo *pd = vm->page_directory; in amdgpu_vm_update_page_directory()
445 ndw += vm->max_pde_used * 6; in amdgpu_vm_update_page_directory()
463 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { in amdgpu_vm_update_page_directory()
464 struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; in amdgpu_vm_update_page_directory()
471 if (vm->page_tables[pt_idx].addr == pt) in amdgpu_vm_update_page_directory()
473 vm->page_tables[pt_idx].addr = pt; in amdgpu_vm_update_page_directory()
509 fence_put(vm->page_directory_fence); in amdgpu_vm_update_page_directory()
510 vm->page_directory_fence = fence_get(fence); in amdgpu_vm_update_page_directory()
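
amdgpu_vm_update_page_directory() only rewrites directory entries whose backing page-table address actually changed: the last written address is cached in vm->page_tables[pt_idx].addr and compared before a new write is queued (lines 471-473). The following is a generic sketch of that dirty-entry filtering with hypothetical names and no GPU command submission.

#include <linux/types.h>

/* Filter out page-directory entries whose page-table address is unchanged,
 * so only the modified ones need to be rewritten (hypothetical helper). */
static unsigned my_collect_dirty_pdes(u64 *cached, const u64 *latest,
                                      unsigned count, unsigned *dirty_idx)
{
        unsigned i, n = 0;

        for (i = 0; i < count; i++) {
                if (cached[i] == latest[i])
                        continue;       /* entry already up to date */
                cached[i] = latest[i];  /* remember what was written */
                dirty_idx[n++] = i;     /* this PDE needs a new write */
        }
        return n;
}
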
622 struct amdgpu_vm *vm, in amdgpu_vm_update_ptes() argument
641 struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; in amdgpu_vm_update_ptes()
704 struct amdgpu_vm *vm, in amdgpu_vm_bo_update_mapping() argument
772 r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, in amdgpu_vm_bo_update_mapping()
791 amdgpu_bo_fence(vm->page_directory, f, true); in amdgpu_vm_bo_update_mapping()
825 struct amdgpu_vm *vm = bo_va->vm; in amdgpu_vm_bo_update() local
841 spin_lock(&vm->status_lock); in amdgpu_vm_bo_update()
844 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_update()
847 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, in amdgpu_vm_bo_update()
861 spin_lock(&vm->status_lock); in amdgpu_vm_bo_update()
865 list_add(&bo_va->vm_status, &vm->cleared); in amdgpu_vm_bo_update()
866 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_update()
883 struct amdgpu_vm *vm) in amdgpu_vm_clear_freed() argument
888 spin_lock(&vm->freed_lock); in amdgpu_vm_clear_freed()
889 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
890 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
893 spin_unlock(&vm->freed_lock); in amdgpu_vm_clear_freed()
894 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); in amdgpu_vm_clear_freed()
899 spin_lock(&vm->freed_lock); in amdgpu_vm_clear_freed()
901 spin_unlock(&vm->freed_lock); in amdgpu_vm_clear_freed()
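
amdgpu_vm_clear_freed() drains vm->freed one mapping at a time, dropping vm->freed_lock around the potentially sleeping mapping update and retaking it before the next iteration (lines 888-901). A simplified sketch of that drain loop follows, using a hypothetical entry type; the actual update call and its error handling are reduced to a comment.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_freed_mapping {
        struct list_head list;
};

/* Drain a "freed" list one entry at a time, releasing the lock while the
 * (possibly sleeping) per-entry work runs. */
static void my_clear_freed(spinlock_t *lock, struct list_head *freed)
{
        struct my_freed_mapping *m;

        spin_lock(lock);
        while (!list_empty(freed)) {
                m = list_first_entry(freed, struct my_freed_mapping, list);
                list_del(&m->list);
                spin_unlock(lock);

                /* per-entry work (the listing updates page tables here);
                 * error handling elided in this sketch */
                kfree(m);

                spin_lock(lock);
        }
        spin_unlock(lock);
}
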
919 struct amdgpu_vm *vm, struct amdgpu_sync *sync) in amdgpu_vm_clear_invalids() argument
924 spin_lock(&vm->status_lock); in amdgpu_vm_clear_invalids()
925 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_clear_invalids()
926 bo_va = list_first_entry(&vm->invalidated, in amdgpu_vm_clear_invalids()
928 spin_unlock(&vm->status_lock); in amdgpu_vm_clear_invalids()
935 spin_lock(&vm->status_lock); in amdgpu_vm_clear_invalids()
937 spin_unlock(&vm->status_lock); in amdgpu_vm_clear_invalids()
959 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
968 bo_va->vm = vm; in amdgpu_vm_bo_add()
1001 struct amdgpu_vm *vm = bo_va->vm; in amdgpu_vm_bo_map() local
1027 spin_lock(&vm->it_lock); in amdgpu_vm_bo_map()
1028 it = interval_tree_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1029 spin_unlock(&vm->it_lock); in amdgpu_vm_bo_map()
1056 spin_lock(&vm->it_lock); in amdgpu_vm_bo_map()
1057 interval_tree_insert(&mapping->it, &vm->va); in amdgpu_vm_bo_map()
1058 spin_unlock(&vm->it_lock); in amdgpu_vm_bo_map()
1067 if (eaddr > vm->max_pde_used) in amdgpu_vm_bo_map()
1068 vm->max_pde_used = eaddr; in amdgpu_vm_bo_map()
1072 struct reservation_object *resv = vm->page_directory->tbo.resv; in amdgpu_vm_bo_map()
1075 if (vm->page_tables[pt_idx].bo) in amdgpu_vm_bo_map()
1089 pt->parent = amdgpu_bo_ref(vm->page_directory); in amdgpu_vm_bo_map()
1097 vm->page_tables[pt_idx].addr = 0; in amdgpu_vm_bo_map()
1098 vm->page_tables[pt_idx].bo = pt; in amdgpu_vm_bo_map()
1105 spin_lock(&vm->it_lock); in amdgpu_vm_bo_map()
1106 interval_tree_remove(&mapping->it, &vm->va); in amdgpu_vm_bo_map()
1107 spin_unlock(&vm->it_lock); in amdgpu_vm_bo_map()
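
amdgpu_vm_bo_map() first probes the per-VM interval tree for an overlapping mapping under vm->it_lock (lines 1027-1029), inserts the new mapping later under the same lock (lines 1056-1058), and removes it again on the error path (lines 1105-1107). Here is a reduced sketch of the check-then-insert part built on the generic <linux/interval_tree.h> helpers; struct and function names are illustrative only.

#include <linux/errno.h>
#include <linux/interval_tree.h>
#include <linux/spinlock.h>

struct my_mapping {
        struct interval_tree_node it;   /* it.start / it.last bound the range */
};

/* Check for an overlapping mapping, then insert the new one; both steps
 * take the same spinlock, as in the listing above (sketch only). */
static int my_map_range(struct rb_root *va, spinlock_t *it_lock,
                        struct my_mapping *m,
                        unsigned long saddr, unsigned long eaddr)
{
        struct interval_tree_node *it;

        spin_lock(it_lock);
        it = interval_tree_iter_first(va, saddr, eaddr);
        spin_unlock(it_lock);
        if (it)
                return -EINVAL;         /* the range is already mapped */

        m->it.start = saddr;
        m->it.last = eaddr;
        spin_lock(it_lock);
        interval_tree_insert(&m->it, va);
        spin_unlock(it_lock);
        return 0;
}
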
1132 struct amdgpu_vm *vm = bo_va->vm; in amdgpu_vm_bo_unmap() local
1157 spin_lock(&vm->it_lock); in amdgpu_vm_bo_unmap()
1158 interval_tree_remove(&mapping->it, &vm->va); in amdgpu_vm_bo_unmap()
1159 spin_unlock(&vm->it_lock); in amdgpu_vm_bo_unmap()
1163 spin_lock(&vm->freed_lock); in amdgpu_vm_bo_unmap()
1164 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1165 spin_unlock(&vm->freed_lock); in amdgpu_vm_bo_unmap()
1187 struct amdgpu_vm *vm = bo_va->vm; in amdgpu_vm_bo_rmv() local
1191 spin_lock(&vm->status_lock); in amdgpu_vm_bo_rmv()
1193 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_rmv()
1197 spin_lock(&vm->it_lock); in amdgpu_vm_bo_rmv()
1198 interval_tree_remove(&mapping->it, &vm->va); in amdgpu_vm_bo_rmv()
1199 spin_unlock(&vm->it_lock); in amdgpu_vm_bo_rmv()
1201 spin_lock(&vm->freed_lock); in amdgpu_vm_bo_rmv()
1202 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_rmv()
1203 spin_unlock(&vm->freed_lock); in amdgpu_vm_bo_rmv()
1207 spin_lock(&vm->it_lock); in amdgpu_vm_bo_rmv()
1208 interval_tree_remove(&mapping->it, &vm->va); in amdgpu_vm_bo_rmv()
1209 spin_unlock(&vm->it_lock); in amdgpu_vm_bo_rmv()
1232 spin_lock(&bo_va->vm->status_lock); in amdgpu_vm_bo_invalidate()
1234 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); in amdgpu_vm_bo_invalidate()
1235 spin_unlock(&bo_va->vm->status_lock); in amdgpu_vm_bo_invalidate()
1247 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_init() argument
1255 vm->ids[i].id = 0; in amdgpu_vm_init()
1256 vm->ids[i].flushed_updates = NULL; in amdgpu_vm_init()
1258 vm->va = RB_ROOT; in amdgpu_vm_init()
1259 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
1260 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
1261 INIT_LIST_HEAD(&vm->cleared); in amdgpu_vm_init()
1262 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
1263 spin_lock_init(&vm->it_lock); in amdgpu_vm_init()
1264 spin_lock_init(&vm->freed_lock); in amdgpu_vm_init()
1269 vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt)); in amdgpu_vm_init()
1270 if (vm->page_tables == NULL) { in amdgpu_vm_init()
1275 vm->page_directory_fence = NULL; in amdgpu_vm_init()
1280 NULL, NULL, &vm->page_directory); in amdgpu_vm_init()
1283 r = amdgpu_bo_reserve(vm->page_directory, false); in amdgpu_vm_init()
1285 amdgpu_bo_unref(&vm->page_directory); in amdgpu_vm_init()
1286 vm->page_directory = NULL; in amdgpu_vm_init()
1289 r = amdgpu_vm_clear_bo(adev, vm->page_directory); in amdgpu_vm_init()
1290 amdgpu_bo_unreserve(vm->page_directory); in amdgpu_vm_init()
1292 amdgpu_bo_unref(&vm->page_directory); in amdgpu_vm_init()
1293 vm->page_directory = NULL; in amdgpu_vm_init()
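
amdgpu_vm_init() prepares the per-VM bookkeeping before the page directory BO is created and cleared: an empty interval tree root, the invalidated/cleared/freed lists, and their spinlocks (lines 1258-1264). A stripped-down sketch of that setup against a hypothetical struct follows; page-directory allocation and the error paths that unref it are left out.

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct my_vm {
        struct rb_root va;              /* interval tree of address mappings */
        spinlock_t it_lock;
        spinlock_t status_lock;
        spinlock_t freed_lock;
        struct list_head invalidated;   /* entries whose mappings need updating */
        struct list_head cleared;       /* entries already brought up to date */
        struct list_head freed;         /* mappings waiting to be unmapped */
};

static void my_vm_init(struct my_vm *vm)
{
        vm->va = RB_ROOT;
        spin_lock_init(&vm->it_lock);
        spin_lock_init(&vm->status_lock);
        spin_lock_init(&vm->freed_lock);
        INIT_LIST_HEAD(&vm->invalidated);
        INIT_LIST_HEAD(&vm->cleared);
        INIT_LIST_HEAD(&vm->freed);
}
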
1309 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
1314 if (!RB_EMPTY_ROOT(&vm->va)) { in amdgpu_vm_fini()
1317 rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { in amdgpu_vm_fini()
1319 interval_tree_remove(&mapping->it, &vm->va); in amdgpu_vm_fini()
1322 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
1328 amdgpu_bo_unref(&vm->page_tables[i].bo); in amdgpu_vm_fini()
1329 drm_free_large(vm->page_tables); in amdgpu_vm_fini()
1331 amdgpu_bo_unref(&vm->page_directory); in amdgpu_vm_fini()
1332 fence_put(vm->page_directory_fence); in amdgpu_vm_fini()
1334 unsigned id = vm->ids[i].id; in amdgpu_vm_fini()
1337 (long)vm, 0); in amdgpu_vm_fini()
1338 fence_put(vm->ids[i].flushed_updates); in amdgpu_vm_fini()
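
amdgpu_vm_fini() undoes the setup above: mappings still left in the interval tree are walked post-order and released, the freed list is flushed, the page tables and page directory are unreferenced, and the per-ring flushed_updates fences are dropped. A compressed sketch of just the interval-tree cleanup, again with hypothetical names:

#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_node {
        struct interval_tree_node it;
};

/* Free every mapping still left in the interval tree at teardown (sketch). */
static void my_vm_fini(struct rb_root *va)
{
        struct my_node *m, *tmp;

        /* post-order walk: entries may be freed as we go because the tree
         * is discarded afterwards rather than rebalanced */
        rbtree_postorder_for_each_entry_safe(m, tmp, va, it.rb)
                kfree(m);

        *va = RB_ROOT;
}
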