Lines matching refs: vm (all hits below are in the radeon driver's radeon_vm.c)

129 struct radeon_vm *vm, in radeon_vm_get_bos() argument
135 list = drm_malloc_ab(vm->max_pde_used + 2, in radeon_vm_get_bos()
141 list[0].robj = vm->page_directory; in radeon_vm_get_bos()
144 list[0].tv.bo = &vm->page_directory->tbo; in radeon_vm_get_bos()
149 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { in radeon_vm_get_bos()
150 if (!vm->page_tables[i].bo) in radeon_vm_get_bos()
153 list[idx].robj = vm->page_tables[i].bo; in radeon_vm_get_bos()
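
The hits at lines 129-153 are the core of radeon_vm_get_bos(), which builds the buffer-validation list for a command submission: entry 0 is always the page directory, followed by one entry per page table that has actually been allocated. A hedged sketch of that loop, keeping only the fields visible above (the real function also fills in placement domains, tiling flags and the rest of the reservation entry, and the head list parameter is reproduced from the driver of this era):

/* Hedged sketch of radeon_vm_get_bos(): slot 0 = page directory, then one
 * slot per allocated page table; domain/tiling/shared fields are elided. */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx;

	/* worst case: the directory plus every PDE up to max_pde_used */
	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct radeon_bo_list));
	if (!list)
		return NULL;

	list[0].robj = vm->page_directory;
	list[0].tv.bo = &vm->page_directory->tbo;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;	/* this page table was never allocated */

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

Allocating vm->max_pde_used + 2 entries up front covers the worst case of every page-directory entry being backed by a table, so nothing has to be reallocated inside the loop.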
178 struct radeon_vm *vm, int ring) in radeon_vm_grab_id() argument
181 struct radeon_vm_id *vm_id = &vm->ids[ring]; in radeon_vm_grab_id()
237 struct radeon_vm *vm, in radeon_vm_flush() argument
240 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); in radeon_vm_flush()
241 struct radeon_vm_id *vm_id = &vm->ids[ring]; in radeon_vm_flush()
246 trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id); in radeon_vm_flush()
269 struct radeon_vm *vm, in radeon_vm_fence() argument
272 unsigned vm_id = vm->ids[fence->ring].id; in radeon_vm_fence()
277 radeon_fence_unref(&vm->ids[fence->ring].last_id_use); in radeon_vm_fence()
278 vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence); in radeon_vm_fence()
293 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, in radeon_vm_bo_find() argument
299 if (bo_va->vm == vm) { in radeon_vm_bo_find()
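
Line 299 is the match test inside radeon_vm_bo_find(); the whole lookup is just a walk over the BO's list of mappings. A reconstruction based on the fragments above, assuming bo->va is the per-BO mapping list as in the radeon driver of this period:

/* Sketch of radeon_vm_bo_find(): return the bo_va that maps @bo into @vm,
 * or NULL if this BO is not mapped into that address space. */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm)
			return bo_va;
	}
	return NULL;
}

Because a BO can be mapped into several address spaces at once, the back-pointer checked here lives in the bo_va rather than in the BO itself.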
320 struct radeon_vm *vm, in radeon_vm_bo_add() argument
329 bo_va->vm = vm; in radeon_vm_bo_add()
339 mutex_lock(&vm->mutex); in radeon_vm_bo_add()
341 mutex_unlock(&vm->mutex); in radeon_vm_bo_add()
452 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_set_addr() local
477 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
482 it = interval_tree_iter_first(&vm->va, soffset, eoffset); in radeon_vm_bo_set_addr()
490 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
502 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
508 tmp->vm = vm; in radeon_vm_bo_set_addr()
511 spin_lock(&vm->status_lock); in radeon_vm_bo_set_addr()
512 list_add(&tmp->vm_status, &vm->freed); in radeon_vm_bo_set_addr()
513 spin_unlock(&vm->status_lock); in radeon_vm_bo_set_addr()
518 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_bo_set_addr()
526 interval_tree_insert(&bo_va->it, &vm->va); in radeon_vm_bo_set_addr()
537 if (eoffset > vm->max_pde_used) in radeon_vm_bo_set_addr()
538 vm->max_pde_used = eoffset; in radeon_vm_bo_set_addr()
546 if (vm->page_tables[pt_idx].bo) in radeon_vm_bo_set_addr()
550 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
566 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
567 if (vm->page_tables[pt_idx].bo) { in radeon_vm_bo_set_addr()
569 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
571 mutex_lock(&vm->mutex); in radeon_vm_bo_set_addr()
575 vm->page_tables[pt_idx].addr = 0; in radeon_vm_bo_set_addr()
576 vm->page_tables[pt_idx].bo = pt; in radeon_vm_bo_set_addr()
579 mutex_unlock(&vm->mutex); in radeon_vm_bo_set_addr()
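
Lines 550-579 hint at the trickiest part of radeon_vm_bo_set_addr(): vm->mutex is dropped while a missing page-table BO is allocated and cleared (both operations can sleep), so after re-taking the mutex the code has to check whether a concurrent mapping already installed a table in the same slot. A hedged sketch of that race check from inside the per-PDE loop, with the allocation call elided:

	/* Sketch of the race handling around page-table allocation in
	 * radeon_vm_bo_set_addr(); radeon_bo_create() arguments are elided. */
	mutex_unlock(&vm->mutex);	/* allocating and clearing the BO may sleep */

	/* pt = freshly created and cleared page-table BO ... */

	mutex_lock(&vm->mutex);
	if (vm->page_tables[pt_idx].bo) {
		/* lost the race: another thread already installed a table here */
		mutex_unlock(&vm->mutex);
		radeon_bo_unref(&pt);	/* drop our now-redundant BO */
		mutex_lock(&vm->mutex);
		continue;
	}

	vm->page_tables[pt_idx].addr = 0;	/* not yet written into the PD */
	vm->page_tables[pt_idx].bo = pt;	/* publish the new page table */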
643 struct radeon_vm *vm) in radeon_vm_update_page_directory() argument
645 struct radeon_bo *pd = vm->page_directory; in radeon_vm_update_page_directory()
657 ndw += vm->max_pde_used * 6; in radeon_vm_update_page_directory()
669 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { in radeon_vm_update_page_directory()
670 struct radeon_bo *bo = vm->page_tables[pt_idx].bo; in radeon_vm_update_page_directory()
677 if (vm->page_tables[pt_idx].addr == pt) in radeon_vm_update_page_directory()
679 vm->page_tables[pt_idx].addr = pt; in radeon_vm_update_page_directory()
816 struct radeon_vm *vm, in radeon_vm_update_ptes() argument
829 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; in radeon_vm_update_ptes()
887 static void radeon_vm_fence_pts(struct radeon_vm *vm, in radeon_vm_fence_pts() argument
897 radeon_bo_fence(vm->page_tables[i].bo, fence, true); in radeon_vm_fence_pts()
917 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_update() local
926 bo_va->bo, vm); in radeon_vm_bo_update()
930 spin_lock(&vm->status_lock); in radeon_vm_bo_update()
932 spin_unlock(&vm->status_lock); in radeon_vm_bo_update()
1005 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use); in radeon_vm_bo_update()
1008 r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, in radeon_vm_bo_update()
1025 radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence); in radeon_vm_bo_update()
1045 struct radeon_vm *vm) in radeon_vm_clear_freed() argument
1050 spin_lock(&vm->status_lock); in radeon_vm_clear_freed()
1051 while (!list_empty(&vm->freed)) { in radeon_vm_clear_freed()
1052 bo_va = list_first_entry(&vm->freed, in radeon_vm_clear_freed()
1054 spin_unlock(&vm->status_lock); in radeon_vm_clear_freed()
1063 spin_lock(&vm->status_lock); in radeon_vm_clear_freed()
1065 spin_unlock(&vm->status_lock); in radeon_vm_clear_freed()
1082 struct radeon_vm *vm) in radeon_vm_clear_invalids() argument
1087 spin_lock(&vm->status_lock); in radeon_vm_clear_invalids()
1088 while (!list_empty(&vm->invalidated)) { in radeon_vm_clear_invalids()
1089 bo_va = list_first_entry(&vm->invalidated, in radeon_vm_clear_invalids()
1091 spin_unlock(&vm->status_lock); in radeon_vm_clear_invalids()
1097 spin_lock(&vm->status_lock); in radeon_vm_clear_invalids()
1099 spin_unlock(&vm->status_lock); in radeon_vm_clear_invalids()
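
Both radeon_vm_clear_freed() (lines 1050-1065) and radeon_vm_clear_invalids() (lines 1087-1099) drain their list with the same pattern: vm->status_lock only guards the list itself, so it is dropped around radeon_vm_bo_update(), which submits an indirect buffer and may sleep. A hedged sketch of that loop for the freed list (passing NULL as the memory argument is assumed to mean "unmap", and the per-entry cleanup is abbreviated):

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->freed)) {
		bo_va = list_first_entry(&vm->freed,
					 struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);	/* the update below can sleep */

		r = radeon_vm_bo_update(rdev, bo_va, NULL);	/* clear the PTEs */
		/* ... drop the bo_va's BO and fence references, kfree() it ... */
		if (r)
			return r;

		spin_lock(&vm->status_lock);	/* re-take before testing the list */
	}
	spin_unlock(&vm->status_lock);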
1117 struct radeon_vm *vm = bo_va->vm; in radeon_vm_bo_rmv() local
1121 mutex_lock(&vm->mutex); in radeon_vm_bo_rmv()
1123 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_bo_rmv()
1124 spin_lock(&vm->status_lock); in radeon_vm_bo_rmv()
1129 list_add(&bo_va->vm_status, &vm->freed); in radeon_vm_bo_rmv()
1134 spin_unlock(&vm->status_lock); in radeon_vm_bo_rmv()
1136 mutex_unlock(&vm->mutex); in radeon_vm_bo_rmv()
1155 spin_lock(&bo_va->vm->status_lock); in radeon_vm_bo_invalidate()
1157 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); in radeon_vm_bo_invalidate()
1158 spin_unlock(&bo_va->vm->status_lock); in radeon_vm_bo_invalidate()
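
Lines 1155-1158 show the hand-off that feeds the invalidated list: whenever TTM moves a BO, radeon_vm_bo_invalidate() queues every mapping of that BO so the next submission rewrites the affected page-table entries. A sketch of the whole function; the guard on line 1156 is not visible in the listing, so a simple list_empty() check is assumed here:

/* Sketch of radeon_vm_bo_invalidate(): mark every mapping of a moved BO as
 * needing a page-table update in its owning VM. */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))	/* not already queued */
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}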
1171 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) in radeon_vm_init() argument
1178 vm->ib_bo_va = NULL; in radeon_vm_init()
1180 vm->ids[i].id = 0; in radeon_vm_init()
1181 vm->ids[i].flushed_updates = NULL; in radeon_vm_init()
1182 vm->ids[i].last_id_use = NULL; in radeon_vm_init()
1184 mutex_init(&vm->mutex); in radeon_vm_init()
1185 vm->va = RB_ROOT; in radeon_vm_init()
1186 spin_lock_init(&vm->status_lock); in radeon_vm_init()
1187 INIT_LIST_HEAD(&vm->invalidated); in radeon_vm_init()
1188 INIT_LIST_HEAD(&vm->freed); in radeon_vm_init()
1195 vm->page_tables = kzalloc(pts_size, GFP_KERNEL); in radeon_vm_init()
1196 if (vm->page_tables == NULL) { in radeon_vm_init()
1203 NULL, &vm->page_directory); in radeon_vm_init()
1207 r = radeon_vm_clear_bo(rdev, vm->page_directory); in radeon_vm_init()
1209 radeon_bo_unref(&vm->page_directory); in radeon_vm_init()
1210 vm->page_directory = NULL; in radeon_vm_init()
1226 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) in radeon_vm_fini() argument
1231 if (!RB_EMPTY_ROOT(&vm->va)) { in radeon_vm_fini()
1234 rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) { in radeon_vm_fini()
1235 interval_tree_remove(&bo_va->it, &vm->va); in radeon_vm_fini()
1244 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { in radeon_vm_fini()
1251 radeon_bo_unref(&vm->page_tables[i].bo); in radeon_vm_fini()
1252 kfree(vm->page_tables); in radeon_vm_fini()
1254 radeon_bo_unref(&vm->page_directory); in radeon_vm_fini()
1257 radeon_fence_unref(&vm->ids[i].flushed_updates); in radeon_vm_fini()
1258 radeon_fence_unref(&vm->ids[i].last_id_use); in radeon_vm_fini()
1261 mutex_destroy(&vm->mutex); in radeon_vm_fini()
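
Taken together, the radeon_vm_init() hits (lines 1171-1210) and the radeon_vm_fini() hits (lines 1231-1261) cover the whole lifetime of an address space, which the driver creates per DRM client. A hedged usage sketch; in the real driver the VM is embedded in the per-file radeon_fpriv, the ib_bo_va mapping is set up after init, and the error paths are more involved:

	/* Hedged lifetime sketch; error handling and ib_bo_va setup trimmed. */
	struct radeon_vm *vm;
	int r;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);	/* one address space per client */
	if (!vm)
		return -ENOMEM;

	r = radeon_vm_init(rdev, vm);	/* locks, interval tree, PT array, cleared PD */
	if (r) {
		kfree(vm);
		return r;
	}

	/* ... radeon_vm_bo_add() / radeon_vm_bo_set_addr() to map BOs,
	 *     radeon_vm_bo_rmv() to unmap, command submissions in between ... */

	radeon_vm_fini(rdev, vm);	/* drops mappings, page tables, PD, per-ring fences */
	kfree(vm);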