Lines matching refs:vm — each entry gives the source line number, the matching line, and the enclosing function; this appears to be the pre-4.15 nouveau nvkm MMU code (drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c).
32 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_at() local
33 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_at()
38 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_at()
49 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_map_at()
70 mmu->func->flush(vm); in nvkm_vm_map_at()
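The nvkm_vm_map_at() hits above show the index arithmetic shared by every map/unmap path in this file: a page offset is split into a directory index (pde, biased by vm->fpde) and an entry index inside vm->pgt[pde].mem[big], and the walk ends with a single TLB flush through mmu->func->flush(vm). A minimal standalone sketch of that split; PGT_BITS and the 4 KiB page shift are hypothetical stand-ins for the per-GPU mmu->func->pgt_bits:

#include <stdint.h>
#include <stdio.h>

#define PGT_BITS   17   /* stand-in for mmu->func->pgt_bits */
#define PAGE_SHIFT 12   /* 4 KiB small pages */

int main(void)
{
        uint32_t fpde   = 0;            /* vm->fpde */
        uint32_t offset = 0x123456;     /* vma->node->offset + (delta >> 12) */

        /* Directory slot, relative to the first PDE this VM manages... */
        uint32_t pde = (offset >> PGT_BITS) - fpde;
        /* ...and PTE index within that table (large pages would shift
         * this right by a further page_shift - 12 bits). */
        uint32_t pte = offset & ((1u << PGT_BITS) - 1);

        printf("offset 0x%x -> pde %u, pte %u\n", offset, pde, pte);
        return 0;
}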
77 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg_table() local
78 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg_table()
83 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_sg_table()
92 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_map_sg_table()
128 mmu->func->flush(vm); in nvkm_vm_map_sg_table()
135 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg() local
136 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg()
142 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_map_sg()
148 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_map_sg()
166 mmu->func->flush(vm); in nvkm_vm_map_sg()
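nvkm_vm_map_sg_table() and nvkm_vm_map_sg() (lines 77-166) apply the same split to scatter/gather-backed memory. A hedged fragment reconstructed around the listed lines, not the verbatim driver loop (kernel context and the surrounding declarations assumed): clamp each burst to the end of the current page table, hand it to the per-GPU map_sg hook, step into the next PDE when the table fills, and flush once for the whole range.

while (num) {
        struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
        u32 end = pte + num;
        u32 len;

        if (end > max)          /* max = PTEs per page table */
                end = max;
        len = end - pte;

        mmu->func->map_sg(vma, pgt, mem, pte, len, list);
        list += len;

        num -= len;
        pte += len;
        if (pte >= max) {       /* spill into the next table */
                pde++;
                pte = 0;
        }
}
mmu->func->flush(vm);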
184 struct nvkm_vm *vm = vma->vm; in nvkm_vm_unmap_at() local
185 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_unmap_at()
190 u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde; in nvkm_vm_unmap_at()
196 struct nvkm_memory *pgt = vm->pgt[pde].mem[big]; in nvkm_vm_unmap_at()
213 mmu->func->flush(vm); in nvkm_vm_unmap_at()
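nvkm_vm_unmap_at() mirrors the map walk: the same pde/pte split and table-boundary handling, but each burst goes to the per-GPU unmap hook to clear PTEs, again with a single flush at the end. A hedged fragment under the same caveats as above:

while (num) {
        struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
        u32 end = pte + num;
        u32 len;

        if (end > max)
                end = max;
        len = end - pte;

        mmu->func->unmap(vma, pgt, pte, len);   /* clear the PTEs */

        num -= len;
        pte += len;
        if (pte >= max) {
                pde++;
                pte = 0;
        }
}
mmu->func->flush(vm);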
223 nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) in nvkm_vm_unmap_pgt() argument
225 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_unmap_pgt()
232 vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_unmap_pgt()
239 list_for_each_entry(vpgd, &vm->pgd_list, head) { in nvkm_vm_unmap_pgt()
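nvkm_vm_unmap_pgt() (lines 223-239) is the page-table teardown: for each PDE in the span it drops the per-size refcount, and only when a table's count reaches zero does it detach the table from every page directory on vm->pgd_list and free it. The core step for one PDE, reconstructed as a hedged sketch:

vpgt = &vm->pgt[pde - vm->fpde];
if (--vpgt->refcount[big])
        continue;               /* table still referenced */

pgt = vpgt->mem[big];
vpgt->mem[big] = NULL;

/* Rewrite the now-empty slot into every directory sharing this VM. */
list_for_each_entry(vpgd, &vm->pgd_list, head)
        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);

nvkm_memory_del(&pgt);          /* free the table itself */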
248 nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type) in nvkm_vm_map_pgt() argument
250 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_pgt()
251 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_map_pgt()
265 list_for_each_entry(vpgd, &vm->pgd_list, head) { in nvkm_vm_map_pgt()
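nvkm_vm_map_pgt() (lines 248-265) is the inverse: allocate a table sized for the page size implied by `type`, publish its address into every linked directory, and take the first reference. A hedged sketch of the allocate-and-publish step, assuming this era's 8-byte PTEs:

u32 pgt_size = ((1 << (mmu->func->pgt_bits + 12)) >> type) * 8;

ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                      pgt_size, 0x1000, true, &vpgt->mem[big]);
if (ret)
        return ret;

/* Point every page directory at the fresh table. */
list_for_each_entry(vpgd, &vm->pgd_list, head)
        mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);

vpgt->refcount[big]++;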
274 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access, in nvkm_vm_get() argument
277 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_get()
283 mutex_lock(&vm->mutex); in nvkm_vm_get()
284 ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align, in nvkm_vm_get()
287 mutex_unlock(&vm->mutex); in nvkm_vm_get()
295 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_get()
303 ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); in nvkm_vm_get()
306 nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1); in nvkm_vm_get()
307 nvkm_mm_free(&vm->mm, &vma->node); in nvkm_vm_get()
308 mutex_unlock(&vm->mutex); in nvkm_vm_get()
312 mutex_unlock(&vm->mutex); in nvkm_vm_get()
314 vma->vm = NULL; in nvkm_vm_get()
315 nvkm_vm_ref(vm, &vma->vm, NULL); in nvkm_vm_get()
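In nvkm_vm_get() (lines 274-315), the VA allocation via nvkm_mm_head() and the per-PDE page-table bookkeeping both happen under vm->mutex; if allocating a table fails partway, the error path unwinds only the tables taken so far (fpde .. pde - 1) before freeing the VA node. A hedged sketch of that ensure-or-unwind loop, reconstructed around the listed lines:

for (pde = fpde; pde <= lpde; pde++) {
        struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        int big = (vma->node->type != mmu->func->spg_shift);

        if (likely(vpgt->refcount[big])) {
                vpgt->refcount[big]++;  /* table already exists */
                continue;
        }

        ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
        if (ret) {
                if (pde != fpde)        /* undo partial progress */
                        nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
                nvkm_mm_free(&vm->mm, &vma->node);
                mutex_unlock(&vm->mutex);
                return ret;
        }
}

The closing nvkm_vm_ref(vm, &vma->vm, NULL) takes the reference that nvkm_vm_put() later drops.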
325 struct nvkm_vm *vm; in nvkm_vm_put() local
330 vm = vma->vm; in nvkm_vm_put()
331 mmu = vm->mmu; in nvkm_vm_put()
336 mutex_lock(&vm->mutex); in nvkm_vm_put()
337 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde); in nvkm_vm_put()
338 nvkm_mm_free(&vm->mm, &vma->node); in nvkm_vm_put()
339 mutex_unlock(&vm->mutex); in nvkm_vm_put()
341 nvkm_vm_ref(NULL, &vma->vm, NULL); in nvkm_vm_put()
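nvkm_vm_put() is the converse: derive the PDE span from the VA node, release the page-table references and the node under the mutex, then drop the VM reference taken in nvkm_vm_get(). Reconstructed as a hedged sketch around lines 330-341:

fpde = (vma->node->offset >> mmu->func->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

mutex_lock(&vm->mutex);
nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
nvkm_mm_free(&vm->mm, &vma->node);
mutex_unlock(&vm->mutex);

nvkm_vm_ref(NULL, &vma->vm, NULL);      /* may free the VM via kref_put() */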
345 nvkm_vm_boot(struct nvkm_vm *vm, u64 size) in nvkm_vm_boot() argument
347 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_boot()
354 vm->pgt[0].refcount[0] = 1; in nvkm_vm_boot()
355 vm->pgt[0].mem[0] = pgt; in nvkm_vm_boot()
356 nvkm_memory_boot(pgt, vm); in nvkm_vm_boot()
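nvkm_vm_boot() hand-builds page table 0 so the MMU can be brought up before the normal nvkm_vm_get() path is usable: one instance-memory allocation holding a PTE per small page of `size`, pinned with refcount 1 so nvkm_vm_unmap_pgt() never frees it. A hedged sketch, assuming 8-byte PTEs as elsewhere in this file:

ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                      (size >> mmu->func->spg_shift) * 8, 0x1000,
                      true, &pgt);
if (ret == 0) {
        vm->pgt[0].refcount[0] = 1;     /* pinned for the VM's lifetime */
        vm->pgt[0].mem[0] = pgt;
        nvkm_memory_boot(pgt, vm);
}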
367 struct nvkm_vm *vm; in nvkm_vm_create() local
371 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in nvkm_vm_create()
372 if (!vm) in nvkm_vm_create()
375 __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key); in nvkm_vm_create()
376 INIT_LIST_HEAD(&vm->pgd_list); in nvkm_vm_create()
377 vm->mmu = mmu; in nvkm_vm_create()
378 kref_init(&vm->refcount); in nvkm_vm_create()
379 vm->fpde = offset >> (mmu->func->pgt_bits + 12); in nvkm_vm_create()
380 vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12); in nvkm_vm_create()
382 vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); in nvkm_vm_create()
383 if (!vm->pgt) { in nvkm_vm_create()
384 kfree(vm); in nvkm_vm_create()
388 ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, in nvkm_vm_create()
391 vfree(vm->pgt); in nvkm_vm_create()
392 kfree(vm); in nvkm_vm_create()
396 *pvm = vm; in nvkm_vm_create()
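nvkm_vm_create() (lines 367-396) fixes the VM's PDE window up front: fpde/lpde divide the managed byte range by the bytes one PDE covers (pgt_bits + 12), and vm->pgt gets one vzalloc'd slot per PDE, with failure cleanup mirroring the allocations (vfree then kfree). A standalone check of the sizing arithmetic, with PGT_BITS again a hypothetical stand-in:

#include <stdint.h>
#include <stdio.h>

#define PGT_BITS   17   /* stand-in for mmu->func->pgt_bits */
#define PAGE_SHIFT 12

int main(void)
{
        uint64_t offset = 0;
        uint64_t length = 1ULL << 40;   /* a 1 TiB address space */

        uint32_t fpde = offset >> (PGT_BITS + PAGE_SHIFT);
        uint32_t lpde = (offset + length - 1) >> (PGT_BITS + PAGE_SHIFT);

        /* One nvkm_vm_pgt slot per PDE, matching the vzalloc() size. */
        printf("pgt array: %u slots\n", lpde - fpde + 1);
        return 0;
}

The open-coded count-times-sizeof multiplication in the vzalloc() call predates the kernel's overflow-checked array_size() helper, which later kernels use for exactly this pattern.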
412 nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd) in nvkm_vm_link() argument
414 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_link()
427 mutex_lock(&vm->mutex); in nvkm_vm_link()
428 for (i = vm->fpde; i <= vm->lpde; i++) in nvkm_vm_link()
429 mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem); in nvkm_vm_link()
430 list_add(&vpgd->head, &vm->pgd_list); in nvkm_vm_link()
431 mutex_unlock(&vm->mutex); in nvkm_vm_link()
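nvkm_vm_link() attaches a page directory to the VM: under vm->mutex it replays every PDE slot into the new directory before adding it to pgd_list, so the directory is fully populated before map_pgt()/unmap_pgt() can ever walk to it. A hedged sketch around the listed lines:

vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
if (!vpgd)
        return -ENOMEM;
vpgd->obj = pgd;

mutex_lock(&vm->mutex);
/* Replay all existing page tables into the new directory... */
for (i = vm->fpde; i <= vm->lpde; i++)
        mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
/* ...then expose it to future map/unmap bookkeeping. */
list_add(&vpgd->head, &vm->pgd_list);
mutex_unlock(&vm->mutex);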
436 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd) in nvkm_vm_unlink() argument
443 mutex_lock(&vm->mutex); in nvkm_vm_unlink()
444 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { in nvkm_vm_unlink()
451 mutex_unlock(&vm->mutex); in nvkm_vm_unlink()
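nvkm_vm_unlink() uses the _safe iterator because it deletes the matching entry while walking the list (hedged sketch):

mutex_lock(&vm->mutex);
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
        if (vpgd->obj == mpgd) {
                list_del(&vpgd->head);
                kfree(vpgd);
                break;
        }
}
mutex_unlock(&vm->mutex);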
457 struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount); in nvkm_vm_del() local
460 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { in nvkm_vm_del()
461 nvkm_vm_unlink(vm, vpgd->obj); in nvkm_vm_del()
464 nvkm_mm_fini(&vm->mm); in nvkm_vm_del()
465 vfree(vm->pgt); in nvkm_vm_del()
466 kfree(vm); in nvkm_vm_del()
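nvkm_vm_del() is the kref release callback: it unlinks any directories still on pgd_list, then tears down in reverse order of nvkm_vm_create() (nvkm_mm_fini(), vfree(vm->pgt), kfree(vm)). It is reached through nvkm_vm_ref(), which the get/put lines above call; a hedged sketch of that entry point, reconstructed rather than quoted:

int nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr,
                struct nvkm_gpuobj *pgd)
{
        if (ref) {
                int ret = nvkm_vm_link(ref, pgd);       /* attach PGD, if any */
                if (ret)
                        return ret;
                kref_get(&ref->refcount);
        }
        if (*ptr) {
                nvkm_vm_unlink(*ptr, pgd);
                kref_put(&(*ptr)->refcount, nvkm_vm_del);       /* may free */
        }
        *ptr = ref;
        return 0;
}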