Lines Matching refs:vm

32 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_at() local
33 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_at()
38 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; in nvkm_vm_map_at()
49 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; in nvkm_vm_map_at()
70 mmu->flush(vm); in nvkm_vm_map_at()
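
Every map and unmap path in this listing opens with the same index split seen at line 38: the byte offset is scaled to 4KiB page units, the high bits select a page-directory entry (rebased against vm->fpde), and the low bits select the PTE. Below is a minimal standalone sketch of that arithmetic for the small-page case; pgt_bits and fpde are assumed example values, not real chipset numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t pgt_bits = 15;  /* assumed: one PGT covers 2^15 small pages */
	const uint32_t fpde = 0;       /* assumed vm->fpde for this example vm */

	uint64_t addr = 0x40123000ULL;             /* byte offset inside the vm */
	uint32_t offset = (uint32_t)(addr >> 12);  /* scale to 4KiB page units */

	/* same split as the listing: directory index, then table index */
	uint32_t pde = (offset >> pgt_bits) - fpde;
	uint32_t pte = offset & ((1u << pgt_bits) - 1);

	printf("addr 0x%llx -> pde %u, pte %u\n",
	       (unsigned long long)addr, pde, pte);
	return 0;
}

For big pages the kernel additionally shifts the PTE index right by the difference between the page shift and 12, since one big-page PTE covers several small-page slots.
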
77 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg_table() local
78 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg_table()
83 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; in nvkm_vm_map_sg_table()
92 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; in nvkm_vm_map_sg_table()
128 mmu->flush(vm); in nvkm_vm_map_sg_table()
135 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg() local
136 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_sg()
142 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; in nvkm_vm_map_sg()
148 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; in nvkm_vm_map_sg()
166 mmu->flush(vm); in nvkm_vm_map_sg()
184 struct nvkm_vm *vm = vma->vm; in nvkm_vm_unmap_at() local
185 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_unmap_at()
190 u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; in nvkm_vm_unmap_at()
196 struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; in nvkm_vm_unmap_at()
213 mmu->flush(vm); in nvkm_vm_unmap_at()
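
Each of the four paths above ends with a single mmu->flush(vm) (lines 70, 128, 166, 213) after all PTE writes: pages are mapped or cleared a batch at a time, each batch clamped at a page-table boundary, and the TLB is invalidated once at the end rather than per PTE. A sketch of that loop shape, with stand-in helpers and an assumed 32 PTEs per table:

#include <stdio.h>

/* Hypothetical stand-ins for the chipset's mmu->unmap() and mmu->flush(). */
static void unmap_ptes(unsigned pde, unsigned pte, unsigned cnt)
{
	printf("clear %u PTEs at pgt[%u]+%u\n", cnt, pde, pte);
}
static void flush(void) { printf("single TLB flush at the end\n"); }

int main(void)
{
	unsigned num = 40, pte = 30, pde = 0;
	const unsigned max = 32;    /* assumed PTEs per page table */

	/* Same shape as nvkm_vm_unmap_at(): clamp each batch to the end
	 * of the current page table, then step to the next PDE. */
	while (num) {
		unsigned end = (pte + num < max) ? pte + num : max;
		unsigned cnt = end - pte;

		unmap_ptes(pde, pte, cnt);
		num -= cnt;
		pte += cnt;
		if (pte >= max) {
			pde++;
			pte = 0;
		}
	}
	flush();
	return 0;
}
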
223 nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) in nvkm_vm_unmap_pgt() argument
225 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_unmap_pgt()
232 vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_unmap_pgt()
239 list_for_each_entry(vpgd, &vm->pgd_list, head) { in nvkm_vm_unmap_pgt()
250 nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type) in nvkm_vm_map_pgt() argument
252 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_map_pgt()
253 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_map_pgt()
264 ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000, in nvkm_vm_map_pgt()
279 list_for_each_entry(vpgd, &vm->pgd_list, head) { in nvkm_vm_map_pgt()
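
nvkm_vm_map_pgt() shows the lazy-allocation side of the design: a page table is only created the first time something maps into its PDE, and the new table is then written into every page directory on vm->pgd_list (line 279). A simplified userspace sketch of that reference-then-allocate pattern; struct vm_pgt and pgt_ref() are stand-ins, not the nvkm types:

#include <stdio.h>
#include <stdlib.h>

struct vm_pgt { void *obj; unsigned refcount; };

/* Stand-in for the nvkm_gpuobj_new() + per-PGD mmu->map_pgt() sequence. */
static int pgt_ref(struct vm_pgt *vpgt, size_t pgt_size)
{
	if (vpgt->refcount++ == 0) {     /* first mapping into this PDE */
		vpgt->obj = calloc(1, pgt_size);
		if (!vpgt->obj) {
			vpgt->refcount--;
			return -1;       /* -ENOMEM in the kernel */
		}
		/* the kernel then walks vm->pgd_list and writes the new
		 * table into every linked page directory */
		printf("allocated %zu-byte PGT, mirrored into all PGDs\n",
		       pgt_size);
	}
	return 0;
}

int main(void)
{
	struct vm_pgt vpgt = { 0, 0 };

	pgt_ref(&vpgt, 0x1000);   /* allocates and publishes the table */
	pgt_ref(&vpgt, 0x1000);   /* second mapping just bumps the refcount */
	printf("refcount = %u\n", vpgt.refcount);
	free(vpgt.obj);
	return 0;
}
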
287 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access, in nvkm_vm_get() argument
290 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_get()
297 ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align, in nvkm_vm_get()
308 struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; in nvkm_vm_get()
316 ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); in nvkm_vm_get()
319 nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1); in nvkm_vm_get()
320 nvkm_mm_free(&vm->mm, &vma->node); in nvkm_vm_get()
327 vma->vm = NULL; in nvkm_vm_get()
328 nvkm_vm_ref(vm, &vma->vm, NULL); in nvkm_vm_get()
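
nvkm_vm_get() strings these pieces together: reserve an address range from vm->mm (line 297), guarantee a backing PGT for every PDE the range touches (line 316), and on failure unwind only the PDEs populated so far before freeing the range (lines 319-320). A compact sketch of that unwind pattern; map_pgt()/unmap_pgt() are hypothetical helpers, with the failure point chosen arbitrarily:

#include <stdio.h>

static int map_pgt(unsigned pde)   { return pde == 3 ? -12 : 0; }
static void unmap_pgt(unsigned fpde, unsigned lpde)
{
	printf("unwind: dropping PGTs %u..%u\n", fpde, lpde);
}

int main(void)
{
	unsigned fpde = 0, lpde = 5, pde;

	for (pde = fpde; pde <= lpde; pde++) {
		int ret = map_pgt(pde);
		if (ret) {
			/* same shape as nvkm_vm_get(): undo only the PDEs
			 * populated so far, then free the address range
			 * (nvkm_mm_free() in the kernel) */
			if (pde != fpde)
				unmap_pgt(fpde, pde - 1);
			printf("allocation failed at pde %u (ret %d)\n",
			       pde, ret);
			return 1;
		}
	}
	printf("all PGTs present for PDEs %u..%u\n", fpde, lpde);
	return 0;
}
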
337 struct nvkm_vm *vm = vma->vm; in nvkm_vm_put() local
338 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_put()
347 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde); in nvkm_vm_put()
348 nvkm_mm_free(&vm->mm, &vma->node); in nvkm_vm_put()
351 nvkm_vm_ref(NULL, &vma->vm, NULL); in nvkm_vm_put()
358 struct nvkm_vm *vm; in nvkm_vm_create() local
362 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in nvkm_vm_create()
363 if (!vm) in nvkm_vm_create()
366 INIT_LIST_HEAD(&vm->pgd_list); in nvkm_vm_create()
367 vm->mmu = mmu; in nvkm_vm_create()
368 kref_init(&vm->refcount); in nvkm_vm_create()
369 vm->fpde = offset >> (mmu->pgt_bits + 12); in nvkm_vm_create()
370 vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12); in nvkm_vm_create()
372 vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); in nvkm_vm_create()
373 if (!vm->pgt) { in nvkm_vm_create()
374 kfree(vm); in nvkm_vm_create()
378 ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, in nvkm_vm_create()
381 vfree(vm->pgt); in nvkm_vm_create()
382 kfree(vm); in nvkm_vm_create()
386 *pvm = vm; in nvkm_vm_create()
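
The fpde/lpde computation at lines 369-370 sizes everything else: each PDE spans 2^(pgt_bits+12) bytes, so vm->pgt needs exactly lpde - fpde + 1 slots (line 372). A standalone sketch of that arithmetic with an assumed pgt_bits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t pgt_bits = 15;          /* assumed per-chipset value */
	const uint64_t pde_span = 1ULL << (pgt_bits + 12);

	uint64_t offset = 0, length = 1ULL << 40;   /* a 1TiB example vm */

	uint32_t fpde = (uint32_t)(offset >> (pgt_bits + 12));
	uint32_t lpde = (uint32_t)((offset + length - 1) >> (pgt_bits + 12));

	/* vm->pgt is then sized for exactly this window */
	size_t nr_pgt = lpde - fpde + 1;

	printf("each PDE spans %llu MiB; need pgt[%zu] (PDEs %u..%u)\n",
	       (unsigned long long)(pde_span >> 20), nr_pgt, fpde, lpde);
	return 0;
}
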
400 nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd) in nvkm_vm_link() argument
402 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_link()
416 for (i = vm->fpde; i <= vm->lpde; i++) in nvkm_vm_link()
417 mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); in nvkm_vm_link()
418 list_add(&vpgd->head, &vm->pgd_list); in nvkm_vm_link()
424 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd) in nvkm_vm_unlink() argument
426 struct nvkm_mmu *mmu = vm->mmu; in nvkm_vm_unlink()
434 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { in nvkm_vm_unlink()
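
nvkm_vm_link() is what keeps multiple page directories coherent: a newly linked PGD immediately receives the vm's entire current PDE window (lines 416-417), so nvkm_vm_map_pgt() can later publish new tables simply by walking the same list. A toy sketch of that replay; map_pgt() stands in for the chipset's mmu->map_pgt() method:

#include <stdio.h>

#define NR_PDE 4   /* assumed: this vm covers four PDEs (lpde - fpde + 1) */

static void map_pgt(const char *pgd, unsigned pde, void *pgt)
{
	printf("%s: pde %u -> %s\n", pgd, pde, pgt ? "pgt" : "(empty)");
}

int main(void)
{
	void *pgt[NR_PDE] = { 0 };
	unsigned i;
	static int table;

	pgt[1] = &table;   /* pretend one page table already exists */

	/* linking a new page directory replays the whole PDE window,
	 * the same loop shape shown in the listing above */
	for (i = 0; i < NR_PDE; i++)
		map_pgt("new pgd", i, pgt[i]);
	return 0;
}
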
450 struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount); in nvkm_vm_del() local
453 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { in nvkm_vm_del()
454 nvkm_vm_unlink(vm, vpgd->obj); in nvkm_vm_del()
457 nvkm_mm_fini(&vm->mm); in nvkm_vm_del()
458 vfree(vm->pgt); in nvkm_vm_del()
459 kfree(vm); in nvkm_vm_del()
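
Lifetime ties the listing together: kref_init() at line 368 gives the creator the first reference, nvkm_vm_get()/nvkm_vm_put() move a reference in and out of vma->vm (lines 328 and 351), and the release callback nvkm_vm_del() unlinks every PGD and frees the structures. A userspace approximation of that nvkm_vm_ref() contract; the kernel uses struct kref rather than the bare counter used here:

#include <stdio.h>
#include <stdlib.h>

struct vm { unsigned refcount; };

static void vm_del(struct vm *vm)
{
	/* kernel order: unlink each PGD, nvkm_mm_fini(), vfree(vm->pgt),
	 * kfree(vm) */
	printf("last reference dropped, tearing the vm down\n");
	free(vm);
}

/* Same contract as nvkm_vm_ref(new, &slot, ...): reference the new vm
 * first, release whatever *slot held, then store the new pointer. */
static void vm_ref(struct vm *new_vm, struct vm **slot)
{
	if (new_vm)
		new_vm->refcount++;
	if (*slot && --(*slot)->refcount == 0)
		vm_del(*slot);
	*slot = new_vm;
}

int main(void)
{
	struct vm *creator = calloc(1, sizeof(*creator));
	struct vm *vma_slot = NULL;

	if (!creator)
		return 1;
	creator->refcount = 1;        /* kref_init() in nvkm_vm_create() */

	vm_ref(creator, &vma_slot);   /* nvkm_vm_get(): vma->vm takes a ref */
	vm_ref(NULL, &vma_slot);      /* nvkm_vm_put(): vma->vm drops it */
	vm_ref(NULL, &creator);       /* creator's own reference goes last */
	return 0;
}
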