Lines Matching refs:mmu
87 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu); in nv44_vm_map_sg() local
95 nv44_vm_fill(pgt, mmu->null, list, pte, part); in nv44_vm_map_sg()
112 nv44_vm_fill(pgt, mmu->null, list, pte, cnt); in nv44_vm_map_sg()
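
These matches walk the nouveau NV44 GART paths in the nvkm MMU subdev. The two nv44_vm_fill() calls matched above are the head and tail of the map path: the NV44 GART packs four page-frame numbers into each 16-byte PTE group, so a range that starts or ends off a 4-PTE boundary needs a read-modify-write with mmu->null backing the untouched slots. A minimal sketch of that control flow, reconstructed around the matched lines (the nvkm_kmap()/nvkm_done() bracketing and the head/tail arithmetic are assumptions, not part of the listing):

    static void
    nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
                   struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
    {
            struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);

            nvkm_kmap(pgt);
            if (pte & 3) {
                    /* head: fill up to the next 4-PTE group boundary */
                    u32  max = 4 - (pte & 3);
                    u32 part = (cnt > max) ? max : cnt;
                    nv44_vm_fill(pgt, mmu->null, list, pte, part);
                    pte  += part;
                    list += part;
                    cnt  -= part;
            }

            /* ... whole 4-PTE groups are written out directly here ... */

            if (cnt) /* tail: a partial group is left over */
                    nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
            nvkm_done(pgt);
    }
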
119 struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu); in nv44_vm_unmap() local
125 nv44_vm_fill(pgt, mmu->null, NULL, pte, part); in nv44_vm_unmap()
139 nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt); in nv44_vm_unmap()
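
nv44_vm_unmap() calls the same helper with list == NULL for both the head and the tail of the range. The natural reading (an assumption about nv44_vm_fill(), whose body is not matched here) is that a NULL list makes the helper substitute the dummy page, so freed PTEs keep pointing at valid DMA memory instead of going stale:

    /* Hedged sketch of the fallback inside nv44_vm_fill(): with
     * list == NULL, every PTE in the partial group is pointed at
     * the dummy page whose DMA address is mmu->null. */
    static void
    nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
                 dma_addr_t *list, u32 pte, u32 cnt)
    {
            while (cnt--) {
                    u32 addr = (u32)((list ? *list++ : null) >> 12);
                    /* ... read-modify-write addr into its slot of
                     * the 16-byte PTE group at pte++ ... */
            }
    }
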
146 struct nv04_mmu *mmu = nv04_mmu(vm->mmu); in nv44_vm_flush() local
147 struct nvkm_device *device = mmu->base.subdev.device; in nv44_vm_flush()
148 nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE); in nv44_vm_flush()
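
nv44_vm_flush() is matched writing only the flush range: PFB register 0x100814 takes the upper bound of the addresses to invalidate (the aperture limit minus one NV44_GART_PAGE). A sketch of the surrounding sequence; the 0x100808 trigger-and-poll handshake is an assumption about how the flush is kicked, since only the 0x100814 write appears in the listing:

    static void
    nv44_vm_flush(struct nvkm_vm *vm)
    {
            struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
            struct nvkm_device *device = mmu->base.subdev.device;

            /* upper bound of the range to flush (matched line) */
            nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
            /* trigger the flush and poll for completion (assumed) */
            nvkm_wr32(device, 0x100808, 0x00000020);
            nvkm_msec(device, 2000,
                    if (nvkm_rd32(device, 0x100808) & 0x00000001)
                            break;
            );
            nvkm_wr32(device, 0x100808, 0x00000000);
    }
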
164 struct nv04_mmu *mmu = nv04_mmu(base); in nv44_mmu_oneinit() local
165 struct nvkm_device *device = mmu->base.subdev.device; in nv44_mmu_oneinit()
168 mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024, in nv44_mmu_oneinit()
169 &mmu->null, GFP_KERNEL); in nv44_mmu_oneinit()
170 if (!mmu->nullp) { in nv44_mmu_oneinit()
171 nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n"); in nv44_mmu_oneinit()
172 mmu->null = 0; in nv44_mmu_oneinit()
175 ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL, in nv44_mmu_oneinit()
176 &mmu->vm); in nv44_mmu_oneinit()
183 &mmu->vm->pgt[0].mem[0]); in nv44_mmu_oneinit()
184 mmu->vm->pgt[0].refcount[0] = 1; in nv44_mmu_oneinit()
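
nv44_mmu_oneinit() sets up everything the map/unmap paths rely on: 16KiB of coherent DMA memory for the dummy pages (mmu->null holds the DMA address, mmu->nullp the CPU mapping), a flat VM covering the GART aperture with 4KiB pages, and one preallocated page table. Note that dummy-page allocation failure is non-fatal: the code only warns and leaves mmu->null at 0. A sketch of the whole function; the nvkm_memory_new() size and alignment arguments are assumptions inferred from the destination shown in the last two matched lines:

    static int
    nv44_mmu_oneinit(struct nvkm_mmu *base)
    {
            struct nv04_mmu *mmu = nv04_mmu(base);
            struct nvkm_device *device = mmu->base.subdev.device;
            int ret;

            /* dummy backing pages; failure is non-fatal */
            mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
                                            &mmu->null, GFP_KERNEL);
            if (!mmu->nullp) {
                    nvkm_warn(&mmu->base.subdev,
                              "unable to allocate dummy pages\n");
                    mmu->null = 0;
            }

            /* flat VM over the GART aperture, 4KiB pages */
            ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096,
                                 NULL, &mmu->vm);
            if (ret)
                    return ret;

            /* single page table, one 32-bit PTE per GART page
             * (size/alignment assumed, not in the listing) */
            ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                  (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
                                  512 * 1024, true,
                                  &mmu->vm->pgt[0].mem[0]);
            mmu->vm->pgt[0].refcount[0] = 1;
            return ret;
    }
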
191 struct nv04_mmu *mmu = nv04_mmu(base); in nv44_mmu_init() local
192 struct nvkm_device *device = mmu->base.subdev.device; in nv44_mmu_init()
193 struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0]; in nv44_mmu_init()
204 nvkm_wr32(device, 0x100818, mmu->null); in nv44_mmu_init()
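
nv44_mmu_init() programs the hardware at subdev init: the matched lines fetch the page table allocated in oneinit and write the dummy-page DMA address to PFB register 0x100818. A hedged sketch of the bring-up; every register write except 0x100818 is an assumption (the real sequence likely also programs the aperture size and page-table base):

    static void
    nv44_mmu_init(struct nvkm_mmu *base)
    {
            struct nv04_mmu *mmu = nv04_mmu(base);
            struct nvkm_device *device = mmu->base.subdev.device;
            struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];

            /* dummy page for unmapped/partial PTEs (matched line) */
            nvkm_wr32(device, 0x100818, mmu->null);
            /* aperture size and page-table base: register layout
             * assumed, only 0x100818 appears in the listing */
            nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
            nvkm_wr32(device, 0x100800,
                      lower_32_bits(nvkm_memory_addr(gart)) | 0x00000010);
    }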