mmu               246 arch/arc/include/asm/arcregs.h 	unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
mmu               248 arch/arc/include/asm/arcregs.h 	unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
mmu               301 arch/arc/include/asm/arcregs.h 	struct cpuinfo_arc_mmu mmu;
mmu               262 arch/arc/kernel/setup.c 		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
mmu               386 arch/arc/kernel/setup.c 				IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
mmu               252 arch/arc/mm/tlb.c 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
mmu               255 arch/arc/mm/tlb.c 	int num_tlb = mmu->sets * mmu->ways;
mmu               728 arch/arc/mm/tlb.c 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
mmu               760 arch/arc/mm/tlb.c 	mmu->ver = (tmp >> 24);
mmu               763 arch/arc/mm/tlb.c 		if (mmu->ver <= 2) {
mmu               765 arch/arc/mm/tlb.c 			mmu->pg_sz_k = TO_KB(0x2000);
mmu               766 arch/arc/mm/tlb.c 			mmu->sets = 1 << mmu2->sets;
mmu               767 arch/arc/mm/tlb.c 			mmu->ways = 1 << mmu2->ways;
mmu               768 arch/arc/mm/tlb.c 			mmu->u_dtlb = mmu2->u_dtlb;
mmu               769 arch/arc/mm/tlb.c 			mmu->u_itlb = mmu2->u_itlb;
mmu               772 arch/arc/mm/tlb.c 			mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
mmu               773 arch/arc/mm/tlb.c 			mmu->sets = 1 << mmu3->sets;
mmu               774 arch/arc/mm/tlb.c 			mmu->ways = 1 << mmu3->ways;
mmu               775 arch/arc/mm/tlb.c 			mmu->u_dtlb = mmu3->u_dtlb;
mmu               776 arch/arc/mm/tlb.c 			mmu->u_itlb = mmu3->u_itlb;
mmu               777 arch/arc/mm/tlb.c 			mmu->sasid = mmu3->sasid;
mmu               781 arch/arc/mm/tlb.c 		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
mmu               782 arch/arc/mm/tlb.c 		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
mmu               783 arch/arc/mm/tlb.c 		mmu->sets = 64 << mmu4->n_entry;
mmu               784 arch/arc/mm/tlb.c 		mmu->ways = mmu4->n_ways * 2;
mmu               785 arch/arc/mm/tlb.c 		mmu->u_dtlb = mmu4->u_dtlb * 4;
mmu               786 arch/arc/mm/tlb.c 		mmu->u_itlb = mmu4->u_itlb * 4;
mmu               787 arch/arc/mm/tlb.c 		mmu->sasid = mmu4->sasid;
mmu               788 arch/arc/mm/tlb.c 		pae_exists = mmu->pae = mmu4->pae;
mmu               795 arch/arc/mm/tlb.c 	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
mmu               820 arch/arc/mm/tlb.c 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
mmu               846 arch/arc/mm/tlb.c 	if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
mmu               848 arch/arc/mm/tlb.c 	else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
mmu               853 arch/arc/mm/tlb.c 		      mmu->ver, CONFIG_ARC_MMU_VER);
mmu               856 arch/arc/mm/tlb.c 	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
mmu               860 arch/arc/mm/tlb.c 	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
mmu               864 arch/arc/mm/tlb.c 	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
mmu               895 arch/arc/mm/tlb.c #define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
mmu               910 arch/arc/mm/tlb.c 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
mmu               912 arch/arc/mm/tlb.c 	int set, n_ways = mmu->ways;
mmu               915 arch/arc/mm/tlb.c 	BUG_ON(mmu->ways > 4);
mmu               920 arch/arc/mm/tlb.c 	for (set = 0; set < mmu->sets; set++) {
mmu               928 arch/arc/mm/tlb.c 					  SET_WAY_TO_IDX(mmu, set, way));
mmu               961 arch/arc/mm/tlb.c 						SET_WAY_TO_IDX(mmu, set, way));
mmu                74 arch/c6x/kernel/setup.c 	const char *mmu;
mmu               117 arch/c6x/kernel/setup.c 	p->mmu = "none";
mmu               435 arch/c6x/kernel/setup.c 		   p->core_id, p->mmu, p->fpu,
mmu               400 arch/m68k/kernel/setup_mm.c 	const char *cpu, *mmu, *fpu;
mmu               449 arch/m68k/kernel/setup_mm.c 		mmu = "68851";
mmu               451 arch/m68k/kernel/setup_mm.c 		mmu = "68030";
mmu               453 arch/m68k/kernel/setup_mm.c 		mmu = "68040";
mmu               455 arch/m68k/kernel/setup_mm.c 		mmu = "68060";
mmu               457 arch/m68k/kernel/setup_mm.c 		mmu = "Sun-3";
mmu               459 arch/m68k/kernel/setup_mm.c 		mmu = "Apollo";
mmu               461 arch/m68k/kernel/setup_mm.c 		mmu = "ColdFire";
mmu               463 arch/m68k/kernel/setup_mm.c 		mmu = "unknown";
mmu               473 arch/m68k/kernel/setup_mm.c 		   cpu, mmu, fpu,
mmu               177 arch/m68k/kernel/setup_no.c 	char *cpu, *mmu, *fpu;
mmu               181 arch/m68k/kernel/setup_no.c 	mmu = "none";
mmu               191 arch/m68k/kernel/setup_no.c 		      cpu, mmu, fpu,
mmu                40 arch/microblaze/include/asm/cpuinfo.h 	u32 mmu;
mmu                73 arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c 	CI(mmu, USE_MMU);
mmu               119 arch/microblaze/kernel/cpu/cpuinfo-static.c 	ci->mmu = fcpu(cpu, "xlnx,use-mmu");
mmu                74 arch/microblaze/kernel/cpu/mb.c 	seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu);
mmu                15 arch/nios2/include/asm/cpuinfo.h 	bool mmu;
mmu                59 arch/nios2/kernel/cpuinfo.c 	cpuinfo.mmu = of_property_read_bool(cpu, "altr,has-mmu");
mmu               133 arch/nios2/kernel/cpuinfo.c 		   cpuinfo.mmu ? "present" : "none",
mmu               521 arch/powerpc/include/asm/kvm_host.h 	struct kvmppc_mmu mmu;
mmu               899 arch/powerpc/kernel/prom_init.c 	u8 mmu;
mmu              1058 arch/powerpc/kernel/prom_init.c 		.mmu = 0,
mmu              1323 arch/powerpc/kernel/prom_init.c 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
mmu              1328 arch/powerpc/kernel/prom_init.c 		ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
mmu               141 arch/powerpc/kvm/book3s.c 	vcpu->arch.mmu.reset_msr(vcpu);
mmu               461 arch/powerpc/kvm/book3s.c 		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
mmu               404 arch/powerpc/kvm/book3s_32_mmu.c 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
mmu               406 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;
mmu               407 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;
mmu               408 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->xlate = kvmppc_mmu_book3s_32_xlate;
mmu               409 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;
mmu               410 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;
mmu               411 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;
mmu               412 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;
mmu               413 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32;
mmu               415 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->slbmte = NULL;
mmu               416 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->slbmfee = NULL;
mmu               417 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->slbmfev = NULL;
mmu               418 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->slbfee = NULL;
mmu               419 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->slbie = NULL;
mmu               420 arch/powerpc/kvm/book3s_32_mmu.c 	mmu->slbia = NULL;
mmu               158 arch/powerpc/kvm/book3s_32_mmu_host.c 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
mmu               311 arch/powerpc/kvm/book3s_32_mmu_host.c 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
mmu               668 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
mmu               670 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->mfsrin = NULL;
mmu               671 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
mmu               672 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
mmu               673 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
mmu               674 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
mmu               675 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
mmu               676 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
mmu               677 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
mmu               678 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
mmu               679 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
mmu               680 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
mmu               681 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
mmu               682 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
mmu               683 arch/powerpc/kvm/book3s_64_mmu.c 	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;
mmu               106 arch/powerpc/kvm/book3s_64_mmu_host.c 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
mmu               217 arch/powerpc/kvm/book3s_64_mmu_host.c 	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
mmu               323 arch/powerpc/kvm/book3s_64_mmu_host.c 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
mmu              2159 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
mmu              2163 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
mmu              2164 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
mmu               321 arch/powerpc/kvm/book3s_emulate.c 			if (vcpu->arch.mmu.mfsrin) {
mmu               323 arch/powerpc/kvm/book3s_emulate.c 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
mmu               333 arch/powerpc/kvm/book3s_emulate.c 			if (vcpu->arch.mmu.mfsrin) {
mmu               335 arch/powerpc/kvm/book3s_emulate.c 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
mmu               341 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.mtsrin(vcpu,
mmu               346 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.mtsrin(vcpu,
mmu               355 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
mmu               389 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbmte)
mmu               392 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.slbmte(vcpu,
mmu               397 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbie)
mmu               400 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.slbie(vcpu,
mmu               404 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbia)
mmu               407 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.slbia(vcpu);
mmu               410 arch/powerpc/kvm/book3s_emulate.c 			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
mmu               417 arch/powerpc/kvm/book3s_emulate.c 				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
mmu               427 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbmfee) {
mmu               433 arch/powerpc/kvm/book3s_emulate.c 				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
mmu               438 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbmfev) {
mmu               444 arch/powerpc/kvm/book3s_emulate.c 				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
mmu               755 arch/powerpc/kvm/book3s_emulate.c 		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
mmu               564 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
mmu               686 arch/powerpc/kvm/book3s_pr.c 		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
mmu               709 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
mmu               722 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
mmu               767 arch/powerpc/kvm/book3s_pr.c 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
mmu              1168 arch/powerpc/kvm/book3s_pr.c 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
mmu              1478 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
mmu              1479 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.mmu.slbia(vcpu);
mmu              1486 arch/powerpc/kvm/book3s_pr.c 				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
mmu              1492 arch/powerpc/kvm/book3s_pr.c 			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
mmu               114 arch/powerpc/kvm/book3s_pr_papr.c 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
mmu               202 arch/powerpc/kvm/book3s_pr_papr.c 			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
mmu               247 arch/powerpc/kvm/book3s_pr_papr.c 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
mmu               100 arch/powerpc/platforms/powernv/opal-call.c 	bool mmu = (msr & (MSR_IR|MSR_DR));
mmu               105 arch/powerpc/platforms/powernv/opal-call.c 	if (unlikely(!mmu))
mmu               132 arch/riscv/kernel/cpu.c 	const char *compat, *isa, *mmu;
mmu               138 arch/riscv/kernel/cpu.c 	if (!of_property_read_string(node, "mmu-type", &mmu))
mmu               139 arch/riscv/kernel/cpu.c 		print_mmu(m, mmu);
mmu               680 arch/sparc/mm/init_64.c 				       unsigned long mmu)
mmu               682 arch/sparc/mm/init_64.c 	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
mmu               686 arch/sparc/mm/init_64.c 			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
mmu               142 arch/um/kernel/skas/mmu.c 	struct mm_context *mmu = &mm->context;
mmu               150 arch/um/kernel/skas/mmu.c 	if (mmu->id.u.pid < 2) {
mmu               152 arch/um/kernel/skas/mmu.c 		       mmu->id.u.pid);
mmu               155 arch/um/kernel/skas/mmu.c 	os_kill_ptraced_process(mmu->id.u.pid, 1);
mmu               157 arch/um/kernel/skas/mmu.c 	free_page(mmu->id.stack);
mmu               158 arch/um/kernel/skas/mmu.c 	free_ldt(mmu);
mmu               236 arch/x86/hyperv/mmu.c 	pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb_others;
mmu               237 arch/x86/hyperv/mmu.c 	pv_ops.mmu.tlb_remove_table = tlb_remove_table;
mmu               580 arch/x86/include/asm/kvm_host.h 	struct kvm_mmu *mmu;
mmu              1284 arch/x86/include/asm/kvm_host.h int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
mmu              1405 arch/x86/include/asm/kvm_host.h int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu              1438 arch/x86/include/asm/kvm_host.h void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu                52 arch/x86/include/asm/paravirt.h 	PVOP_VCALL0(mmu.flush_tlb_user);
mmu                57 arch/x86/include/asm/paravirt.h 	PVOP_VCALL0(mmu.flush_tlb_kernel);
mmu                62 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
mmu                68 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
mmu                73 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
mmu                78 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.exit_mmap, mm);
mmu               119 arch/x86/include/asm/paravirt.h 	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
mmu               124 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.write_cr2, x);
mmu               129 arch/x86/include/asm/paravirt.h 	return PVOP_CALL0(unsigned long, mmu.read_cr3);
mmu               134 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.write_cr3, x);
mmu               305 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.activate_mm, prev, next);
mmu               311 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
mmu               316 arch/x86/include/asm/paravirt.h 	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
mmu               321 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
mmu               326 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
mmu               330 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_pte, pfn);
mmu               335 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
mmu               340 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_pmd, pfn);
mmu               345 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
mmu               349 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_pud, pfn);
mmu               354 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
mmu               359 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.release_p4d, pfn);
mmu               367 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
mmu               369 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);
mmu               379 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
mmu               382 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
mmu               392 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
mmu               394 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);
mmu               404 arch/x86/include/asm/paravirt.h 		ret =  PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
mmu               407 arch/x86/include/asm/paravirt.h 		ret =  PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
mmu               418 arch/x86/include/asm/paravirt.h 	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);
mmu               429 arch/x86/include/asm/paravirt.h 		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
mmu               431 arch/x86/include/asm/paravirt.h 		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
mmu               438 arch/x86/include/asm/paravirt.h 		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
mmu               440 arch/x86/include/asm/paravirt.h 		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
mmu               448 arch/x86/include/asm/paravirt.h 		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
mmu               450 arch/x86/include/asm/paravirt.h 		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
mmu               458 arch/x86/include/asm/paravirt.h 		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
mmu               460 arch/x86/include/asm/paravirt.h 		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
mmu               469 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
mmu               471 arch/x86/include/asm/paravirt.h 		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);
mmu               481 arch/x86/include/asm/paravirt.h 		ret =  PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
mmu               484 arch/x86/include/asm/paravirt.h 		ret =  PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
mmu               494 arch/x86/include/asm/paravirt.h 		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
mmu               496 arch/x86/include/asm/paravirt.h 		PVOP_VCALL2(mmu.set_pud, pudp, val);
mmu               503 arch/x86/include/asm/paravirt.h 	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);
mmu               510 arch/x86/include/asm/paravirt.h 	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
mmu               522 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
mmu               529 arch/x86/include/asm/paravirt.h 	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);
mmu               536 arch/x86/include/asm/paravirt.h 	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
mmu               541 arch/x86/include/asm/paravirt.h 	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
mmu               572 arch/x86/include/asm/paravirt.h 	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
mmu               578 arch/x86/include/asm/paravirt.h 	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
mmu               583 arch/x86/include/asm/paravirt.h 	PVOP_VCALL1(mmu.pmd_clear, pmdp);
mmu               617 arch/x86/include/asm/paravirt.h 	PVOP_VCALL0(mmu.lazy_mode.enter);
mmu               622 arch/x86/include/asm/paravirt.h 	PVOP_VCALL0(mmu.lazy_mode.leave);
mmu               627 arch/x86/include/asm/paravirt.h 	PVOP_VCALL0(mmu.lazy_mode.flush);
mmu               633 arch/x86/include/asm/paravirt.h 	pv_ops.mmu.set_fixmap(idx, phys, flags);
mmu               331 arch/x86/include/asm/paravirt_types.h 	struct pv_mmu_ops	mmu;
mmu                72 arch/x86/kernel/asm-offsets.c 	OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2);
mmu               628 arch/x86/kernel/kvm.c 		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
mmu               629 arch/x86/kernel/kvm.c 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
mmu               359 arch/x86/kernel/paravirt.c 	.mmu.flush_tlb_user	= native_flush_tlb,
mmu               360 arch/x86/kernel/paravirt.c 	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
mmu               361 arch/x86/kernel/paravirt.c 	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
mmu               362 arch/x86/kernel/paravirt.c 	.mmu.flush_tlb_others	= native_flush_tlb_others,
mmu               363 arch/x86/kernel/paravirt.c 	.mmu.tlb_remove_table	=
mmu               366 arch/x86/kernel/paravirt.c 	.mmu.exit_mmap		= paravirt_nop,
mmu               369 arch/x86/kernel/paravirt.c 	.mmu.read_cr2		= __PV_IS_CALLEE_SAVE(native_read_cr2),
mmu               370 arch/x86/kernel/paravirt.c 	.mmu.write_cr2		= native_write_cr2,
mmu               371 arch/x86/kernel/paravirt.c 	.mmu.read_cr3		= __native_read_cr3,
mmu               372 arch/x86/kernel/paravirt.c 	.mmu.write_cr3		= native_write_cr3,
mmu               374 arch/x86/kernel/paravirt.c 	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
mmu               375 arch/x86/kernel/paravirt.c 	.mmu.pgd_free		= paravirt_nop,
mmu               377 arch/x86/kernel/paravirt.c 	.mmu.alloc_pte		= paravirt_nop,
mmu               378 arch/x86/kernel/paravirt.c 	.mmu.alloc_pmd		= paravirt_nop,
mmu               379 arch/x86/kernel/paravirt.c 	.mmu.alloc_pud		= paravirt_nop,
mmu               380 arch/x86/kernel/paravirt.c 	.mmu.alloc_p4d		= paravirt_nop,
mmu               381 arch/x86/kernel/paravirt.c 	.mmu.release_pte	= paravirt_nop,
mmu               382 arch/x86/kernel/paravirt.c 	.mmu.release_pmd	= paravirt_nop,
mmu               383 arch/x86/kernel/paravirt.c 	.mmu.release_pud	= paravirt_nop,
mmu               384 arch/x86/kernel/paravirt.c 	.mmu.release_p4d	= paravirt_nop,
mmu               386 arch/x86/kernel/paravirt.c 	.mmu.set_pte		= native_set_pte,
mmu               387 arch/x86/kernel/paravirt.c 	.mmu.set_pte_at		= native_set_pte_at,
mmu               388 arch/x86/kernel/paravirt.c 	.mmu.set_pmd		= native_set_pmd,
mmu               390 arch/x86/kernel/paravirt.c 	.mmu.ptep_modify_prot_start	= __ptep_modify_prot_start,
mmu               391 arch/x86/kernel/paravirt.c 	.mmu.ptep_modify_prot_commit	= __ptep_modify_prot_commit,
mmu               395 arch/x86/kernel/paravirt.c 	.mmu.set_pte_atomic	= native_set_pte_atomic,
mmu               396 arch/x86/kernel/paravirt.c 	.mmu.pte_clear		= native_pte_clear,
mmu               397 arch/x86/kernel/paravirt.c 	.mmu.pmd_clear		= native_pmd_clear,
mmu               399 arch/x86/kernel/paravirt.c 	.mmu.set_pud		= native_set_pud,
mmu               401 arch/x86/kernel/paravirt.c 	.mmu.pmd_val		= PTE_IDENT,
mmu               402 arch/x86/kernel/paravirt.c 	.mmu.make_pmd		= PTE_IDENT,
mmu               405 arch/x86/kernel/paravirt.c 	.mmu.pud_val		= PTE_IDENT,
mmu               406 arch/x86/kernel/paravirt.c 	.mmu.make_pud		= PTE_IDENT,
mmu               408 arch/x86/kernel/paravirt.c 	.mmu.set_p4d		= native_set_p4d,
mmu               411 arch/x86/kernel/paravirt.c 	.mmu.p4d_val		= PTE_IDENT,
mmu               412 arch/x86/kernel/paravirt.c 	.mmu.make_p4d		= PTE_IDENT,
mmu               414 arch/x86/kernel/paravirt.c 	.mmu.set_pgd		= native_set_pgd,
mmu               419 arch/x86/kernel/paravirt.c 	.mmu.pte_val		= PTE_IDENT,
mmu               420 arch/x86/kernel/paravirt.c 	.mmu.pgd_val		= PTE_IDENT,
mmu               422 arch/x86/kernel/paravirt.c 	.mmu.make_pte		= PTE_IDENT,
mmu               423 arch/x86/kernel/paravirt.c 	.mmu.make_pgd		= PTE_IDENT,
mmu               425 arch/x86/kernel/paravirt.c 	.mmu.dup_mmap		= paravirt_nop,
mmu               426 arch/x86/kernel/paravirt.c 	.mmu.activate_mm	= paravirt_nop,
mmu               428 arch/x86/kernel/paravirt.c 	.mmu.lazy_mode = {
mmu               434 arch/x86/kernel/paravirt.c 	.mmu.set_fixmap		= native_set_fixmap,
mmu                97 arch/x86/kernel/paravirt_patch.c 	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
mmu                98 arch/x86/kernel/paravirt_patch.c 	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
mmu                99 arch/x86/kernel/paravirt_patch.c 	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
mmu               372 arch/x86/kvm/mmu.c 	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
mmu              2360 arch/x86/kvm/mmu.c 	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
mmu              2565 arch/x86/kvm/mmu.c 	role = vcpu->arch.mmu->mmu_role.base;
mmu              2571 arch/x86/kvm/mmu.c 	if (!vcpu->arch.mmu->direct_map
mmu              2572 arch/x86/kvm/mmu.c 	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
mmu              2646 arch/x86/kvm/mmu.c 	iterator->level = vcpu->arch.mmu->shadow_root_level;
mmu              2649 arch/x86/kvm/mmu.c 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
mmu              2650 arch/x86/kvm/mmu.c 	    !vcpu->arch.mmu->direct_map)
mmu              2658 arch/x86/kvm/mmu.c 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
mmu              2661 arch/x86/kvm/mmu.c 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
mmu              2672 arch/x86/kvm/mmu.c 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
mmu              3335 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu              3545 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu              3717 arch/x86/kvm/mmu.c void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu              3727 arch/x86/kvm/mmu.c 	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
mmu              3730 arch/x86/kvm/mmu.c 			    VALID_PAGE(mmu->prev_roots[i].hpa))
mmu              3741 arch/x86/kvm/mmu.c 			mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
mmu              3745 arch/x86/kvm/mmu.c 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
mmu              3746 arch/x86/kvm/mmu.c 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
mmu              3747 arch/x86/kvm/mmu.c 			mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
mmu              3751 arch/x86/kvm/mmu.c 				if (mmu->pae_root[i] != 0)
mmu              3753 arch/x86/kvm/mmu.c 							   &mmu->pae_root[i],
mmu              3755 arch/x86/kvm/mmu.c 			mmu->root_hpa = INVALID_PAGE;
mmu              3757 arch/x86/kvm/mmu.c 		mmu->root_cr3 = 0;
mmu              3782 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
mmu              3789 arch/x86/kvm/mmu.c 				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
mmu              3792 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
mmu              3793 arch/x86/kvm/mmu.c 	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
mmu              3795 arch/x86/kvm/mmu.c 			hpa_t root = vcpu->arch.mmu->pae_root[i];
mmu              3808 arch/x86/kvm/mmu.c 			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
mmu              3810 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
mmu              3813 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
mmu              3825 arch/x86/kvm/mmu.c 	root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
mmu              3835 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
mmu              3836 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->root_hpa;
mmu              3846 arch/x86/kvm/mmu.c 				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
mmu              3850 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = root;
mmu              3860 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
mmu              3864 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->pae_root[i];
mmu              3867 arch/x86/kvm/mmu.c 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
mmu              3868 arch/x86/kvm/mmu.c 			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
mmu              3870 arch/x86/kvm/mmu.c 				vcpu->arch.mmu->pae_root[i] = 0;
mmu              3888 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
mmu              3890 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
mmu              3896 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
mmu              3897 arch/x86/kvm/mmu.c 		if (vcpu->arch.mmu->lm_root == NULL) {
mmu              3909 arch/x86/kvm/mmu.c 			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
mmu              3911 arch/x86/kvm/mmu.c 			vcpu->arch.mmu->lm_root = lm_root;
mmu              3914 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
mmu              3918 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->root_cr3 = root_cr3;
mmu              3925 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map)
mmu              3936 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map)
mmu              3939 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu              3944 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
mmu              3945 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->root_hpa;
mmu              3976 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->pae_root[i];
mmu              4016 arch/x86/kvm/mmu.c static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
mmu              4018 arch/x86/kvm/mmu.c 	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
mmu              4021 arch/x86/kvm/mmu.c static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
mmu              4023 arch/x86/kvm/mmu.c 	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
mmu              4050 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu              4067 arch/x86/kvm/mmu.c 		reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
mmu              4146 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu              4174 arch/x86/kvm/mmu.c 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
mmu              4188 arch/x86/kvm/mmu.c 	arch.direct_map = vcpu->arch.mmu->direct_map;
mmu              4189 arch/x86/kvm/mmu.c 	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
mmu              4292 arch/x86/kvm/mmu.c 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
mmu              4367 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
mmu              4369 arch/x86/kvm/mmu.c 	root.cr3 = mmu->root_cr3;
mmu              4370 arch/x86/kvm/mmu.c 	root.hpa = mmu->root_hpa;
mmu              4373 arch/x86/kvm/mmu.c 		swap(root, mmu->prev_roots[i]);
mmu              4381 arch/x86/kvm/mmu.c 	mmu->root_hpa = root.hpa;
mmu              4382 arch/x86/kvm/mmu.c 	mmu->root_cr3 = root.cr3;
mmu              4391 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
mmu              4398 arch/x86/kvm/mmu.c 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
mmu              4399 arch/x86/kvm/mmu.c 	    mmu->root_level >= PT64_ROOT_4LEVEL) {
mmu              4427 arch/x86/kvm/mmu.c 				page_header(mmu->root_hpa));
mmu              4441 arch/x86/kvm/mmu.c 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
mmu              4460 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
mmu              4480 arch/x86/kvm/mmu.c static inline bool is_last_gpte(struct kvm_mmu *mmu,
mmu              4488 arch/x86/kvm/mmu.c 	gpte &= level - mmu->last_nonleaf_level;
mmu              4754 arch/x86/kvm/mmu.c 				      struct kvm_mmu *mmu, bool ept)
mmu              4766 arch/x86/kvm/mmu.c 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
mmu              4790 arch/x86/kvm/mmu.c 			if (!mmu->nx)
mmu              4821 arch/x86/kvm/mmu.c 		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
mmu              4849 arch/x86/kvm/mmu.c static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu              4856 arch/x86/kvm/mmu.c 		mmu->pkru_mask = 0;
mmu              4862 arch/x86/kvm/mmu.c 		mmu->pkru_mask = 0;
mmu              4868 arch/x86/kvm/mmu.c 	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
mmu              4896 arch/x86/kvm/mmu.c 		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
mmu              4900 arch/x86/kvm/mmu.c static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
mmu              4902 arch/x86/kvm/mmu.c 	unsigned root_level = mmu->root_level;
mmu              4904 arch/x86/kvm/mmu.c 	mmu->last_nonleaf_level = root_level;
mmu              4906 arch/x86/kvm/mmu.c 		mmu->last_nonleaf_level++;
mmu              5018 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
mmu              5090 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
mmu              5144 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
mmu              5178 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
mmu              5241 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
mmu              5244 arch/x86/kvm/mmu.c 			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
mmu              5313 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
mmu              5481 arch/x86/kvm/mmu.c 			u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
mmu              5504 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map)
mmu              5539 arch/x86/kvm/mmu.c 	bool direct = vcpu->arch.mmu->direct_map;
mmu              5542 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map) {
mmu              5555 arch/x86/kvm/mmu.c 		r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
mmu              5573 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map &&
mmu              5612 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
mmu              5619 arch/x86/kvm/mmu.c 	mmu->invlpg(vcpu, gva, mmu->root_hpa);
mmu              5633 arch/x86/kvm/mmu.c 		if (VALID_PAGE(mmu->prev_roots[i].hpa))
mmu              5634 arch/x86/kvm/mmu.c 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
mmu              5643 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
mmu              5648 arch/x86/kvm/mmu.c 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
mmu              5653 arch/x86/kvm/mmu.c 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
mmu              5654 arch/x86/kvm/mmu.c 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
mmu              5655 arch/x86/kvm/mmu.c 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
mmu              5758 arch/x86/kvm/mmu.c static void free_mmu_pages(struct kvm_mmu *mmu)
mmu              5760 arch/x86/kvm/mmu.c 	free_page((unsigned long)mmu->pae_root);
mmu              5761 arch/x86/kvm/mmu.c 	free_page((unsigned long)mmu->lm_root);
mmu              5764 arch/x86/kvm/mmu.c static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
mmu              5785 arch/x86/kvm/mmu.c 	mmu->pae_root = page_address(page);
mmu              5787 arch/x86/kvm/mmu.c 		mmu->pae_root[i] = INVALID_PAGE;
mmu              5797 arch/x86/kvm/mmu.c 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
mmu                78 arch/x86/kvm/mmu.h 	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
mmu               100 arch/x86/kvm/mmu.h 	if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu               101 arch/x86/kvm/mmu.h 		vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
mmu               156 arch/x86/kvm/mmu.h static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu               179 arch/x86/kvm/mmu.h 	bool fault = (mmu->permissions[index] >> pte_access) & 1;
mmu               183 arch/x86/kvm/mmu.h 	if (unlikely(mmu->pkru_mask)) {
mmu               198 arch/x86/kvm/mmu.h 		pkru_bits &= mmu->pkru_mask >> offset;
mmu                59 arch/x86/kvm/mmu_audit.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu                62 arch/x86/kvm/mmu_audit.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
mmu                63 arch/x86/kvm/mmu_audit.c 		hpa_t root = vcpu->arch.mmu->root_hpa;
mmu                66 arch/x86/kvm/mmu_audit.c 		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
mmu                71 arch/x86/kvm/mmu_audit.c 		hpa_t root = vcpu->arch.mmu->pae_root[i];
mmu               122 arch/x86/kvm/mmu_audit.c 			     "ent %llxn", vcpu->arch.mmu->root_level, pfn,
mmu                34 arch/x86/kvm/paging_tmpl.h 	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
mmu                54 arch/x86/kvm/paging_tmpl.h 	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
mmu                67 arch/x86/kvm/paging_tmpl.h 	#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
mmu               104 arch/x86/kvm/paging_tmpl.h static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
mmu               110 arch/x86/kvm/paging_tmpl.h 	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
mmu               131 arch/x86/kvm/paging_tmpl.h static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu               178 arch/x86/kvm/paging_tmpl.h 	if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
mmu               185 arch/x86/kvm/paging_tmpl.h 	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
mmu               221 arch/x86/kvm/paging_tmpl.h 					     struct kvm_mmu *mmu,
mmu               232 arch/x86/kvm/paging_tmpl.h 	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
mmu               272 arch/x86/kvm/paging_tmpl.h 		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
mmu               297 arch/x86/kvm/paging_tmpl.h 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu               320 arch/x86/kvm/paging_tmpl.h 	walker->level = mmu->root_level;
mmu               321 arch/x86/kvm/paging_tmpl.h 	pte           = mmu->get_cr3(vcpu);
mmu               322 arch/x86/kvm/paging_tmpl.h 	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
mmu               327 arch/x86/kvm/paging_tmpl.h 		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
mmu               363 arch/x86/kvm/paging_tmpl.h 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
mmu               403 arch/x86/kvm/paging_tmpl.h 		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
mmu               409 arch/x86/kvm/paging_tmpl.h 	} while (!is_last_gpte(mmu, walker->level, pte));
mmu               417 arch/x86/kvm/paging_tmpl.h 	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
mmu               427 arch/x86/kvm/paging_tmpl.h 	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
mmu               434 arch/x86/kvm/paging_tmpl.h 		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
mmu               445 arch/x86/kvm/paging_tmpl.h 		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
mmu               458 arch/x86/kvm/paging_tmpl.h 	if (fetch_fault && (mmu->nx ||
mmu               492 arch/x86/kvm/paging_tmpl.h 	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
mmu               501 arch/x86/kvm/paging_tmpl.h 	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
mmu               530 arch/x86/kvm/paging_tmpl.h 	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
mmu               628 arch/x86/kvm/paging_tmpl.h 	top_level = vcpu->arch.mmu->root_level;
mmu               640 arch/x86/kvm/paging_tmpl.h 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu              1048 arch/x86/kvm/paging_tmpl.h 		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
mmu              3032 arch/x86/kvm/svm.c 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
mmu              3034 arch/x86/kvm/svm.c 	vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
mmu              3035 arch/x86/kvm/svm.c 	vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
mmu              3036 arch/x86/kvm/svm.c 	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
mmu              3037 arch/x86/kvm/svm.c 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
mmu              3038 arch/x86/kvm/svm.c 	vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
mmu              3039 arch/x86/kvm/svm.c 	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
mmu              3045 arch/x86/kvm/svm.c 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
mmu               350 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
mmu               356 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
mmu               357 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->get_cr3           = nested_ept_get_cr3;
mmu               358 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
mmu               359 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;
mmu               366 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
mmu              5042 arch/x86/kvm/vmx/nested.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
mmu              5067 arch/x86/kvm/vmx/nested.c 		mmu->ept_ad = accessed_dirty;
mmu              5068 arch/x86/kvm/vmx/nested.c 		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
mmu              2851 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
mmu              2858 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
mmu              2859 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
mmu              2860 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
mmu              2861 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
mmu              2867 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
mmu              2870 arch/x86/kvm/vmx/vmx.c 		mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
mmu              2871 arch/x86/kvm/vmx/vmx.c 		mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
mmu              2872 arch/x86/kvm/vmx/vmx.c 		mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
mmu              2873 arch/x86/kvm/vmx/vmx.c 		mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
mmu              5453 arch/x86/kvm/vmx/vmx.c 			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
mmu              5457 arch/x86/kvm/vmx/vmx.c 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
mmu               495 arch/x86/kvm/vmx/vmx.h 		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
mmu               498 arch/x86/kvm/vmx/vmx.h 						vcpu->arch.mmu->root_hpa));
mmu               611 arch/x86/kvm/x86.c 		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
mmu               663 arch/x86/kvm/x86.c int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
mmu               672 arch/x86/kvm/x86.c 	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
mmu               698 arch/x86/kvm/x86.c int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
mmu               704 arch/x86/kvm/x86.c 	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
mmu               706 arch/x86/kvm/x86.c 	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
mmu               722 arch/x86/kvm/x86.c 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
mmu              5351 arch/x86/kvm/x86.c 	t_gpa  = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
mmu              6447 arch/x86/kvm/x86.c 	if (!vcpu->arch.mmu->direct_map) {
mmu              6480 arch/x86/kvm/x86.c 	if (vcpu->arch.mmu->direct_map) {
mmu              6547 arch/x86/kvm/x86.c 	if (!vcpu->arch.mmu->direct_map)
mmu              10067 arch/x86/kvm/x86.c 	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
mmu              10075 arch/x86/kvm/x86.c 	if (!vcpu->arch.mmu->direct_map &&
mmu              10076 arch/x86/kvm/x86.c 	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
mmu              10079 arch/x86/kvm/x86.c 	vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
mmu              1000 arch/x86/xen/enlighten_pv.c 		pv_ops.mmu.read_cr2 =
mmu              1274 arch/x86/xen/enlighten_pv.c 		pv_ops.mmu.ptep_modify_prot_start =
mmu              1276 arch/x86/xen/enlighten_pv.c 		pv_ops.mmu.ptep_modify_prot_commit =
mmu                76 arch/x86/xen/mmu_hvm.c 		pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap;
mmu              2210 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.write_cr3 = &xen_write_cr3;
mmu              2357 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_pte = xen_set_pte;
mmu              2358 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_pmd = xen_set_pmd;
mmu              2359 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_pud = xen_set_pud;
mmu              2361 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.set_p4d = xen_set_p4d;
mmu              2366 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.alloc_pte = xen_alloc_pte;
mmu              2367 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
mmu              2368 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.release_pte = xen_release_pte;
mmu              2369 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.release_pmd = xen_release_pmd;
mmu              2371 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.alloc_pud = xen_alloc_pud;
mmu              2372 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.release_pud = xen_release_pud;
mmu              2374 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
mmu              2377 arch/x86/xen/mmu_pv.c 	pv_ops.mmu.write_cr3 = &xen_write_cr3;
mmu              2465 arch/x86/xen/mmu_pv.c 	pv_ops.mmu = xen_mmu_ops;
mmu                59 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
mmu                61 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	if (!ctx->mmu) {
mmu                99 drivers/gpu/drm/etnaviv/etnaviv_drv.c 	etnaviv_iommu_context_put(ctx->mmu);
mmu                30 drivers/gpu/drm/etnaviv/etnaviv_drv.h 	struct etnaviv_iommu_context	*mmu;
mmu                77 drivers/gpu/drm/etnaviv/etnaviv_drv.h 	struct etnaviv_iommu_context *mmu,
mmu                96 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	struct etnaviv_iommu_context *mmu, size_t mmu_size)
mmu                98 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	etnaviv_iommu_dump(mmu, iter->data);
mmu               537 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	etnaviv_iommu_context_get(submit->ctx->mmu);
mmu               538 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 	submit->mmu_context = submit->ctx->mmu;
mmu               253 drivers/gpu/drm/gma500/gtt.c 		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
mmu               295 drivers/gpu/drm/gma500/gtt.c 		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
mmu               167 drivers/gpu/drm/gma500/psb_drv.c 		if (dev_priv->mmu) {
mmu               173 drivers/gpu/drm/gma500/psb_drv.c 				(dev_priv->mmu),
mmu               177 drivers/gpu/drm/gma500/psb_drv.c 			psb_mmu_driver_takedown(dev_priv->mmu);
mmu               178 drivers/gpu/drm/gma500/psb_drv.c 			dev_priv->mmu = NULL;
mmu               316 drivers/gpu/drm/gma500/psb_drv.c 	dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
mmu               317 drivers/gpu/drm/gma500/psb_drv.c 	if (!dev_priv->mmu)
mmu               320 drivers/gpu/drm/gma500/psb_drv.c 	dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
mmu               330 drivers/gpu/drm/gma500/psb_drv.c 	ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
mmu               336 drivers/gpu/drm/gma500/psb_drv.c 	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
mmu               457 drivers/gpu/drm/gma500/psb_drv.h 	struct psb_mmu_driver *mmu;
mmu                60 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(gpmmu,       true,  true,  0x03000, 0x03000, mmu,      "gpmmu"),
mmu                61 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu0,      true,  true,  0x04000, 0x04000, mmu,      "ppmmu0"),
mmu                62 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu1,      false, false, 0x05000, 0x05000, mmu,      "ppmmu1"),
mmu                63 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu2,      false, false, 0x06000, 0x06000, mmu,      "ppmmu2"),
mmu                64 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu3,      false, false, 0x07000, 0x07000, mmu,      "ppmmu3"),
mmu                65 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu4,      false, false, -1,      0x1C000, mmu,      "ppmmu4"),
mmu                66 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu5,      false, false, -1,      0x1D000, mmu,      "ppmmu5"),
mmu                67 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu6,      false, false, -1,      0x1E000, mmu,      "ppmmu6"),
mmu                68 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu7,      false, false, -1,      0x1F000, mmu,      "ppmmu7"),
mmu                72 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(ppmmu_bcast, false, true,  -1,      0x15000, mmu,      NULL),
mmu               221 drivers/gpu/drm/lima/lima_device.c 	pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
mmu               261 drivers/gpu/drm/lima/lima_device.c 			pipe->mmu[pipe->num_mmu++] = ppmmu;
mmu               246 drivers/gpu/drm/lima/lima_sched.c 			lima_mmu_switch_vm(pipe->mmu[i], vm);
mmu               274 drivers/gpu/drm/lima/lima_sched.c 			lima_mmu_page_fault_resume(pipe->mmu[i]);
mmu                47 drivers/gpu/drm/lima/lima_sched.h 	struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
mmu                67 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
mmu              1455 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
mmu               886 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
mmu               721 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 	struct msm_mmu *mmu;
mmu               726 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 	mmu = dpu_kms->base.aspace->mmu;
mmu               728 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 	mmu->funcs->detach(mmu, (const char **)iommu_ports,
mmu               755 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
mmu               175 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c 		aspace->mmu->funcs->detach(aspace->mmu,
mmu               527 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c 		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
mmu               236 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c 		aspace->mmu->funcs->detach(aspace->mmu,
mmu               740 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c 		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
mmu               257 drivers/gpu/drm/msm/msm_drv.h int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
mmu               258 drivers/gpu/drm/msm/msm_drv.h void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
mmu                24 drivers/gpu/drm/msm/msm_gem.h 	struct msm_mmu *mmu;
mmu                18 drivers/gpu/drm/msm/msm_gem_vma.c 	if (aspace->mmu)
mmu                19 drivers/gpu/drm/msm/msm_gem_vma.c 		aspace->mmu->funcs->destroy(aspace->mmu);
mmu                44 drivers/gpu/drm/msm/msm_gem_vma.c 	if (aspace->mmu)
mmu                45 drivers/gpu/drm/msm/msm_gem_vma.c 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
mmu                77 drivers/gpu/drm/msm/msm_gem_vma.c 	if (aspace && aspace->mmu)
mmu                78 drivers/gpu/drm/msm/msm_gem_vma.c 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
mmu               143 drivers/gpu/drm/msm/msm_gem_vma.c 	aspace->mmu = msm_iommu_new(dev, domain);
mmu               166 drivers/gpu/drm/msm/msm_gem_vma.c 	aspace->mmu = msm_gpummu_new(dev, gpu);
mmu               842 drivers/gpu/drm/msm/msm_gpu.c 	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
mmu               999 drivers/gpu/drm/msm/msm_gpu.c 		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
mmu                24 drivers/gpu/drm/msm/msm_gpummu.c static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
mmu                30 drivers/gpu/drm/msm/msm_gpummu.c static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
mmu                35 drivers/gpu/drm/msm/msm_gpummu.c static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
mmu                38 drivers/gpu/drm/msm/msm_gpummu.c 	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
mmu                64 drivers/gpu/drm/msm/msm_gpummu.c static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
mmu                66 drivers/gpu/drm/msm/msm_gpummu.c 	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
mmu                79 drivers/gpu/drm/msm/msm_gpummu.c static void msm_gpummu_destroy(struct msm_mmu *mmu)
mmu                81 drivers/gpu/drm/msm/msm_gpummu.c 	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
mmu                83 drivers/gpu/drm/msm/msm_gpummu.c 	dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
mmu               118 drivers/gpu/drm/msm/msm_gpummu.c void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
mmu               121 drivers/gpu/drm/msm/msm_gpummu.c 	dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
mmu                26 drivers/gpu/drm/msm/msm_iommu.c static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
mmu                29 drivers/gpu/drm/msm/msm_iommu.c 	struct msm_iommu *iommu = to_msm_iommu(mmu);
mmu                31 drivers/gpu/drm/msm/msm_iommu.c 	return iommu_attach_device(iommu->domain, mmu->dev);
mmu                34 drivers/gpu/drm/msm/msm_iommu.c static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
mmu                37 drivers/gpu/drm/msm/msm_iommu.c 	struct msm_iommu *iommu = to_msm_iommu(mmu);
mmu                39 drivers/gpu/drm/msm/msm_iommu.c 	iommu_detach_device(iommu->domain, mmu->dev);
mmu                42 drivers/gpu/drm/msm/msm_iommu.c static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
mmu                45 drivers/gpu/drm/msm/msm_iommu.c 	struct msm_iommu *iommu = to_msm_iommu(mmu);
mmu                54 drivers/gpu/drm/msm/msm_iommu.c static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
mmu                56 drivers/gpu/drm/msm/msm_iommu.c 	struct msm_iommu *iommu = to_msm_iommu(mmu);
mmu                63 drivers/gpu/drm/msm/msm_iommu.c static void msm_iommu_destroy(struct msm_mmu *mmu)
mmu                65 drivers/gpu/drm/msm/msm_iommu.c 	struct msm_iommu *iommu = to_msm_iommu(mmu);
mmu                13 drivers/gpu/drm/msm/msm_mmu.h 	int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
mmu                14 drivers/gpu/drm/msm/msm_mmu.h 	void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
mmu                15 drivers/gpu/drm/msm/msm_mmu.h 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
mmu                17 drivers/gpu/drm/msm/msm_mmu.h 	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
mmu                18 drivers/gpu/drm/msm/msm_mmu.h 	void (*destroy)(struct msm_mmu *mmu);
mmu                28 drivers/gpu/drm/msm/msm_mmu.h static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
mmu                31 drivers/gpu/drm/msm/msm_mmu.h 	mmu->dev = dev;
mmu                32 drivers/gpu/drm/msm/msm_mmu.h 	mmu->funcs = funcs;
mmu                38 drivers/gpu/drm/msm/msm_mmu.h static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu                41 drivers/gpu/drm/msm/msm_mmu.h 	mmu->arg = arg;
mmu                42 drivers/gpu/drm/msm/msm_mmu.h 	mmu->handler = handler;
mmu                45 drivers/gpu/drm/msm/msm_mmu.h void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
mmu               157 drivers/gpu/drm/nouveau/dispnv50/disp.c 	ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
mmu               520 drivers/gpu/drm/nouveau/dispnv50/head.c 		ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
mmu                67 drivers/gpu/drm/nouveau/dispnv50/lut.c nv50_lut_init(struct nv50_disp *disp, struct nvif_mmu *mmu,
mmu                73 drivers/gpu/drm/nouveau/dispnv50/lut.c 		int ret = nvif_mem_init_map(mmu, NVIF_MEM_VRAM, size * 8,
mmu               635 drivers/gpu/drm/nouveau/dispnv50/wndw.c 	struct nvif_mmu *mmu = &drm->client.mmu;
mmu               665 drivers/gpu/drm/nouveau/dispnv50/wndw.c 		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
mmu                13 drivers/gpu/drm/nouveau/include/nvif/mem.h int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
mmu                15 drivers/gpu/drm/nouveau/include/nvif/mem.h int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
mmu                37 drivers/gpu/drm/nouveau/include/nvif/mmu.h nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
mmu                39 drivers/gpu/drm/nouveau/include/nvif/mmu.h 	const u8 invalid = mmu->kind_nr - 1;
mmu                41 drivers/gpu/drm/nouveau/include/nvif/mmu.h 		if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
mmu                48 drivers/gpu/drm/nouveau/include/nvif/mmu.h nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
mmu                51 drivers/gpu/drm/nouveau/include/nvif/mmu.h 	for (i = 0; i < mmu->type_nr; i++) {
mmu                52 drivers/gpu/drm/nouveau/include/nvif/mmu.h 		if ((mmu->type[i].type & mask) == mask)
mmu               148 drivers/gpu/drm/nouveau/include/nvkm/core/device.h 	struct nvkm_mmu *mmu;
mmu               221 drivers/gpu/drm/nouveau/include/nvkm/core/device.h 	int (*mmu     )(struct nvkm_device *, int idx, struct nvkm_mmu **);
mmu                27 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h 	struct nvkm_mmu *mmu;
mmu               200 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nvif_mmu *mmu = &cli->mmu;
mmu               231 drivers/gpu/drm/nouveau/nouveau_bo.c 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
mmu               236 drivers/gpu/drm/nouveau/nouveau_bo.c 		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
mmu               241 drivers/gpu/drm/nouveau/nouveau_bo.c 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
mmu               284 drivers/gpu/drm/nouveau/nouveau_bo.c 		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
mmu               285 drivers/gpu/drm/nouveau/nouveau_bo.c 			nvbo->kind = mmu->kind[nvbo->kind];
mmu               661 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nvif_mmu *mmu = &drm->client.mmu;
mmu               678 drivers/gpu/drm/nouveau/nouveau_bo.c 			const u8 type = mmu->type[drm->ttm.type_vram].type;
mmu               181 drivers/gpu/drm/nouveau/nouveau_drm.c 	nvif_mmu_fini(&cli->mmu);
mmu               257 drivers/gpu/drm/nouveau/nouveau_drm.c 	ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
mmu               263 drivers/gpu/drm/nouveau/nouveau_drm.c 	ret = nvif_mclass(&cli->mmu.object, vmms);
mmu               275 drivers/gpu/drm/nouveau/nouveau_drm.c 	ret = nvif_mclass(&cli->mmu.object, mems);
mmu                98 drivers/gpu/drm/nouveau/nouveau_drv.h 	struct nvif_mmu mmu;
mmu               227 drivers/gpu/drm/nouveau/nouveau_drv.h 	struct nvif_mmu *mmu = &drm->client.mmu;
mmu               228 drivers/gpu/drm/nouveau/nouveau_drv.h 	return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
mmu               100 drivers/gpu/drm/nouveau/nouveau_mem.c 	struct nvif_mmu *mmu = &cli->mmu;
mmu               111 drivers/gpu/drm/nouveau/nouveau_mem.c 	if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
mmu               113 drivers/gpu/drm/nouveau/nouveau_mem.c 	if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
mmu               114 drivers/gpu/drm/nouveau/nouveau_mem.c 		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
mmu               115 drivers/gpu/drm/nouveau/nouveau_mem.c 			mem->kind = mmu->kind[mem->kind];
mmu               124 drivers/gpu/drm/nouveau/nouveau_mem.c 	ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
mmu               138 drivers/gpu/drm/nouveau/nouveau_mem.c 	struct nvif_mmu *mmu = &cli->mmu;
mmu               147 drivers/gpu/drm/nouveau/nouveau_mem.c 		ret = nvif_mem_init_type(mmu, cli->mem->oclass,
mmu               155 drivers/gpu/drm/nouveau/nouveau_mem.c 		ret = nvif_mem_init_type(mmu, cli->mem->oclass,
mmu               158 drivers/gpu/drm/nouveau/nouveau_mem.c 						.bankswz = mmu->kind[mem->kind] == 2,
mmu               338 drivers/gpu/drm/nouveau/nouveau_svm.c 	ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
mmu               173 drivers/gpu/drm/nouveau/nouveau_ttm.c 	struct nvif_mmu *mmu = &drm->client.mmu;
mmu               176 drivers/gpu/drm/nouveau/nouveau_ttm.c 	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
mmu               183 drivers/gpu/drm/nouveau/nouveau_ttm.c 	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
mmu               196 drivers/gpu/drm/nouveau/nouveau_ttm.c 	struct nvif_mmu *mmu = &drm->client.mmu;
mmu               213 drivers/gpu/drm/nouveau/nouveau_ttm.c 		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
mmu               235 drivers/gpu/drm/nouveau/nouveau_ttm.c 				  drm->client.mmu.dmabits <= 32 ? true : false);
mmu               131 drivers/gpu/drm/nouveau/nouveau_vmm.c 	int ret = nvif_vmm_init(&cli->mmu, oclass, false, PAGE_SIZE, 0, NULL, 0,
mmu                28 drivers/gpu/drm/nouveau/nvif/mem.c nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem)
mmu                30 drivers/gpu/drm/nouveau/nvif/mem.c 	int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0,
mmu                47 drivers/gpu/drm/nouveau/nvif/mem.c nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
mmu                70 drivers/gpu/drm/nouveau/nvif/mem.c 	ret = nvif_object_init(&mmu->object, 0, oclass, args,
mmu                73 drivers/gpu/drm/nouveau/nvif/mem.c 		mem->type = mmu->type[type].type;
mmu                86 drivers/gpu/drm/nouveau/nvif/mem.c nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
mmu                93 drivers/gpu/drm/nouveau/nvif/mem.c 	for (i = 0; ret && i < mmu->type_nr; i++) {
mmu                94 drivers/gpu/drm/nouveau/nvif/mem.c 		if ((mmu->type[i].type & type) == type) {
mmu                95 drivers/gpu/drm/nouveau/nvif/mem.c 			ret = nvif_mem_init_type(mmu, oclass, i, page, size,
mmu                28 drivers/gpu/drm/nouveau/nvif/mmu.c nvif_mmu_fini(struct nvif_mmu *mmu)
mmu                30 drivers/gpu/drm/nouveau/nvif/mmu.c 	kfree(mmu->kind);
mmu                31 drivers/gpu/drm/nouveau/nvif/mmu.c 	kfree(mmu->type);
mmu                32 drivers/gpu/drm/nouveau/nvif/mmu.c 	kfree(mmu->heap);
mmu                33 drivers/gpu/drm/nouveau/nvif/mmu.c 	nvif_object_fini(&mmu->object);
mmu                37 drivers/gpu/drm/nouveau/nvif/mmu.c nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
mmu                49 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->heap = NULL;
mmu                50 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->type = NULL;
mmu                51 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->kind = NULL;
mmu                54 drivers/gpu/drm/nouveau/nvif/mmu.c 			       &mmu->object);
mmu                58 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->dmabits = args.dmabits;
mmu                59 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->heap_nr = args.heap_nr;
mmu                60 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->type_nr = args.type_nr;
mmu                61 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->kind_nr = args.kind_nr;
mmu                63 drivers/gpu/drm/nouveau/nvif/mmu.c 	ret = nvif_mclass(&mmu->object, mems);
mmu                66 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->mem = mems[ret].oclass;
mmu                68 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap),
mmu                70 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type),
mmu                72 drivers/gpu/drm/nouveau/nvif/mmu.c 	if (ret = -ENOMEM, !mmu->heap || !mmu->type)
mmu                75 drivers/gpu/drm/nouveau/nvif/mmu.c 	mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind),
mmu                77 drivers/gpu/drm/nouveau/nvif/mmu.c 	if (!mmu->kind && mmu->kind_nr)
mmu                80 drivers/gpu/drm/nouveau/nvif/mmu.c 	for (i = 0; i < mmu->heap_nr; i++) {
mmu                83 drivers/gpu/drm/nouveau/nvif/mmu.c 		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
mmu                88 drivers/gpu/drm/nouveau/nvif/mmu.c 		mmu->heap[i].size = args.size;
mmu                91 drivers/gpu/drm/nouveau/nvif/mmu.c 	for (i = 0; i < mmu->type_nr; i++) {
mmu                94 drivers/gpu/drm/nouveau/nvif/mmu.c 		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
mmu                99 drivers/gpu/drm/nouveau/nvif/mmu.c 		mmu->type[i].type = 0;
mmu               100 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.vram) mmu->type[i].type |= NVIF_MEM_VRAM;
mmu               101 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.host) mmu->type[i].type |= NVIF_MEM_HOST;
mmu               102 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.comp) mmu->type[i].type |= NVIF_MEM_COMP;
mmu               103 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.disp) mmu->type[i].type |= NVIF_MEM_DISP;
mmu               104 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.kind    ) mmu->type[i].type |= NVIF_MEM_KIND;
mmu               105 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
mmu               106 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
mmu               107 drivers/gpu/drm/nouveau/nvif/mmu.c 		if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
mmu               108 drivers/gpu/drm/nouveau/nvif/mmu.c 		mmu->type[i].heap = args.heap;
mmu               111 drivers/gpu/drm/nouveau/nvif/mmu.c 	if (mmu->kind_nr) {
mmu               113 drivers/gpu/drm/nouveau/nvif/mmu.c 		size_t argc = struct_size(kind, data, mmu->kind_nr);
mmu               118 drivers/gpu/drm/nouveau/nvif/mmu.c 		kind->count = mmu->kind_nr;
mmu               120 drivers/gpu/drm/nouveau/nvif/mmu.c 		ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
mmu               123 drivers/gpu/drm/nouveau/nvif/mmu.c 			memcpy(mmu->kind, kind->data, kind->count);
mmu               129 drivers/gpu/drm/nouveau/nvif/mmu.c 		nvif_mmu_fini(mmu);
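Taken together, the nvif lines above form the userspace-facing MMU handle of a nouveau client: nvif_mmu_init() instantiates the object and caches its heap/type/kind tables, nvif_mmu_type() picks a memory type by attribute mask, and nvif_mem_init_map() allocates and CPU-maps memory through it (as dispnv50/disp.c does for its push buffer). A hedged usage sketch follows; the helper name, the specific flag mask and the nvif_mem_fini() teardown call are assumptions, not lines from the listing.

/* Illustrative only: bring up a client MMU and allocate a small mappable buffer. */
static int demo_client_mmu(struct nouveau_cli *cli, s32 mmu_oclass)
{
	struct nvif_mem push;
	int ret;

	/* Instantiate the MMU object; this also fills heap[], type[] and kind[]. */
	ret = nvif_mmu_init(&cli->device.object, mmu_oclass, &cli->mmu);
	if (ret)
		return ret;

	/* Bail out early if no host-visible, CPU-mappable type exists at all. */
	if (nvif_mmu_type(&cli->mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE) < 0) {
		ret = -ENOSYS;
		goto done;
	}

	/* Allocate and map 4KiB of host memory, as the nv50 display code does. */
	ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_HOST, 0x1000, &push);
	if (ret == 0)
		nvif_mem_fini(&push);	/* assumed teardown counterpart */
done:
	nvif_mmu_fini(&cli->mmu);
	return ret;
}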
mmu               115 drivers/gpu/drm/nouveau/nvif/vmm.c nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, bool managed, u64 addr,
mmu               133 drivers/gpu/drm/nouveau/nvif/vmm.c 	ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
mmu                88 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               109 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               131 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               151 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               173 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               195 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               217 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               239 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               261 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               283 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               305 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               327 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               349 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               371 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               393 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               416 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               439 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               461 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               484 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               510 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv41_mmu_new,
mmu               536 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv41_mmu_new,
mmu               562 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv41_mmu_new,
mmu               588 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv44_mmu_new,
mmu               614 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               640 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv44_mmu_new,
mmu               666 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv41_mmu_new,
mmu               692 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv41_mmu_new,
mmu               718 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv04_mmu_new,
mmu               744 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv41_mmu_new,
mmu               770 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv44_mmu_new,
mmu               796 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv44_mmu_new,
mmu               824 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv50_mmu_new,
mmu               851 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv44_mmu_new,
mmu               877 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv44_mmu_new,
mmu               903 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = nv44_mmu_new,
mmu               931 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu               963 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu               995 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1027 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1059 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1091 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1123 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1155 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1189 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1222 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = g84_mmu_new,
mmu              1255 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = mcp77_mmu_new,
mmu              1287 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = mcp77_mmu_new,
mmu              1319 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = mcp77_mmu_new,
mmu              1355 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1392 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1428 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1464 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1501 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1538 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1575 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1611 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1646 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gf100_mmu_new,
mmu              1682 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              1721 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              1760 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              1794 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk20a_mmu_new,
mmu              1824 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              1862 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              1900 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              1938 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              1976 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              2010 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gk104_mmu_new,
mmu              2043 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gm200_mmu_new,
mmu              2078 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gm200_mmu_new,
mmu              2113 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gm200_mmu_new,
mmu              2144 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gm20b_mmu_new,
mmu              2173 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gp100_mmu_new,
mmu              2209 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gp100_mmu_new,
mmu              2245 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gp100_mmu_new,
mmu              2281 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gp100_mmu_new,
mmu              2317 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gp100_mmu_new,
mmu              2353 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gp100_mmu_new,
mmu              2385 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gp10b_mmu_new,
mmu              2414 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = gv100_mmu_new,
mmu              2455 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = tu102_mmu_new,
mmu              2490 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = tu102_mmu_new,
mmu              2525 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = tu102_mmu_new,
mmu              2560 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = tu102_mmu_new,
mmu              2595 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.mmu = tu102_mmu_new,
mmu              2657 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	_(MMU     , device->mmu     , &device->mmu->subdev);
mmu              3163 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 		_(NVKM_SUBDEV_MMU     ,      mmu);
mmu              1683 drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c 	if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
mmu              1684 drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c 		bits = pdev->device.mmu->dma_bits;
mmu              1691 drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c 		pdev->device.mmu->dma_bits = 32;
mmu               370 drivers/gpu/drm/nouveau/nvkm/engine/device/user.c 		else if (device->mmu && index-- == 0)
mmu               371 drivers/gpu/drm/nouveau/nvkm/engine/device/user.c 			sclass = &device->mmu->user;
mmu                53 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c 			device->mmu->vmm->pd->pt[0]->memory;
mmu                98 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c 		if (device->mmu->func == &nv04_mmu)
mmu               394 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c 		if (vmm->mmu != device->mmu)
mmu                42 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
mmu                51 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		list_add(&ptp->head, &mmu->ptp.list);
mmu                56 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
mmu                65 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
mmu                74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
mmu                82 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
mmu                93 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		list_add(&ptp->head, &mmu->ptp.list);
mmu               120 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
mmu               124 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	list_for_each_entry(ptc, &mmu->ptc.list, head) {
mmu               134 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		list_add(&ptc->head, &mmu->ptc.list);
mmu               141 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
mmu               147 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			mutex_lock(&mmu->ptp.mutex);
mmu               148 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			nvkm_mmu_ptp_put(mmu, force, pt);
mmu               149 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			mutex_unlock(&mmu->ptp.mutex);
mmu               154 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mutex_lock(&mmu->ptc.mutex);
mmu               162 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mutex_unlock(&mmu->ptc.mutex);
mmu               167 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
mmu               175 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mutex_lock(&mmu->ptp.mutex);
mmu               176 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		pt = nvkm_mmu_ptp_get(mmu, align, zero);
mmu               177 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mutex_unlock(&mmu->ptp.mutex);
mmu               182 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mutex_lock(&mmu->ptc.mutex);
mmu               183 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	ptc = nvkm_mmu_ptc_find(mmu, size);
mmu               185 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mutex_unlock(&mmu->ptc.mutex);
mmu               196 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mutex_unlock(&mmu->ptc.mutex);
mmu               199 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mutex_unlock(&mmu->ptc.mutex);
mmu               207 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
mmu               220 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
mmu               223 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	list_for_each_entry(ptc, &mmu->ptc.list, head) {
mmu               234 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
mmu               238 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
mmu               246 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
mmu               248 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mutex_init(&mmu->ptc.mutex);
mmu               249 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	INIT_LIST_HEAD(&mmu->ptc.list);
mmu               250 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mutex_init(&mmu->ptp.mutex);
mmu               251 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	INIT_LIST_HEAD(&mmu->ptp.list);
mmu               255 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
mmu               257 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
mmu               258 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
mmu               259 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mmu->type[mmu->type_nr].heap = heap;
mmu               260 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mmu->type_nr++;
mmu               265 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
mmu               268 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
mmu               269 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			mmu->heap[mmu->heap_nr].type = type;
mmu               270 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			mmu->heap[mmu->heap_nr].size = size;
mmu               271 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			return mmu->heap_nr++;
mmu               278 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_host(struct nvkm_mmu *mmu)
mmu               280 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	struct nvkm_device *device = mmu->subdev.device;
mmu               281 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
mmu               285 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
mmu               286 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_type(mmu, heap, type);
mmu               296 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
mmu               298 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_type(mmu, heap, type);
mmu               308 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
mmu               311 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
mmu               315 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c nvkm_mmu_vram(struct nvkm_mmu *mmu)
mmu               317 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	struct nvkm_device *device = mmu->subdev.device;
mmu               322 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
mmu               327 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
mmu               331 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
mmu               332 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
mmu               338 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_type(mmu, heapU, type);
mmu               339 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_type(mmu, heapN, type);
mmu               340 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_type(mmu, heapM, type);
mmu               346 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_host(mmu);
mmu               355 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			nvkm_mmu_type(mmu, heapN, type);
mmu               356 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 			nvkm_mmu_type(mmu, heapM, type);
mmu               362 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_type(mmu, heapN, type);
mmu               363 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_type(mmu, heapM, type);
mmu               370 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
mmu               373 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
mmu               374 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_vram(mmu);
mmu               376 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		nvkm_mmu_host(mmu);
mmu               378 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	if (mmu->func->vmm.global) {
mmu               380 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 				       "gart", &mmu->vmm);
mmu               391 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
mmu               392 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	if (mmu->func->init)
mmu               393 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 		mmu->func->init(mmu);
mmu               400 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
mmu               402 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_vmm_unref(&mmu->vmm);
mmu               404 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_ptc_fini(mmu);
mmu               405 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	return mmu;
mmu               417 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	      int index, struct nvkm_mmu *mmu)
mmu               419 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
mmu               420 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mmu->func = func;
mmu               421 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mmu->dma_bits = func->dma_bits;
mmu               422 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	nvkm_mmu_ptc_init(mmu);
mmu               423 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mmu->user.ctor = nvkm_ummu_new;
mmu               424 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c 	mmu->user.base = func->mmu.user;
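Inside nvkm, base.c builds the equivalent tables from the kernel side: nvkm_mmu_heap() reserves a slot describing where memory lives and how large it is, and nvkm_mmu_type() publishes one attribute combination on top of a heap (nvkm_mmu_host() and nvkm_mmu_vram() above are the two callers). A minimal sketch of that registration pattern follows; the helper name, the error check and the exact flag combinations are assumptions, not the driver's own choices.

/* Illustrative only: publish one host heap with a coherent and an uncached type. */
static void demo_mmu_register_host(struct nvkm_mmu *mmu)
{
	int heap;

	/* A single heap covering all host memory ("unlimited" size). */
	heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
	if (heap < 0)
		return;

	/* Coherent, CPU-mappable variant first... */
	nvkm_mmu_type(mmu, heap, NVKM_MEM_MAPPABLE | NVKM_MEM_COHERENT);

	/* ...then an uncached variant, mirroring how nvkm_mmu_host() finishes. */
	nvkm_mmu_type(mmu, heap, NVKM_MEM_MAPPABLE | NVKM_MEM_UNCACHED);
}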
mmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
mmu                33 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
mmu                78 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
mmu                74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                84 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                42 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
mmu                33 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	struct nvkm_mmu *mmu;
mmu                88 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 			dma_unmap_page(mem->mmu->subdev.device->dev,
mmu               144 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
mmu               147 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	struct device *dev = mmu->subdev.device->dev;
mmu               157 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
mmu               158 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	    !(mmu->type[type].type & NVKM_MEM_UNCACHED))
mmu               169 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	mem->mmu = mmu;
mmu               199 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	if (mmu->dma_bits > 32)
mmu               209 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
mmu               224 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
mmu               230 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	if (mmu->type[type].type & NVKM_MEM_VRAM) {
mmu               231 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		ret = mmu->func->mem.vram(mmu, type, page, size,
mmu               234 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		ret = nvkm_mem_new_host(mmu, type, page, size,
mmu                34 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
mmu                42 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	struct nvkm_device *device = mmu->subdev.device;
mmu                69 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
mmu                87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	if (mmu->type[type].type & (NVKM_MEM_DISP | NVKM_MEM_COMP))
mmu                92 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c 	return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
mmu                31 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
mmu                37 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c 	struct nvkm_device *device = mmu->subdev.device;
mmu                51 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
mmu                62 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c 	if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
mmu                67 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c 	return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
mmu                34 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
mmu                42 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 	struct nvkm_device *device = mmu->subdev.device;
mmu                66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
mmu                86 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c 	return nvkm_ram_get(mmu->subdev.device, NVKM_RAM_MM_NORMAL, type,
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c nv41_mmu_init(struct nvkm_mmu *mmu)
mmu                34 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c 	struct nvkm_device *device = mmu->subdev.device;
mmu                35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c 	nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);
mmu                44 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c nv44_mmu_init(struct nvkm_mmu *mmu)
mmu                34 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c 	struct nvkm_device *device = mmu->subdev.device;
mmu                35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c 	struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
mmu                46 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c 	nvkm_wr32(device, 0x100818, mmu->vmm->null);
mmu                59 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
mmu                66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
mmu                19 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h 	} mmu;
mmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c 	.mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
mmu                73 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 			struct nvkm_device *device = umem->mmu->subdev.device;
mmu                91 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_mmu *mmu = umem->mmu;
mmu               110 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 		int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
mmu               146 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
mmu               162 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if (type >= mmu->type_nr)
mmu               168 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	umem->mmu = mmu;
mmu               169 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	umem->type = mmu->type[type].type;
mmu               174 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
mmu               179 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c 	ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
mmu                 9 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h 	struct nvkm_mmu *mmu;
mmu                35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
mmu                37 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	if (mmu->func->mem.user.oclass && oclass->client->super) {
mmu                39 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 			oclass->base = mmu->func->mem.user;
mmu                45 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	if (mmu->func->vmm.user.oclass) {
mmu                47 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 			oclass->base = mmu->func->vmm.user;
mmu                59 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	struct nvkm_mmu *mmu = ummu->mmu;
mmu                67 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		if ((index = args->v0.index) >= mmu->heap_nr)
mmu                69 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		args->v0.size = mmu->heap[index].size;
mmu                79 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	struct nvkm_mmu *mmu = ummu->mmu;
mmu                87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		if ((index = args->v0.index) >= mmu->type_nr)
mmu                89 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		type = mmu->type[index].type;
mmu                90 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		args->v0.heap = mmu->type[index].heap;
mmu               108 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	struct nvkm_mmu *mmu = ummu->mmu;
mmu               115 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	if (mmu->func->kind)
mmu               116 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		kind = mmu->func->kind(mmu, &count);
mmu               157 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	struct nvkm_mmu *mmu = device->mmu;
mmu               161 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	if (mmu->func->kind)
mmu               162 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		mmu->func->kind(mmu, &kinds);
mmu               165 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		args->v0.dmabits = mmu->dma_bits;
mmu               166 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		args->v0.heap_nr = mmu->heap_nr;
mmu               167 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 		args->v0.type_nr = mmu->type_nr;
mmu               175 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c 	ummu->mmu = mmu;
mmu                 9 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h 	struct nvkm_mmu *mmu;
mmu               375 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
mmu               398 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 	if (!mmu->vmm) {
mmu               399 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
mmu               409 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c 		uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
mmu               190 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
mmu               420 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_mmu *mmu = vmm->mmu;
mmu               428 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
mmu               998 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_mmu_ptc_dump(vmm->mmu);
mmu              1008 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
mmu              1013 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
mmu              1034 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
mmu              1046 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vmm->mmu = mmu;
mmu              1048 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	vmm->debug = mmu->subdev.debug;
mmu              1083 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 		vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
mmu              1140 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
mmu              1147 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
mmu              1339 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
mmu              1501 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
mmu              1865 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	struct nvkm_mmu *mmu = device->mmu;
mmu              1868 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c 	ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
mmu               280 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h 		nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n",            \
mmu               183 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	struct nvkm_device *device = vmm->mmu->subdev.device;
mmu               190 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
mmu               248 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	struct nvkm_device *device = vmm->mmu->subdev.device;
mmu               277 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
mmu               403 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               407 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	switch (mmu->subdev.device->fb->page) {
mmu               408 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
mmu               410 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
mmu               419 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               423 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c 	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
mmu                98 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c gk104_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               102 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c 	return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, managed, addr,
mmu                67 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c gk20a_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu                71 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c 	return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, managed, addr,
mmu               144 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c 	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               168 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c 	return nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
mmu               172 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c gm200_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               176 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c 	return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
mmu               181 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c gm200_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               185 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c 	return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, managed, addr,
mmu                57 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c gm20b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu                61 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c 	return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
mmu                66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu                70 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c 	return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, managed, addr,
mmu                37 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct device *dev = vmm->mmu->subdev.device->dev;
mmu                78 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct device *dev = vmm->mmu->subdev.device->dev;
mmu               321 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct nvkm_device *device = vmm->mmu->subdev.device;
mmu               350 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
mmu               393 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct nvkm_device *device = vmm->mmu->subdev.device;
mmu               460 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	struct nvkm_device *device = vmm->mmu->subdev.device;
mmu               509 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               528 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
mmu               537 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               541 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 	return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
mmu                45 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c gp10b_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu                49 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c 	return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
mmu                83 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu                87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgv100.c 	return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
mmu                39 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c mcp77_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu                43 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c 	return nv04_vmm_new_(&mcp77_vmm, mmu, 0, managed, addr, size,
mmu               102 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
mmu               112 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c 	ret = nvkm_vmm_new_(func, mmu, pd_header, managed, addr, size,
mmu               121 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c nv04_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c 	ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, managed, addr, size,
mmu                83 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c 	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
mmu               107 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               111 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c 	return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
mmu               187 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c 	struct nvkm_device *device = vmm->mmu->subdev.device;
mmu               208 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               212 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c 	struct nvkm_subdev *subdev = &mmu->subdev;
mmu               216 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c 	ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
mmu               148 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
mmu               183 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
mmu               235 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	struct nvkm_device *device = vmm->mmu->subdev.device;
mmu               281 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
mmu               341 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
mmu               379 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu               383 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c 	return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
mmu                29 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c 	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
mmu                72 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c tu102_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
mmu                76 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c 	return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
mmu               115 drivers/gpu/drm/panfrost/panfrost_device.h 	struct panfrost_mmu mmu;
mmu               421 drivers/gpu/drm/panfrost/panfrost_drv.c 		    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
mmu                63 drivers/gpu/drm/panfrost/panfrost_gem.c 		if (iter->mmu == &priv->mmu) {
mmu                82 drivers/gpu/drm/panfrost/panfrost_gem.c 	priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
mmu               148 drivers/gpu/drm/panfrost/panfrost_gem.c 	mapping->mmu = &priv->mmu;
mmu               181 drivers/gpu/drm/panfrost/panfrost_gem.c 		if (iter->mmu == &priv->mmu) {
mmu                48 drivers/gpu/drm/panfrost/panfrost_gem.h 	struct panfrost_mmu *mmu;
mmu               157 drivers/gpu/drm/panfrost/panfrost_job.c 	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
mmu               487 drivers/gpu/drm/panfrost/panfrost_job.c 				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
mmu               100 drivers/gpu/drm/panfrost/panfrost_mmu.c 			       struct panfrost_mmu *mmu,
mmu               106 drivers/gpu/drm/panfrost/panfrost_mmu.c 	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
mmu               111 drivers/gpu/drm/panfrost/panfrost_mmu.c static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
mmu               113 drivers/gpu/drm/panfrost/panfrost_mmu.c 	int as_nr = mmu->as;
mmu               114 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
mmu               145 drivers/gpu/drm/panfrost/panfrost_mmu.c u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
mmu               151 drivers/gpu/drm/panfrost/panfrost_mmu.c 	as = mmu->as;
mmu               153 drivers/gpu/drm/panfrost/panfrost_mmu.c 		int en = atomic_inc_return(&mmu->as_count);
mmu               161 drivers/gpu/drm/panfrost/panfrost_mmu.c 		list_move(&mmu->list, &pfdev->as_lru_list);
mmu               184 drivers/gpu/drm/panfrost/panfrost_mmu.c 	mmu->as = as;
mmu               186 drivers/gpu/drm/panfrost/panfrost_mmu.c 	atomic_set(&mmu->as_count, 1);
mmu               187 drivers/gpu/drm/panfrost/panfrost_mmu.c 	list_add(&mmu->list, &pfdev->as_lru_list);
mmu               189 drivers/gpu/drm/panfrost/panfrost_mmu.c 	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
mmu               191 drivers/gpu/drm/panfrost/panfrost_mmu.c 	panfrost_mmu_enable(pfdev, mmu);
mmu               198 drivers/gpu/drm/panfrost/panfrost_mmu.c void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
mmu               200 drivers/gpu/drm/panfrost/panfrost_mmu.c 	atomic_dec(&mmu->as_count);
mmu               201 drivers/gpu/drm/panfrost/panfrost_mmu.c 	WARN_ON(atomic_read(&mmu->as_count) < 0);
mmu               206 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct panfrost_mmu *mmu, *mmu_tmp;
mmu               212 drivers/gpu/drm/panfrost/panfrost_mmu.c 	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
mmu               213 drivers/gpu/drm/panfrost/panfrost_mmu.c 		mmu->as = -1;
mmu               214 drivers/gpu/drm/panfrost/panfrost_mmu.c 		atomic_set(&mmu->as_count, 0);
mmu               215 drivers/gpu/drm/panfrost/panfrost_mmu.c 		list_del_init(&mmu->list);
mmu               233 drivers/gpu/drm/panfrost/panfrost_mmu.c 				     struct panfrost_mmu *mmu,
mmu               236 drivers/gpu/drm/panfrost/panfrost_mmu.c 	if (mmu->as < 0)
mmu               243 drivers/gpu/drm/panfrost/panfrost_mmu.c 		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
mmu               248 drivers/gpu/drm/panfrost/panfrost_mmu.c static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
mmu               253 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
mmu               260 drivers/gpu/drm/panfrost/panfrost_mmu.c 		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
mmu               272 drivers/gpu/drm/panfrost/panfrost_mmu.c 	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
mmu               295 drivers/gpu/drm/panfrost/panfrost_mmu.c 	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
mmu               307 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
mmu               316 drivers/gpu/drm/panfrost/panfrost_mmu.c 		mapping->mmu->as, iova, len);
mmu               330 drivers/gpu/drm/panfrost/panfrost_mmu.c 	panfrost_mmu_flush_range(pfdev, mapping->mmu,
mmu               364 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct panfrost_mmu *mmu = &priv->mmu;
mmu               367 drivers/gpu/drm/panfrost/panfrost_mmu.c 	INIT_LIST_HEAD(&mmu->list);
mmu               368 drivers/gpu/drm/panfrost/panfrost_mmu.c 	mmu->as = -1;
mmu               370 drivers/gpu/drm/panfrost/panfrost_mmu.c 	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
mmu               378 drivers/gpu/drm/panfrost/panfrost_mmu.c 	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
mmu               380 drivers/gpu/drm/panfrost/panfrost_mmu.c 	if (!mmu->pgtbl_ops)
mmu               389 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct panfrost_mmu *mmu = &priv->mmu;
mmu               392 drivers/gpu/drm/panfrost/panfrost_mmu.c 	if (mmu->as >= 0) {
mmu               395 drivers/gpu/drm/panfrost/panfrost_mmu.c 			panfrost_mmu_disable(pfdev, mmu->as);
mmu               398 drivers/gpu/drm/panfrost/panfrost_mmu.c 		clear_bit(mmu->as, &pfdev->as_alloc_mask);
mmu               399 drivers/gpu/drm/panfrost/panfrost_mmu.c 		clear_bit(mmu->as, &pfdev->as_in_use_mask);
mmu               400 drivers/gpu/drm/panfrost/panfrost_mmu.c 		list_del(&mmu->list);
mmu               404 drivers/gpu/drm/panfrost/panfrost_mmu.c 	free_io_pgtable_ops(mmu->pgtbl_ops);
mmu               414 drivers/gpu/drm/panfrost/panfrost_mmu.c 	struct panfrost_mmu *mmu;
mmu               417 drivers/gpu/drm/panfrost/panfrost_mmu.c 	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
mmu               418 drivers/gpu/drm/panfrost/panfrost_mmu.c 		if (as == mmu->as)
mmu               424 drivers/gpu/drm/panfrost/panfrost_mmu.c 	priv = container_of(mmu, struct panfrost_file_priv, mmu);
mmu               468 drivers/gpu/drm/panfrost/panfrost_mmu.c 	WARN_ON(bomapping->mmu->as != as);
mmu               525 drivers/gpu/drm/panfrost/panfrost_mmu.c 	mmu_map_sg(pfdev, bomapping->mmu, addr,
mmu                18 drivers/gpu/drm/panfrost/panfrost_mmu.h u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
mmu                19 drivers/gpu/drm/panfrost/panfrost_mmu.h void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
mmu               129 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 	as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
mmu               194 drivers/gpu/drm/panfrost/panfrost_perfcnt.c 	panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
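The panfrost_mmu.c hits above show a per-file MMU context that starts with as = -1 and is only flushed once a hardware address-space (AS) slot has been assigned, with as_alloc_mask/as_in_use_mask tracking which slots are handed out. A minimal stand-alone sketch of that slot-handout pattern, with purely illustrative names and none of the real driver's locking or LRU reuse:

    /*
     * Hypothetical, self-contained model of the pattern visible above:
     * a per-file "mmu" context starts with as = -1 (no hardware address
     * space assigned); a small fixed pool of AS slots is handed out from
     * a bitmask and returned on teardown.  Names are illustrative only.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define NUM_AS 8                     /* pretend the GPU has 8 AS slots */

    struct toy_mmu {
        int as;                          /* -1 while no slot is assigned   */
    };

    static uint32_t as_alloc_mask;       /* bit i set => slot i handed out */

    static int toy_as_get(struct toy_mmu *mmu)
    {
        if (mmu->as >= 0)
            return mmu->as;              /* already holds a slot           */
        for (int i = 0; i < NUM_AS; i++) {
            if (!(as_alloc_mask & (1u << i))) {
                as_alloc_mask |= 1u << i;
                mmu->as = i;
                return i;
            }
        }
        return -1;                       /* pool exhausted                 */
    }

    static void toy_as_put(struct toy_mmu *mmu)
    {
        if (mmu->as < 0)
            return;                      /* nothing to release             */
        as_alloc_mask &= ~(1u << mmu->as);
        mmu->as = -1;
    }

    int main(void)
    {
        struct toy_mmu ctx = { .as = -1 };

        printf("got AS %d\n", toy_as_get(&ctx));
        toy_as_put(&ctx);                /* back to -1; flushes would be skipped */
        printf("after put: %d\n", ctx.as);
        return 0;
    }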
mmu               203 drivers/infiniband/hw/hfi1/user_exp_rcv.c 				 node->mmu.len, PCI_DMA_FROMDEVICE);
mmu               779 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
mmu               780 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	node->mmu.len = npages * PAGE_SIZE;
mmu               790 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		ret = tid_rb_insert(fd, &node->mmu);
mmu               792 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);
mmu               796 drivers/infiniband/hw/hfi1/user_exp_rcv.c 			  node->rcventry, node->mmu.addr, node->phys, ret);
mmu               804 drivers/infiniband/hw/hfi1/user_exp_rcv.c 			       node->mmu.addr, node->phys, phys);
mmu               838 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		hfi1_mmu_rb_remove(fd->handler, &node->mmu);
mmu               849 drivers/infiniband/hw/hfi1/user_exp_rcv.c 				 node->npages, node->mmu.addr, node->phys,
mmu               914 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		container_of(mnode, struct tid_rb_node, mmu);
mmu               919 drivers/infiniband/hw/hfi1/user_exp_rcv.c 	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
mmu               955 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		container_of(node, struct tid_rb_node, mmu);
mmu               975 drivers/infiniband/hw/hfi1/user_exp_rcv.c 		container_of(node, struct tid_rb_node, mmu);
mmu                68 drivers/infiniband/hw/hfi1/user_exp_rcv.h 	struct mmu_rb_node mmu;
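The user_exp_rcv.c hits above rely on embedding a struct mmu_rb_node inside struct tid_rb_node and recovering the outer node with container_of() in the RB-tree callbacks. A small self-contained illustration of that embedding pattern (stand-in structs, not the hfi1 types):

    /*
     * A minimal, self-contained illustration (not the hfi1 code itself) of
     * the embedding seen above: a generic node struct is placed inside a
     * driver-private struct, and container_of() recovers the outer object
     * from a pointer to the embedded member in a callback.
     */
    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rb_like_node {                /* stands in for struct mmu_rb_node */
        unsigned long addr;
        unsigned long len;
    };

    struct tid_like_node {               /* stands in for struct tid_rb_node */
        int rcventry;
        struct rb_like_node mmu;         /* embedded generic node            */
    };

    /* A callback that only receives the embedded member... */
    static void invalidate_cb(struct rb_like_node *node)
    {
        /* ...and climbs back to the containing object. */
        struct tid_like_node *tid =
            container_of(node, struct tid_like_node, mmu);

        printf("invalidate rcventry %d addr 0x%lx len 0x%lx\n",
               tid->rcventry, tid->mmu.addr, tid->mmu.len);
    }

    int main(void)
    {
        struct tid_like_node n = {
            .rcventry = 3,
            .mmu = { .addr = 0x1000, .len = 0x2000 },
        };

        invalidate_cb(&n.mmu);
        return 0;
    }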
mmu                72 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu;
mmu               228 drivers/iommu/ipmmu-vmsa.c static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
mmu               230 drivers/iommu/ipmmu-vmsa.c 	return mmu->root == mmu;
mmu               235 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
mmu               238 drivers/iommu/ipmmu-vmsa.c 	if (ipmmu_is_root(mmu))
mmu               239 drivers/iommu/ipmmu-vmsa.c 		*rootp = mmu;
mmu               256 drivers/iommu/ipmmu-vmsa.c static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
mmu               258 drivers/iommu/ipmmu-vmsa.c 	return ioread32(mmu->base + offset);
mmu               261 drivers/iommu/ipmmu-vmsa.c static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
mmu               264 drivers/iommu/ipmmu-vmsa.c 	iowrite32(data, mmu->base + offset);
mmu               270 drivers/iommu/ipmmu-vmsa.c 	return ipmmu_read(domain->mmu->root,
mmu               277 drivers/iommu/ipmmu-vmsa.c 	ipmmu_write(domain->mmu->root,
mmu               284 drivers/iommu/ipmmu-vmsa.c 	if (domain->mmu != domain->mmu->root)
mmu               285 drivers/iommu/ipmmu-vmsa.c 		ipmmu_write(domain->mmu,
mmu               288 drivers/iommu/ipmmu-vmsa.c 	ipmmu_write(domain->mmu->root,
mmu               304 drivers/iommu/ipmmu-vmsa.c 			dev_err_ratelimited(domain->mmu->dev,
mmu               329 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = domain->mmu;
mmu               337 drivers/iommu/ipmmu-vmsa.c 	ipmmu_write(mmu, IMUASID(utlb), 0);
mmu               339 drivers/iommu/ipmmu-vmsa.c 	ipmmu_write(mmu, IMUCTR(utlb),
mmu               342 drivers/iommu/ipmmu-vmsa.c 	mmu->utlb_ctx[utlb] = domain->context_id;
mmu               351 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = domain->mmu;
mmu               353 drivers/iommu/ipmmu-vmsa.c 	ipmmu_write(mmu, IMUCTR(utlb), 0);
mmu               354 drivers/iommu/ipmmu-vmsa.c 	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
mmu               380 drivers/iommu/ipmmu-vmsa.c static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
mmu               386 drivers/iommu/ipmmu-vmsa.c 	spin_lock_irqsave(&mmu->lock, flags);
mmu               388 drivers/iommu/ipmmu-vmsa.c 	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
mmu               389 drivers/iommu/ipmmu-vmsa.c 	if (ret != mmu->num_ctx) {
mmu               390 drivers/iommu/ipmmu-vmsa.c 		mmu->domains[ret] = domain;
mmu               391 drivers/iommu/ipmmu-vmsa.c 		set_bit(ret, mmu->ctx);
mmu               395 drivers/iommu/ipmmu-vmsa.c 	spin_unlock_irqrestore(&mmu->lock, flags);
mmu               400 drivers/iommu/ipmmu-vmsa.c static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
mmu               405 drivers/iommu/ipmmu-vmsa.c 	spin_lock_irqsave(&mmu->lock, flags);
mmu               407 drivers/iommu/ipmmu-vmsa.c 	clear_bit(context_id, mmu->ctx);
mmu               408 drivers/iommu/ipmmu-vmsa.c 	mmu->domains[context_id] = NULL;
mmu               410 drivers/iommu/ipmmu-vmsa.c 	spin_unlock_irqrestore(&mmu->lock, flags);
mmu               428 drivers/iommu/ipmmu-vmsa.c 	if (domain->mmu->features->twobit_imttbcr_sl0)
mmu               433 drivers/iommu/ipmmu-vmsa.c 	if (domain->mmu->features->cache_snoop)
mmu               444 drivers/iommu/ipmmu-vmsa.c 	if (domain->mmu->features->setup_imbuscr)
mmu               493 drivers/iommu/ipmmu-vmsa.c 	domain->cfg.iommu_dev = domain->mmu->root->dev;
mmu               498 drivers/iommu/ipmmu-vmsa.c 	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
mmu               507 drivers/iommu/ipmmu-vmsa.c 		ipmmu_domain_free_context(domain->mmu->root,
mmu               518 drivers/iommu/ipmmu-vmsa.c 	if (!domain->mmu)
mmu               529 drivers/iommu/ipmmu-vmsa.c 	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
mmu               539 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = domain->mmu;
mmu               561 drivers/iommu/ipmmu-vmsa.c 		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
mmu               564 drivers/iommu/ipmmu-vmsa.c 		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
mmu               576 drivers/iommu/ipmmu-vmsa.c 	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
mmu               579 drivers/iommu/ipmmu-vmsa.c 	dev_err_ratelimited(mmu->dev,
mmu               588 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = dev;
mmu               593 drivers/iommu/ipmmu-vmsa.c 	spin_lock_irqsave(&mmu->lock, flags);
mmu               598 drivers/iommu/ipmmu-vmsa.c 	for (i = 0; i < mmu->num_ctx; i++) {
mmu               599 drivers/iommu/ipmmu-vmsa.c 		if (!mmu->domains[i])
mmu               601 drivers/iommu/ipmmu-vmsa.c 		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
mmu               605 drivers/iommu/ipmmu-vmsa.c 	spin_unlock_irqrestore(&mmu->lock, flags);
mmu               666 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
mmu               671 drivers/iommu/ipmmu-vmsa.c 	if (!mmu) {
mmu               678 drivers/iommu/ipmmu-vmsa.c 	if (!domain->mmu) {
mmu               680 drivers/iommu/ipmmu-vmsa.c 		domain->mmu = mmu;
mmu               684 drivers/iommu/ipmmu-vmsa.c 			domain->mmu = NULL;
mmu               689 drivers/iommu/ipmmu-vmsa.c 	} else if (domain->mmu != mmu) {
mmu               695 drivers/iommu/ipmmu-vmsa.c 			dev_name(mmu->dev), dev_name(domain->mmu->dev));
mmu               749 drivers/iommu/ipmmu-vmsa.c 	if (domain->mmu)
mmu               850 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
mmu               878 drivers/iommu/ipmmu-vmsa.c 	if (!mmu->mapping) {
mmu               884 drivers/iommu/ipmmu-vmsa.c 			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
mmu               889 drivers/iommu/ipmmu-vmsa.c 		mmu->mapping = mapping;
mmu               893 drivers/iommu/ipmmu-vmsa.c 	ret = arm_iommu_attach_device(dev, mmu->mapping);
mmu               903 drivers/iommu/ipmmu-vmsa.c 	if (mmu->mapping)
mmu               904 drivers/iommu/ipmmu-vmsa.c 		arm_iommu_release_mapping(mmu->mapping);
mmu               911 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
mmu               918 drivers/iommu/ipmmu-vmsa.c 	if (!mmu)
mmu               933 drivers/iommu/ipmmu-vmsa.c 	iommu_device_link(&mmu->iommu, dev);
mmu               939 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
mmu               941 drivers/iommu/ipmmu-vmsa.c 	iommu_device_unlink(&mmu->iommu, dev);
mmu               948 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
mmu               951 drivers/iommu/ipmmu-vmsa.c 	if (mmu->group)
mmu               952 drivers/iommu/ipmmu-vmsa.c 		return iommu_group_ref_get(mmu->group);
mmu               956 drivers/iommu/ipmmu-vmsa.c 		mmu->group = group;
mmu               982 drivers/iommu/ipmmu-vmsa.c static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
mmu               987 drivers/iommu/ipmmu-vmsa.c 	for (i = 0; i < mmu->num_ctx; ++i)
mmu               988 drivers/iommu/ipmmu-vmsa.c 		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
mmu              1048 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu;
mmu              1053 drivers/iommu/ipmmu-vmsa.c 	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
mmu              1054 drivers/iommu/ipmmu-vmsa.c 	if (!mmu) {
mmu              1059 drivers/iommu/ipmmu-vmsa.c 	mmu->dev = &pdev->dev;
mmu              1060 drivers/iommu/ipmmu-vmsa.c 	spin_lock_init(&mmu->lock);
mmu              1061 drivers/iommu/ipmmu-vmsa.c 	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
mmu              1062 drivers/iommu/ipmmu-vmsa.c 	mmu->features = of_device_get_match_data(&pdev->dev);
mmu              1063 drivers/iommu/ipmmu-vmsa.c 	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
mmu              1068 drivers/iommu/ipmmu-vmsa.c 	mmu->base = devm_ioremap_resource(&pdev->dev, res);
mmu              1069 drivers/iommu/ipmmu-vmsa.c 	if (IS_ERR(mmu->base))
mmu              1070 drivers/iommu/ipmmu-vmsa.c 		return PTR_ERR(mmu->base);
mmu              1084 drivers/iommu/ipmmu-vmsa.c 	if (mmu->features->use_ns_alias_offset)
mmu              1085 drivers/iommu/ipmmu-vmsa.c 		mmu->base += IM_NS_ALIAS_OFFSET;
mmu              1087 drivers/iommu/ipmmu-vmsa.c 	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
mmu              1093 drivers/iommu/ipmmu-vmsa.c 	if (!mmu->features->has_cache_leaf_nodes ||
mmu              1095 drivers/iommu/ipmmu-vmsa.c 		mmu->root = mmu;
mmu              1097 drivers/iommu/ipmmu-vmsa.c 		mmu->root = ipmmu_find_root();
mmu              1102 drivers/iommu/ipmmu-vmsa.c 	if (!mmu->root)
mmu              1106 drivers/iommu/ipmmu-vmsa.c 	if (ipmmu_is_root(mmu)) {
mmu              1112 drivers/iommu/ipmmu-vmsa.c 				       dev_name(&pdev->dev), mmu);
mmu              1118 drivers/iommu/ipmmu-vmsa.c 		ipmmu_device_reset(mmu);
mmu              1120 drivers/iommu/ipmmu-vmsa.c 		if (mmu->features->reserved_context) {
mmu              1122 drivers/iommu/ipmmu-vmsa.c 			set_bit(0, mmu->ctx);
mmu              1131 drivers/iommu/ipmmu-vmsa.c 	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
mmu              1132 drivers/iommu/ipmmu-vmsa.c 		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
mmu              1137 drivers/iommu/ipmmu-vmsa.c 		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
mmu              1138 drivers/iommu/ipmmu-vmsa.c 		iommu_device_set_fwnode(&mmu->iommu,
mmu              1141 drivers/iommu/ipmmu-vmsa.c 		ret = iommu_device_register(&mmu->iommu);
mmu              1157 drivers/iommu/ipmmu-vmsa.c 	platform_set_drvdata(pdev, mmu);
mmu              1164 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
mmu              1166 drivers/iommu/ipmmu-vmsa.c 	iommu_device_sysfs_remove(&mmu->iommu);
mmu              1167 drivers/iommu/ipmmu-vmsa.c 	iommu_device_unregister(&mmu->iommu);
mmu              1169 drivers/iommu/ipmmu-vmsa.c 	arm_iommu_release_mapping(mmu->mapping);
mmu              1171 drivers/iommu/ipmmu-vmsa.c 	ipmmu_device_reset(mmu);
mmu              1179 drivers/iommu/ipmmu-vmsa.c 	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
mmu              1183 drivers/iommu/ipmmu-vmsa.c 	if (ipmmu_is_root(mmu)) {
mmu              1184 drivers/iommu/ipmmu-vmsa.c 		ipmmu_device_reset(mmu);
mmu              1186 drivers/iommu/ipmmu-vmsa.c 		for (i = 0; i < mmu->num_ctx; i++) {
mmu              1187 drivers/iommu/ipmmu-vmsa.c 			if (!mmu->domains[i])
mmu              1190 drivers/iommu/ipmmu-vmsa.c 			ipmmu_domain_setup_context(mmu->domains[i]);
mmu              1195 drivers/iommu/ipmmu-vmsa.c 	for (i = 0; i < mmu->features->num_utlbs; i++) {
mmu              1196 drivers/iommu/ipmmu-vmsa.c 		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
mmu              1199 drivers/iommu/ipmmu-vmsa.c 		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
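The ipmmu-vmsa hits above allocate hardware translation contexts by scanning a bitmap for the first clear bit and recording the owning domain, all under mmu->lock with interrupts disabled. A stripped-down, single-threaded sketch of that allocator (illustrative names, no kernel bitmap helpers or locking):

    /*
     * Self-contained sketch (illustrative only, no kernel APIs) of the
     * context-allocation pattern above: a fixed array of hardware contexts
     * is tracked with a bitmap, the first clear bit is claimed, and the
     * owning domain pointer is recorded; freeing reverses both steps.
     */
    #include <stdio.h>

    #define CTX_MAX 8

    struct toy_domain { const char *name; };

    static unsigned long ctx_bitmap;                 /* bit i => context i in use */
    static struct toy_domain *domains[CTX_MAX];

    static int ctx_alloc(struct toy_domain *dom)
    {
        for (int i = 0; i < CTX_MAX; i++) {
            if (!(ctx_bitmap & (1ul << i))) {
                ctx_bitmap |= 1ul << i;
                domains[i] = dom;
                return i;                            /* context id */
            }
        }
        return -1;                                   /* no free context */
    }

    static void ctx_free(int id)
    {
        ctx_bitmap &= ~(1ul << id);
        domains[id] = NULL;
    }

    int main(void)
    {
        struct toy_domain d = { "domain0" };
        int id = ctx_alloc(&d);

        printf("allocated context %d for %s\n", id, domains[id]->name);
        ctx_free(id);
        printf("bitmap now 0x%lx\n", ctx_bitmap);
        return 0;
    }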
mmu               324 drivers/iommu/mtk_iommu.c 			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
mmu               326 drivers/iommu/mtk_iommu.c 			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
mmu               215 drivers/iommu/mtk_iommu_v1.c 			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
mmu               217 drivers/iommu/mtk_iommu_v1.c 			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
mmu                86 drivers/memory/mtk-smi.c 	u32				*mmu;
mmu               152 drivers/memory/mtk-smi.c 			larb->mmu = &larb_mmu[i].mmu;
mmu               168 drivers/memory/mtk-smi.c 	for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
mmu               179 drivers/memory/mtk-smi.c 	writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
mmu               195 drivers/memory/mtk-smi.c 		if (*larb->mmu & BIT(i)) {
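The mtk_iommu/mtk-smi hits above share a single per-LARB mask: the IOMMU driver sets or clears MTK_SMI_MMU_EN(portid) bits in it, and the SMI driver later walks the set bits and writes the accumulated mask to SMI_LARB_MMU_EN. A hypothetical stand-alone sketch of that handshake (toy names, no real registers):

    /*
     * Hypothetical sketch of the bitmask handshake seen above: one side
     * sets or clears one bit per local-arbiter port in a shared u32, the
     * other side later writes the accumulated mask to its MMU-enable
     * register and iterates the set bits.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define PORT_EN(port) (1u << (port))

    static uint32_t larb_mmu_mask;       /* shared between "iommu" and "smi" */
    static uint32_t fake_mmu_en_reg;     /* stands in for SMI_LARB_MMU_EN    */

    static void iommu_config_port(unsigned int port, int enable)
    {
        if (enable)
            larb_mmu_mask |= PORT_EN(port);
        else
            larb_mmu_mask &= ~PORT_EN(port);
    }

    static void smi_larb_resume(void)
    {
        /* restore the whole mask in one register write */
        fake_mmu_en_reg = larb_mmu_mask;

        for (unsigned int i = 0; i < 32; i++)
            if (larb_mmu_mask & PORT_EN(i))
                printf("port %u goes through the IOMMU\n", i);
    }

    int main(void)
    {
        iommu_config_port(2, 1);
        iommu_config_port(5, 1);
        iommu_config_port(2, 0);
        smi_larb_resume();
        printf("register value: 0x%08x\n", fake_mmu_en_reg);
        return 0;
    }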
mmu               107 drivers/staging/media/ipu3/ipu3-dmamap.c 			  imgu->mmu->aperture_end >> shift, 0);
mmu               118 drivers/staging/media/ipu3/ipu3-dmamap.c 		rval = imgu_mmu_map(imgu->mmu, iovaddr,
mmu               150 drivers/staging/media/ipu3/ipu3-dmamap.c 	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
mmu               169 drivers/staging/media/ipu3/ipu3-dmamap.c 	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
mmu               222 drivers/staging/media/ipu3/ipu3-dmamap.c 			  imgu->mmu->aperture_end >> shift, 0);
mmu               229 drivers/staging/media/ipu3/ipu3-dmamap.c 	if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
mmu               254 drivers/staging/media/ipu3/ipu3-dmamap.c 	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
mmu                78 drivers/staging/media/ipu3/ipu3-mmu.c static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
mmu                80 drivers/staging/media/ipu3/ipu3-mmu.c 	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
mmu                83 drivers/staging/media/ipu3/ipu3-mmu.c static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
mmu                84 drivers/staging/media/ipu3/ipu3-mmu.c 				    void (*func)(struct imgu_mmu *mmu))
mmu                86 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!pm_runtime_get_if_in_use(mmu->dev))
mmu                89 drivers/staging/media/ipu3/ipu3-mmu.c 	func(mmu);
mmu                90 drivers/staging/media/ipu3/ipu3-mmu.c 	pm_runtime_put(mmu->dev);
mmu               101 drivers/staging/media/ipu3/ipu3-mmu.c static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
mmu               106 drivers/staging/media/ipu3/ipu3-mmu.c 	writel(halt, mmu->base + REG_GP_HALT);
mmu               107 drivers/staging/media/ipu3/ipu3-mmu.c 	ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
mmu               111 drivers/staging/media/ipu3/ipu3-mmu.c 		dev_err(mmu->dev, "failed to %s CIO gate halt\n",
mmu               168 drivers/staging/media/ipu3/ipu3-mmu.c static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
mmu               174 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_lock_irqsave(&mmu->lock, flags);
mmu               176 drivers/staging/media/ipu3/ipu3-mmu.c 	l2pt = mmu->l2pts[l1pt_idx];
mmu               180 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_unlock_irqrestore(&mmu->lock, flags);
mmu               182 drivers/staging/media/ipu3/ipu3-mmu.c 	new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
mmu               186 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_lock_irqsave(&mmu->lock, flags);
mmu               188 drivers/staging/media/ipu3/ipu3-mmu.c 	dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
mmu               191 drivers/staging/media/ipu3/ipu3-mmu.c 	l2pt = mmu->l2pts[l1pt_idx];
mmu               198 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->l2pts[l1pt_idx] = new_l2pt;
mmu               201 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->l1pt[l1pt_idx] = pteval;
mmu               204 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_unlock_irqrestore(&mmu->lock, flags);
mmu               208 drivers/staging/media/ipu3/ipu3-mmu.c static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
mmu               215 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!mmu)
mmu               220 drivers/staging/media/ipu3/ipu3-mmu.c 	l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
mmu               224 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_lock_irqsave(&mmu->lock, flags);
mmu               226 drivers/staging/media/ipu3/ipu3-mmu.c 	if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
mmu               227 drivers/staging/media/ipu3/ipu3-mmu.c 		spin_unlock_irqrestore(&mmu->lock, flags);
mmu               233 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_unlock_irqrestore(&mmu->lock, flags);
mmu               252 drivers/staging/media/ipu3/ipu3-mmu.c 	struct imgu_mmu *mmu = to_imgu_mmu(info);
mmu               261 drivers/staging/media/ipu3/ipu3-mmu.c 		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
mmu               266 drivers/staging/media/ipu3/ipu3-mmu.c 	dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
mmu               270 drivers/staging/media/ipu3/ipu3-mmu.c 		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
mmu               272 drivers/staging/media/ipu3/ipu3-mmu.c 		ret = __imgu_mmu_map(mmu, iova, paddr);
mmu               281 drivers/staging/media/ipu3/ipu3-mmu.c 	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
mmu               300 drivers/staging/media/ipu3/ipu3-mmu.c 	struct imgu_mmu *mmu = to_imgu_mmu(info);
mmu               325 drivers/staging/media/ipu3/ipu3-mmu.c 	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
mmu               336 drivers/staging/media/ipu3/ipu3-mmu.c static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
mmu               344 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!mmu)
mmu               349 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_lock_irqsave(&mmu->lock, flags);
mmu               351 drivers/staging/media/ipu3/ipu3-mmu.c 	l2pt = mmu->l2pts[l1pt_idx];
mmu               353 drivers/staging/media/ipu3/ipu3-mmu.c 		spin_unlock_irqrestore(&mmu->lock, flags);
mmu               357 drivers/staging/media/ipu3/ipu3-mmu.c 	if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
mmu               360 drivers/staging/media/ipu3/ipu3-mmu.c 	l2pt[l2pt_idx] = mmu->dummy_page_pteval;
mmu               362 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_unlock_irqrestore(&mmu->lock, flags);
mmu               380 drivers/staging/media/ipu3/ipu3-mmu.c 	struct imgu_mmu *mmu = to_imgu_mmu(info);
mmu               389 drivers/staging/media/ipu3/ipu3-mmu.c 		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
mmu               394 drivers/staging/media/ipu3/ipu3-mmu.c 	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);
mmu               401 drivers/staging/media/ipu3/ipu3-mmu.c 		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
mmu               405 drivers/staging/media/ipu3/ipu3-mmu.c 		dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
mmu               412 drivers/staging/media/ipu3/ipu3-mmu.c 	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
mmu               427 drivers/staging/media/ipu3/ipu3-mmu.c 	struct imgu_mmu *mmu;
mmu               430 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
mmu               431 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!mmu)
mmu               434 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->dev = parent;
mmu               435 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->base = base;
mmu               436 drivers/staging/media/ipu3/ipu3-mmu.c 	spin_lock_init(&mmu->lock);
mmu               439 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_set_halt(mmu, true);
mmu               445 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
mmu               446 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!mmu->dummy_page)
mmu               448 drivers/staging/media/ipu3/ipu3-mmu.c 	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
mmu               449 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->dummy_page_pteval = pteval;
mmu               455 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
mmu               456 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!mmu->dummy_l2pt)
mmu               458 drivers/staging/media/ipu3/ipu3-mmu.c 	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
mmu               459 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->dummy_l2pt_pteval = pteval;
mmu               465 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
mmu               466 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!mmu->l2pts)
mmu               470 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
mmu               471 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!mmu->l1pt)
mmu               474 drivers/staging/media/ipu3/ipu3-mmu.c 	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
mmu               475 drivers/staging/media/ipu3/ipu3-mmu.c 	writel(pteval, mmu->base + REG_L1_PHYS);
mmu               476 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_tlb_invalidate(mmu);
mmu               477 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_set_halt(mmu, false);
mmu               479 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->geometry.aperture_start = 0;
mmu               480 drivers/staging/media/ipu3/ipu3-mmu.c 	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
mmu               482 drivers/staging/media/ipu3/ipu3-mmu.c 	return &mmu->geometry;
mmu               485 drivers/staging/media/ipu3/ipu3-mmu.c 	vfree(mmu->l2pts);
mmu               487 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_free_page_table(mmu->dummy_l2pt);
mmu               489 drivers/staging/media/ipu3/ipu3-mmu.c 	free_page((unsigned long)mmu->dummy_page);
mmu               491 drivers/staging/media/ipu3/ipu3-mmu.c 	kfree(mmu);
mmu               503 drivers/staging/media/ipu3/ipu3-mmu.c 	struct imgu_mmu *mmu = to_imgu_mmu(info);
mmu               506 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_set_halt(mmu, true);
mmu               507 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_tlb_invalidate(mmu);
mmu               509 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_free_page_table(mmu->l1pt);
mmu               510 drivers/staging/media/ipu3/ipu3-mmu.c 	vfree(mmu->l2pts);
mmu               511 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_free_page_table(mmu->dummy_l2pt);
mmu               512 drivers/staging/media/ipu3/ipu3-mmu.c 	free_page((unsigned long)mmu->dummy_page);
mmu               513 drivers/staging/media/ipu3/ipu3-mmu.c 	kfree(mmu);
mmu               518 drivers/staging/media/ipu3/ipu3-mmu.c 	struct imgu_mmu *mmu = to_imgu_mmu(info);
mmu               520 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_set_halt(mmu, true);
mmu               525 drivers/staging/media/ipu3/ipu3-mmu.c 	struct imgu_mmu *mmu = to_imgu_mmu(info);
mmu               528 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_set_halt(mmu, true);
mmu               530 drivers/staging/media/ipu3/ipu3-mmu.c 	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
mmu               531 drivers/staging/media/ipu3/ipu3-mmu.c 	writel(pteval, mmu->base + REG_L1_PHYS);
mmu               533 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_tlb_invalidate(mmu);
mmu               534 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_set_halt(mmu, false);
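The ipu3-mmu.c hits above implement a two-level page table in which every L1 slot initially points at a shared dummy L2 table and every L2 slot at a shared dummy page, so unmapped IOVAs always resolve to a harmless target; mapping lazily allocates a private L2 table and unmapping restores the dummy entry. A tiny self-contained model of that layout (fixed toy sizes, no TLB invalidation, halting, or hardware registers):

    /*
     * Self-contained model (illustrative only) of the two-level table
     * above: L1 slots default to a shared dummy L2 table, L2 slots
     * default to a dummy page PTE.  map_page() lazily allocates a
     * private L2 table; unmap_page() restores the dummy default.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define PTES 16                         /* entries per table (tiny demo) */

    static uint32_t dummy_page  = 0xD00D;   /* pretend PTE of the dummy page */
    static uint32_t dummy_l2[PTES];         /* shared L2 full of dummy_page  */
    static uint32_t *l1[PTES];              /* L1; NULL slot => shared dummy */

    static uint32_t *get_l2(unsigned int l1_idx)
    {
        if (!l1[l1_idx]) {                  /* lazily allocate a private L2 */
            l1[l1_idx] = malloc(sizeof(dummy_l2));
            if (!l1[l1_idx])
                exit(1);                    /* demo: bail on allocation fail */
            for (int i = 0; i < PTES; i++)
                l1[l1_idx][i] = dummy_page;
        }
        return l1[l1_idx];
    }

    static void map_page(unsigned int l1_idx, unsigned int l2_idx, uint32_t pte)
    {
        get_l2(l1_idx)[l2_idx] = pte;
    }

    static void unmap_page(unsigned int l1_idx, unsigned int l2_idx)
    {
        if (l1[l1_idx])
            l1[l1_idx][l2_idx] = dummy_page;  /* back to the harmless default */
    }

    static uint32_t lookup(unsigned int l1_idx, unsigned int l2_idx)
    {
        uint32_t *l2 = l1[l1_idx] ? l1[l1_idx] : dummy_l2;
        return l2[l2_idx];
    }

    int main(void)
    {
        for (int i = 0; i < PTES; i++)
            dummy_l2[i] = dummy_page;

        printf("before map:  0x%x\n", lookup(1, 2));   /* dummy page */
        map_page(1, 2, 0xABCD);
        printf("after map:   0x%x\n", lookup(1, 2));
        unmap_page(1, 2);
        printf("after unmap: 0x%x\n", lookup(1, 2));   /* dummy page again */
        return 0;
    }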
mmu               353 drivers/staging/media/ipu3/ipu3.c 	imgu_mmu_resume(imgu->mmu);
mmu               359 drivers/staging/media/ipu3/ipu3.c 	imgu_mmu_suspend(imgu->mmu);
mmu               676 drivers/staging/media/ipu3/ipu3.c 	imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
mmu               677 drivers/staging/media/ipu3/ipu3.c 	if (IS_ERR(imgu->mmu)) {
mmu               678 drivers/staging/media/ipu3/ipu3.c 		r = PTR_ERR(imgu->mmu);
mmu               725 drivers/staging/media/ipu3/ipu3.c 	imgu_mmu_exit(imgu->mmu);
mmu               745 drivers/staging/media/ipu3/ipu3.c 	imgu_mmu_exit(imgu->mmu);
mmu               138 drivers/staging/media/ipu3/ipu3.h 	struct imgu_mmu_info *mmu;
mmu                20 include/soc/mediatek/smi.h 	unsigned int   mmu;