Lines matching refs: mmu
24 #include "mmu.h"
439 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
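
The call on line 439 goes through the function-pointer table that struct kvm_mmu carries: MMU setup installs either the shadow-paging or the TDP implementation, and x86.c invokes whichever is live without knowing which. A minimal standalone sketch of that dispatch pattern, with toy types standing in for the real kernel definitions:

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's kvm_mmu callback table. */
    struct fault { unsigned long addr; };
    struct vcpu;

    struct mmu_ops {
        /* Filled in at MMU init with the shadow- or TDP-paging variant. */
        void (*inject_page_fault)(struct vcpu *vcpu, struct fault *f);
    };

    struct vcpu { struct mmu_ops mmu; };

    static void shadow_inject(struct vcpu *vcpu, struct fault *f)
    {
        (void)vcpu;
        printf("shadow MMU: injecting #PF at %#lx\n", f->addr);
    }

    int main(void)
    {
        struct vcpu vcpu = { .mmu = { .inject_page_fault = shadow_inject } };
        struct fault f = { .addr = 0xdeadb000UL };

        /* x86.c calls through the table, as on the listed line 439. */
        vcpu.mmu.inject_page_fault(&vcpu, &f);
        return 0;
    }
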
491 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
500 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
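
kvm_read_guest_page_mmu (lines 491-500) reads guest memory in two steps: the nested gfn is first converted to a real gfn through mmu->translate_gpa (an identity mapping when the guest is not nested), and only then is the page actually read. A rough userspace model of that flow; the helper names below are stand-ins, not kernel APIs:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SHIFT 12
    typedef uint64_t gfn_t;
    typedef uint64_t gpa_t;

    /* Stand-in translation: identity, as for a non-nested guest. */
    static gfn_t translate_gpa(gpa_t ngpa)
    {
        return ngpa >> PAGE_SHIFT;
    }

    /* Stand-in for reading a page of guest physical memory. */
    static int read_guest_page(gfn_t gfn, void *data, int offset, int len)
    {
        memset(data, 0, len); /* pretend the page is zero-filled */
        printf("read %d bytes at gfn %#llx offset %d\n",
               len, (unsigned long long)gfn, offset);
        return 0;
    }

    static int read_guest_page_mmu(gfn_t ngfn, void *data, int offset, int len)
    {
        gpa_t ngpa = ngfn << PAGE_SHIFT;      /* gfn_to_gpa() step */
        gfn_t real_gfn = translate_gpa(ngpa); /* mmu->translate_gpa() step */

        return read_guest_page(real_gfn, data, offset, len);
    }

    int main(void)
    {
        uint64_t buf[4];
        return read_guest_page_mmu(0x1234, buf, 0, (int)sizeof(buf));
    }
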
520 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
526 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
528 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
538 vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
545 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
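
load_pdptrs (lines 520-545) implements the PAE case: CR3 points at a 32-byte-aligned table of four PDPTEs, which are read through the (possibly nested) MMU, rejected if a present entry has reserved bits set, and cached in mmu->pdptrs. A simplified sketch of the offset math and validation loop; the reserved-bit mask below is a placeholder, since the real one comes from guest_rsvd_check and depends on MAXPHYADDR:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL
    #define PT_PRESENT_MASK (1ULL << 0)

    /* Placeholder for guest_rsvd_check.rsvd_bits_mask[0][2]; the real
     * mask depends on MAXPHYADDR and the paging mode. */
    #define PDPTE_RSVD_MASK 0x1e6ULL

    /* Returns 1 if the four PDPTEs at cr3 are valid and cached, 0 if not. */
    static int load_pdptrs(unsigned long cr3, const uint8_t *guest_page,
                           uint64_t pdptrs[4])
    {
        /* CR3 bits 5..11 locate the 32-byte PDPT inside its page (PAE). */
        unsigned offset = ((cr3 & (PAGE_SIZE - 1)) >> 5) << 5;
        uint64_t pdpte[4];
        int i;

        memcpy(pdpte, guest_page + offset, sizeof(pdpte));

        for (i = 0; i < 4; i++) {
            if ((pdpte[i] & PT_PRESENT_MASK) &&
                (pdpte[i] & PDPTE_RSVD_MASK))
                return 0; /* present entry with reserved bits set */
        }

        memcpy(pdptrs, pdpte, sizeof(pdpte)); /* cache, like mmu->pdptrs */
        return 1;
    }

    int main(void)
    {
        static uint8_t page[PAGE_SIZE];
        uint64_t pdptrs[4] = { 0 };
        uint64_t entry = PT_PRESENT_MASK | 0x1000; /* present, PA 0x1000 */

        memcpy(page + 0x20, &entry, sizeof(entry)); /* PDPT lives at +0x20 */
        printf("valid: %d\n", load_pdptrs(0x20, page, pdptrs));
        return 0;
    }
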
3666 * All the TLBs can be flushed out of mmu lock, see the comments in
4116 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
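
The hit on line 4116 is in nested-guest translation: what the L2 guest considers a physical address is treated as a virtual address and walked through mmu.gva_to_gpa against the L1 tables. A toy single-level model of that extra indirection (the real walk is multi-level):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define NPAGES 16

    /* Toy single-level "nested page table": maps an L2 page number to
     * an L1 page number. Purely illustrative. */
    static const uint64_t nested_pt[NPAGES] = { [2] = 7, [3] = 9 };

    static uint64_t l2_gpa_to_l1_gpa(uint64_t l2_gpa)
    {
        uint64_t page = l2_gpa >> PAGE_SHIFT;
        uint64_t off  = l2_gpa & ((1UL << PAGE_SHIFT) - 1);

        return (nested_pt[page] << PAGE_SHIFT) | off;
    }

    int main(void)
    {
        uint64_t l2 = (2UL << PAGE_SHIFT) | 0x123;

        printf("L2 gpa %#llx -> L1 gpa %#llx\n",
               (unsigned long long)l2,
               (unsigned long long)l2_gpa_to_l1_gpa(l2));
        return 0;
    }
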
5120 if (!vcpu->arch.mmu.direct_map) {
5152 /* The instructions are well-emulated on direct mmu. */
5153 if (vcpu->arch.mmu.direct_map) {
5217 if (!vcpu->arch.mmu.direct_map)
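
Lines 5120-5217 all branch on direct_map: it is true under TDP (EPT/NPT), where guest page tables are not shadowed, and false under shadow paging, where the guest CR3 that faulted still matters. A condensed sketch of the shape of that gate, with abbreviated stand-in field names:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_mmu {
        bool direct_map;          /* true under TDP (EPT/NPT) */
        unsigned long guest_cr3;  /* stand-in for mmu.get_cr3(vcpu) */
    };

    /* Condensed shape of the gating around the listed lines 5152-5217:
     * on a direct MMU the emulated instruction can simply be retried;
     * with shadow paging the CR3 must still match the one that faulted,
     * or the retry would run against the wrong page tables. */
    static bool can_retry(const struct toy_mmu *mmu, unsigned long fault_cr3)
    {
        if (mmu->direct_map)
            return true;
        return fault_cr3 == mmu->guest_cr3;
    }

    int main(void)
    {
        struct toy_mmu tdp = { .direct_map = true };
        struct toy_mmu shadow = { .direct_map = false, .guest_cr3 = 0x1000 };

        printf("tdp: %d, shadow(match): %d, shadow(stale): %d\n",
               can_retry(&tdp, 0x2000),
               can_retry(&shadow, 0x1000),
               can_retry(&shadow, 0x3000));
        return 0;
    }
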
7671 * Unpin any mmu pages first.
8071 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
8079 if (!vcpu->arch.mmu.direct_map &&
8080 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
8083 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
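
The last three hits (lines 8071-8083) are in the async page fault completion path: the resolved fault is replayed through mmu.page_fault only if the MMU mode, and under shadow paging the guest CR3, still match what they were when the fault was queued. A condensed model of those checks, with toy types:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_work {
        bool direct_map;      /* MMU mode when the fault was queued */
        unsigned long cr3;    /* guest CR3 at queue time (shadow paging) */
        unsigned long gva;    /* faulting address to replay */
    };

    struct toy_mmu {
        bool direct_map;
        unsigned long cr3;    /* stand-in for mmu.get_cr3(vcpu) */
    };

    static void replay_page_fault(unsigned long gva)
    {
        /* stands in for vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true) */
        printf("replaying page fault at %#lx\n", gva);
    }

    /* Condensed shape of the checks around the listed lines 8071-8083:
     * drop the completion if the MMU mode changed, or if shadow paging
     * is in use and the guest switched CR3 in the meantime. */
    static void async_page_ready(const struct toy_mmu *mmu,
                                 const struct toy_work *work)
    {
        if (mmu->direct_map != work->direct_map)
            return;
        if (!mmu->direct_map && work->cr3 != mmu->cr3)
            return;
        replay_page_fault(work->gva);
    }

    int main(void)
    {
        struct toy_mmu mmu = { .direct_map = false, .cr3 = 0x1000 };
        struct toy_work ok = { .direct_map = false, .cr3 = 0x1000, .gva = 0xfee0 };
        struct toy_work stale = { .direct_map = false, .cr3 = 0x2000, .gva = 0xfee0 };

        async_page_ready(&mmu, &ok);    /* replays */
        async_page_ready(&mmu, &stale); /* dropped: CR3 changed */
        return 0;
    }
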