Lines matching references to mmu:
24 #include "mmu.h"
429 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
481 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
490 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
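
Lines 429-490 above show that fault injection and guest-memory reads are routed through per-MMU hooks: kvm_read_guest_page_mmu() first calls mmu->translate_gpa() to turn a nested guest physical address (ngpa) into a real gfn before the actual read. Below is a minimal user-space sketch of that indirection; the types, the identity translator, and the fake read are simplified stand-ins, not the kernel's definitions.

/* Sketch: reading a guest page through a per-MMU translation hook.
 * All names here are simplified stand-ins; the real code also takes
 * a vcpu, access bits, and an exception out-parameter. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef uint64_t gpa_t;
typedef uint64_t gfn_t;

struct mmu {
	/* Maps a (possibly nested) guest physical address to a real gfn. */
	gfn_t (*translate_gpa)(gpa_t ngpa);
};

/* Identity translation: what a non-nested MMU effectively does. */
static gfn_t identity_translate(gpa_t ngpa)
{
	return ngpa >> 12;
}

static int read_guest_page_mmu(struct mmu *mmu, gfn_t ngfn,
			       void *data, int offset, int len)
{
	gpa_t ngpa = ngfn << 12;
	gfn_t real_gfn = mmu->translate_gpa(ngpa);

	/* The kernel would now read the page at real_gfn; we fake it. */
	printf("reading %d bytes at real gfn %#llx, offset %d\n",
	       len, (unsigned long long)real_gfn, offset);
	memset(data, 0, len);
	return 0;
}

int main(void)
{
	struct mmu mmu = { .translate_gpa = identity_translate };
	uint64_t pdpte[4];

	return read_guest_page_mmu(&mmu, 0x1234, pdpte, 0, sizeof(pdpte));
}
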
510 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
516 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
518 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
527 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
534 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
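
Lines 510-534 are the heart of load_pdptrs(): the four PAE PDPTEs are read through the given MMU, any entry that is present but has reserved bits set (rsvd_bits_mask[0][2]) fails the load, and only a fully valid set is copied into mmu->pdptrs. A compilable sketch of that validation loop follows; the reserved-bits mask is an illustrative constant here, whereas the kernel derives it from the vCPU's paging state.

/* Sketch: PAE PDPTE validation as in load_pdptrs(). */
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define PDPTE_PRESENT   (1ULL << 0)
/* Assumed mask for illustration only; not the kernel's computed value. */
#define PDPTE_RSVD_MASK 0xFFF0000000000006ULL

static bool load_pdptrs(const uint64_t pdpte[4], uint64_t out[4])
{
	for (int i = 0; i < 4; i++) {
		/* A present entry with any reserved bit set is invalid. */
		if ((pdpte[i] & PDPTE_PRESENT) &&
		    (pdpte[i] & PDPTE_RSVD_MASK))
			return false;   /* caller fails the CR3 load */
	}
	memcpy(out, pdpte, 4 * sizeof(uint64_t));
	return true;
}

int main(void)
{
	uint64_t raw[4] = { 0x1000 | PDPTE_PRESENT, 0, 0, 0 };
	uint64_t pdptrs[4];

	return load_pdptrs(raw, pdptrs) ? 0 : 1;
}
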
3826 * All the TLBs can be flushed out of mmu lock, see the comments in
4207 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
5172 if (!vcpu->arch.mmu.direct_map) {
5204 /* The instructions are well-emulated on direct mmu. */
5205 if (vcpu->arch.mmu.direct_map) {
5269 if (!vcpu->arch.mmu.direct_map)
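
Lines 5172-5269 all branch on vcpu->arch.mmu.direct_map, which is true under two-dimensional paging (EPT/NPT) and false under shadow paging. The distinction matters for emulation retry: with a direct map the faulting guest physical address is already at hand, while shadow paging must recover it by walking the guest page tables via gva_to_gpa. The sketch below models only that branch pattern; the struct layout, the fake walker, and the failure sentinel are assumptions for illustration.

/* Sketch: the direct_map branch pattern, with stand-in names. */
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t gpa_t;
typedef uint64_t gva_t;

struct mmu {
	bool direct_map;                 /* true under EPT/NPT */
	gpa_t (*gva_to_gpa)(gva_t gva);
};

static bool retry_is_useful(struct mmu *mmu, gva_t cr2, gpa_t *gpa)
{
	if (mmu->direct_map) {
		/* TDP faults already report the guest physical address. */
		*gpa = (gpa_t)cr2;
		return true;
	}
	/* Shadow paging: walk the guest page tables first. */
	*gpa = mmu->gva_to_gpa(cr2);
	return *gpa != (gpa_t)-1;        /* unmapped-gva analogue */
}

static gpa_t fake_walk(gva_t gva)
{
	return gva & ~0xFFFULL;          /* pretend the walk succeeds */
}

int main(void)
{
	struct mmu shadow = { .direct_map = false, .gva_to_gpa = fake_walk };
	gpa_t gpa;

	return retry_is_useful(&shadow, 0xdeadb000, &gpa) ? 0 : 1;
}
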
7461 * Unpin any mmu pages first.
7818 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
7826 if (!vcpu->arch.mmu.direct_map &&
7827 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
7830 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
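
Lines 7818-7830 show the staleness check in the async page fault completion path: a completed work item is replayed through mmu.page_fault() only if the MMU mode (direct_map) is unchanged and, under shadow paging, the guest CR3 still matches the value recorded when the fault was queued; otherwise the prefault would land in the wrong address space. A self-contained sketch of that check, with simplified stand-in structures:

/* Sketch: deciding whether a completed async page fault may be
 * replayed into the current MMU context. */
#include <stdint.h>
#include <stdbool.h>

struct mmu_state {
	bool direct_map;
	uint64_t cr3;
};

struct async_pf_work {
	bool direct_map;   /* MMU mode when the fault was queued */
	uint64_t cr3;      /* guest CR3 at queue time (shadow paging) */
	uint64_t gva;
};

static bool async_pf_still_valid(const struct mmu_state *mmu,
				 const struct async_pf_work *work)
{
	if (mmu->direct_map != work->direct_map)
		return false;            /* paging mode changed */
	if (!mmu->direct_map && work->cr3 != mmu->cr3)
		return false;            /* different address space now */
	return true;
}

int main(void)
{
	struct mmu_state mmu = { .direct_map = false, .cr3 = 0x1000 };
	struct async_pf_work work = { .direct_map = false, .cr3 = 0x1000,
				      .gva = 0x400000 };

	return async_pf_still_valid(&mmu, &work) ? 0 : 1;
}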