Lines matching references to vcpu in arch/x86/kvm/paging_tmpl.h. Each entry gives the source line number, the matching line, and the enclosing function; because the file is a template that the KVM MMU code includes once per guest paging mode (FNAME() expanding to a paging64_*, paging32_* or ept_* name), every enclosing function is reported simply as FNAME().
140 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
163 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, in FNAME()
167 if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) in FNAME()
180 drop_spte(vcpu->kvm, spte); in FNAME()
184 static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) in FNAME()
199 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, in FNAME()
247 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME()
251 kvm_vcpu_mark_page_dirty(vcpu, table_gfn); in FNAME()
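The cluster above, FNAME(update_accessed_dirty_bits) calling FNAME(cmpxchg_gpte) and kvm_vcpu_mark_page_dirty, sets the accessed/dirty bits in a guest PTE with a single compare-and-exchange so that a concurrent guest update is detected instead of silently overwritten. Below is a minimal standalone sketch of that idea; it works on a local variable rather than mapped guest memory, and set_accessed_dirty is an invented name, not a KVM function.

/*
 * Standalone sketch (not kernel code): update the A/D bits of a guest PTE
 * with one compare-and-exchange, the way FNAME(update_accessed_dirty_bits)
 * does through FNAME(cmpxchg_gpte).  Bit positions follow the x86 PTE format.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdatomic.h>

#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK    (1ULL << 6)

/* Returns 0 if the PTE was updated, -1 if the guest changed it under us. */
static int set_accessed_dirty(_Atomic uint64_t *ptep, int write_fault)
{
    uint64_t orig = atomic_load(ptep);
    uint64_t pte = orig | PT_ACCESSED_MASK;

    if (write_fault)
        pte |= PT_DIRTY_MASK;
    if (pte == orig)
        return 0;                       /* bits already set, nothing to do */

    /* One shot: on failure the caller restarts the walk, mirroring KVM. */
    return atomic_compare_exchange_strong(ptep, &orig, pte) ? 0 : -1;
}

int main(void)
{
    _Atomic uint64_t gpte = 0x1000 | 1;          /* present, frame at 0x1000 */
    int ret = set_accessed_dirty(&gpte, /* write_fault */ 1);

    printf("update: %d, gpte now %#llx\n", ret,
           (unsigned long long)atomic_load(&gpte));
    return 0;
}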
261 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
281 pte = mmu->get_cr3(vcpu); in FNAME()
285 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); in FNAME()
293 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu))); in FNAME()
314 real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), in FNAME()
333 host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn, in FNAME()
354 pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); in FNAME()
359 if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) { in FNAME()
370 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
388 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); in FNAME()
404 kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))) in FNAME()
425 vcpu->arch.exit_qualification &= 0x187; in FNAME()
426 vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3; in FNAME()
430 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
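FNAME(walk_addr_generic), which the matches above come from, descends the guest's own page tables level by level: fetch the root from CR3 (or the PDPTRs), index each table with a slice of the address, AND the access rights together, and raise a guest fault if an entry is not present or the accumulated rights do not permit the access. The deliberately tiny model below keeps only that shape over a fake "guest memory" array; gpt_walk, gpa_to_hva and GUEST_MEM_PAGES are invented for the sketch, and the real walker additionally handles reserved bits, huge pages, A/D updates, SMEP/SMAP and nested (EPT) translation.

/* Toy 4-level guest page-table walk over simulated guest memory. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1u << PAGE_SHIFT)
#define PT_PRESENT      (1ULL << 0)
#define PT_WRITABLE     (1ULL << 1)
#define PT_USER         (1ULL << 2)
#define GUEST_MEM_PAGES 8

static uint64_t guest_mem[GUEST_MEM_PAGES * PAGE_SIZE / sizeof(uint64_t)];

static uint64_t *gpa_to_hva(uint64_t gpa)
{
    return &guest_mem[gpa / sizeof(uint64_t)];   /* identity "memslot" */
}

/* Walk levels 4..1; return the final gfn, or -1 on a fault. */
static int64_t gpt_walk(uint64_t root_gpa, uint64_t gva, int write)
{
    uint64_t table_gpa = root_gpa;
    uint64_t pte_access = PT_WRITABLE | PT_USER;

    for (int level = 4; level >= 1; level--) {
        int index = (gva >> (PAGE_SHIFT + 9 * (level - 1))) & 0x1ff;
        uint64_t pte = gpa_to_hva(table_gpa)[index];

        if (!(pte & PT_PRESENT))
            return -1;                  /* not-present fault */
        pte_access &= pte;              /* rights AND together down the levels */
        table_gpa = pte & ~0xfffULL;    /* next table, or the final frame */
    }
    if (write && !(pte_access & PT_WRITABLE))
        return -1;                      /* write permission fault */
    return table_gpa >> PAGE_SHIFT;     /* gfn of the mapped page */
}

int main(void)
{
    /* One-entry path: PML4[0] -> PDPT (page 1) -> PD (page 2) -> PT (page 3)
     * -> data frame (page 4), all present/writable/user. */
    uint64_t flags = PT_PRESENT | PT_WRITABLE | PT_USER;

    gpa_to_hva(0 * PAGE_SIZE)[0] = 1 * PAGE_SIZE | flags;
    gpa_to_hva(1 * PAGE_SIZE)[0] = 2 * PAGE_SIZE | flags;
    gpa_to_hva(2 * PAGE_SIZE)[0] = 3 * PAGE_SIZE | flags;
    gpa_to_hva(3 * PAGE_SIZE)[0] = 4 * PAGE_SIZE | flags;

    printf("gva 0x0 maps to gfn %lld\n", (long long)gpt_walk(0, 0x0, 1));
    return 0;
}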
437 struct kvm_vcpu *vcpu, gva_t addr, u32 access) in FNAME()
439 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr, in FNAME()
445 struct kvm_vcpu *vcpu, gva_t addr, in FNAME()
448 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, in FNAME()
454 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in FNAME()
461 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) in FNAME()
467 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); in FNAME()
469 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, in FNAME()
478 mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL, in FNAME()
484 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in FNAME()
489 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); in FNAME()
492 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, in FNAME()
505 r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa, in FNAME()
509 r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, in FNAME()
515 static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, in FNAME()
529 return __direct_pte_prefetch(vcpu, sp, sptep); in FNAME()
541 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) in FNAME()
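FNAME(pte_prefetch), matched just above, opportunistically maps the guest PTEs that sit in the same naturally aligned group as the one that faulted, so nearby accesses do not fault one by one. A trivial sketch of the index arithmetic follows; PREFETCH_NUM stands in for the kernel's PTE_PREFETCH_NUM, and the real code also reads the gptes atomically and skips sptes that are already present.

#include <stdio.h>

#define PREFETCH_NUM 8   /* stand-in for the kernel's PTE_PREFETCH_NUM */

int main(void)
{
    int faulting = 37;                           /* index of the faulting PTE */
    int first = faulting & ~(PREFETCH_NUM - 1);  /* align down to the group */

    for (int i = first; i < first + PREFETCH_NUM; i++) {
        if (i == faulting)
            continue;            /* the faulting entry is handled by fetch() */
        printf("would try to prefetch gpte index %d\n", i);
    }
    return 0;
}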
551 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, in FNAME()
563 top_level = vcpu->arch.mmu.root_level; in FNAME()
572 if (FNAME(gpte_changed)(vcpu, gw, top_level)) in FNAME()
575 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in FNAME()
578 for (shadow_walk_init(&it, vcpu, addr); in FNAME()
584 drop_large_spte(vcpu, it.sptep); in FNAME()
589 sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, in FNAME()
597 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) in FNAME()
610 validate_direct_spte(vcpu, it.sptep, direct_access); in FNAME()
612 drop_large_spte(vcpu, it.sptep); in FNAME()
619 sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, in FNAME()
625 mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate, in FNAME()
627 FNAME(pte_prefetch)(vcpu, gw, it.sptep); in FNAME()
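FNAME(fetch), whose matches appear above, is the half that builds the shadow page table: it walks the shadow levels for the faulting address, uses kvm_mmu_get_page() to create any missing intermediate shadow pages, re-checks via FNAME(gpte_changed) that the guest entries have not changed underneath it, and finally installs the leaf spte with mmu_set_spte(). The toy model below keeps only the descend/allocate/install shape; shadow_table, shadow_fetch and shadow_lookup are inventions for the sketch, and there is no gpte re-check, huge-page handling or locking here.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define LEVELS  4
#define ENTRIES 512

struct shadow_table { uint64_t spte[ENTRIES]; };

static int shadow_index(uint64_t gva, int level)
{
    return (gva >> (12 + 9 * (level - 1))) & (ENTRIES - 1);
}

/* Descend from the root, allocating missing intermediate tables,
 * and install the translation at the lowest level. */
static void shadow_fetch(struct shadow_table *root, uint64_t gva, uint64_t leaf)
{
    struct shadow_table *table = root;

    for (int level = LEVELS; level > 1; level--) {
        uint64_t *sptep = &table->spte[shadow_index(gva, level)];

        if (!*sptep) {
            /* stands in for kvm_mmu_get_page(): a new lower-level table */
            struct shadow_table *child = calloc(1, sizeof(*child));

            if (!child)
                abort();
            *sptep = (uint64_t)(uintptr_t)child | 1;     /* mark present */
        }
        table = (struct shadow_table *)(uintptr_t)(*sptep & ~1ULL);
    }
    /* stands in for mmu_set_spte(): the leaf maps the actual host page */
    table->spte[shadow_index(gva, 1)] = leaf;
}

static uint64_t shadow_lookup(struct shadow_table *root, uint64_t gva)
{
    struct shadow_table *table = root;

    for (int level = LEVELS; level > 1; level--) {
        uint64_t spte = table->spte[shadow_index(gva, level)];

        if (!spte)
            return 0;
        table = (struct shadow_table *)(uintptr_t)(spte & ~1ULL);
    }
    return table->spte[shadow_index(gva, 1)];
}

int main(void)
{
    struct shadow_table root = { {0} };
    uint64_t gva = 0x7f001234000ULL;

    shadow_fetch(&root, gva, /* toy leaf spte: frame | present */ 0x42000 | 1);
    printf("leaf spte for the gva: %#llx\n",
           (unsigned long long)shadow_lookup(&root, gva));
    return 0;
}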
656 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, in FNAME()
665 (!is_write_protection(vcpu) && !user_fault))) in FNAME()
692 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, in FNAME()
708 r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu)); in FNAME()
720 r = mmu_topup_memory_caches(vcpu); in FNAME()
727 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code); in FNAME()
735 inject_page_fault(vcpu, &walker.fault); in FNAME()
740 vcpu->arch.write_fault_to_shadow_pgtable = false; in FNAME()
742 is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, in FNAME()
743 &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable); in FNAME()
746 level = mapping_level(vcpu, walker.gfn, &force_pt_level); in FNAME()
754 mmu_seq = vcpu->kvm->mmu_notifier_seq; in FNAME()
757 if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, in FNAME()
761 if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr, in FNAME()
770 !is_write_protection(vcpu) && !user_fault && in FNAME()
781 if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) in FNAME()
785 spin_lock(&vcpu->kvm->mmu_lock); in FNAME()
786 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) in FNAME()
789 kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT); in FNAME()
790 make_mmu_pages_available(vcpu); in FNAME()
792 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); in FNAME()
793 r = FNAME(fetch)(vcpu, addr, &walker, write_fault, in FNAME()
795 ++vcpu->stat.pf_fixed; in FNAME()
796 kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); in FNAME()
797 spin_unlock(&vcpu->kvm->mmu_lock); in FNAME()
802 spin_unlock(&vcpu->kvm->mmu_lock); in FNAME()
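The FNAME(page_fault) matches above show the ordering the fault handler depends on: sample kvm->mmu_notifier_seq, resolve gfn to pfn outside mmu_lock (try_async_pf), then take mmu_lock and bail out through mmu_notifier_retry() if an MMU-notifier invalidation ran in between, before FNAME(fetch) installs anything. Here is a single-threaded toy of that check with a simulated invalidation; fake_kvm, notifier_retry and handle_fault are made-up names, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct fake_kvm {
    unsigned long mmu_notifier_seq;   /* bumped by invalidations */
    /* mmu_lock elided in this single-threaded sketch */
};

static bool notifier_retry(struct fake_kvm *kvm, unsigned long mmu_seq)
{
    /* Under mmu_lock in the real code: any invalidation since we sampled
     * mmu_seq means the pfn we looked up may already be stale. */
    return kvm->mmu_notifier_seq != mmu_seq;
}

static int handle_fault(struct fake_kvm *kvm, bool racing_invalidate)
{
    unsigned long mmu_seq = kvm->mmu_notifier_seq;

    /* ... gfn-to-pfn translation happens here, outside mmu_lock ... */
    if (racing_invalidate)
        kvm->mmu_notifier_seq++;      /* simulate a concurrent invalidation */

    /* spin_lock(&kvm->mmu_lock); */
    if (notifier_retry(kvm, mmu_seq))
        return -1;                    /* out_unlock: drop the pfn and retry */
    /* ... the fetch step would install the sptes here ... */
    /* spin_unlock(&kvm->mmu_lock); */
    return 0;
}

int main(void)
{
    struct fake_kvm kvm = { .mmu_notifier_seq = 0 };

    printf("no race:   %d\n", handle_fault(&kvm, false));
    printf("with race: %d\n", handle_fault(&kvm, true));
    return 0;
}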
819 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) in FNAME()
826 vcpu_clear_mmio_info(vcpu, gva); in FNAME()
832 mmu_topup_memory_caches(vcpu); in FNAME()
834 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) { in FNAME()
839 spin_lock(&vcpu->kvm->mmu_lock); in FNAME()
840 for_each_shadow_entry(vcpu, gva, iterator) { in FNAME()
855 if (mmu_page_zap_pte(vcpu->kvm, sp, sptep)) in FNAME()
856 kvm_flush_remote_tlbs(vcpu->kvm); in FNAME()
858 if (!rmap_can_add(vcpu)) in FNAME()
861 if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, in FNAME()
865 FNAME(update_pte)(vcpu, sp, sptep, &gpte); in FNAME()
871 spin_unlock(&vcpu->kvm->mmu_lock); in FNAME()
874 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, in FNAME()
881 r = FNAME(walk_addr)(&walker, vcpu, vaddr, access); in FNAME()
893 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, in FNAME()
901 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); in FNAME()
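FNAME(gva_to_gpa) and its nested variant, matched above, simply run the walker and, on success, combine the resulting gfn with the page offset of the original address (on failure they hand back the fault description instead). A minimal illustration of the success-path arithmetic; the walker_gfn and vaddr values are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))

int main(void)
{
    uint64_t walker_gfn = 0x1234;                 /* result of the guest walk */
    uint64_t vaddr = 0x00007f00deadbeefULL;
    uint64_t gpa = (walker_gfn << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);

    printf("gpa = %#llx\n", (unsigned long long)gpa);
    return 0;
}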
926 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in FNAME()
948 if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, in FNAME()
952 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { in FNAME()
953 vcpu->kvm->tlbs_dirty++; in FNAME()
959 pte_access &= FNAME(gpte_access)(vcpu, gpte); in FNAME()
962 if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, in FNAME()
967 drop_spte(vcpu->kvm, &sp->spt[i]); in FNAME()
968 vcpu->kvm->tlbs_dirty++; in FNAME()
976 set_spte(vcpu, &sp->spt[i], pte_access, in FNAME()
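FNAME(sync_page), the last cluster, re-validates a whole shadow page when it is reused: for every cached spte it re-reads the guest PTE it was derived from with kvm_vcpu_read_guest_atomic(), drops the spte if the gpte is now invalid, and otherwise refreshes the spte from the gpte's current target and access bits. Below is a toy version of that loop over plain arrays; toy_sp, guest_ptes and the simplified "present bit only" policy are all inventions of the sketch.

#include <stdint.h>
#include <stdio.h>

#define ENTRIES    4          /* tiny table for the example */
#define PT_PRESENT 1ULL

struct toy_sp { uint64_t spt[ENTRIES]; };          /* cached sptes */

static uint64_t guest_ptes[ENTRIES] = { 0x1001, 0, 0x3001, 0x4001 };

static void sync_page(struct toy_sp *sp)
{
    for (int i = 0; i < ENTRIES; i++) {
        uint64_t gpte = guest_ptes[i];     /* kvm_vcpu_read_guest_atomic() */

        if (!(gpte & PT_PRESENT)) {
            sp->spt[i] = 0;                /* drop_spte(): gpte is gone */
            continue;
        }
        sp->spt[i] = gpte;                 /* set_spte() with fresh access */
    }
}

int main(void)
{
    struct toy_sp sp = { .spt = { 0x1001, 0x2001, 0x9001, 0x4001 } };

    sync_page(&sp);
    for (int i = 0; i < ENTRIES; i++)
        printf("spt[%d] = %#llx\n", i, (unsigned long long)sp.spt[i]);
    return 0;
}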