Lines matching refs: vcpu (arch/x86/kvm/paging_tmpl.h)
148 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
171 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
175 if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
188 drop_spte(vcpu->kvm, spte);
192 static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
207 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
255 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
259 mark_page_dirty(vcpu->kvm, table_gfn);
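
The references at lines 148 and 207-259 above cover FNAME(cmpxchg_gpte) and FNAME(update_accessed_dirty_bits), which fold accessed/dirty bits back into the guest PTE with a compare-and-exchange so that a concurrent guest update of the same entry is not silently overwritten. Below is a minimal user-space sketch of that pattern using the GCC/Clang __atomic builtins; the bit layout and helper name are invented for illustration and are not KVM's.

#include <stdbool.h>
#include <stdint.h>

#define TOY_PTE_ACCESSED (1ULL << 5)
#define TOY_PTE_DIRTY    (1ULL << 6)

/* Set A (and, on a write fault, D) atomically; fail if the PTE changed. */
static bool toy_update_accessed_dirty(uint64_t *ptep, bool write_fault)
{
	uint64_t orig = __atomic_load_n(ptep, __ATOMIC_RELAXED);
	uint64_t nval = orig | TOY_PTE_ACCESSED;

	if (write_fault)
		nval |= TOY_PTE_DIRTY;
	if (nval == orig)
		return true;	/* bits already set, nothing to do */

	/*
	 * Returns false if *ptep no longer equals orig, i.e. the guest
	 * modified the entry concurrently; the caller would then restart
	 * the walk, much like the cmpxchg_gpte retry path.
	 */
	return __atomic_compare_exchange_n(ptep, &orig, nval, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}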
269 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
289 pte = mmu->get_cr3(vcpu);
293 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
301 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
322 real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
341 host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
363 pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
368 if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
379 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
397 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
413 kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
434 vcpu->arch.exit_qualification &= 0x187;
435 vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
439 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
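
The references at lines 269-439 fall inside FNAME(walk_addr_generic), the software walk of the guest page tables: the root comes from get_cr3()/get_pdptr(), each table gfn is translated and mapped via gfn_to_hva_prot(), permissions are ANDed together level by level, permission_fault() is checked, and accessed/dirty bits are written back at the end. The following is a minimal sketch of the level-by-level walk only; the toy PTE format, flat "guest memory" array, and names are purely illustrative, not KVM's.

#include <stdint.h>

#define TOY_PTE_PRESENT  (1ULL << 0)
#define TOY_PTE_WRITABLE (1ULL << 1)
#define TOY_PTE_USER     (1ULL << 2)
#define TOY_PFN_MASK     0x000ffffffffff000ULL
#define TOY_PAGES        16

/* Toy guest "physical memory": page-sized tables of 512 entries each. */
static uint64_t gmem[TOY_PAGES][512];

struct toy_walker {
	uint64_t gfn;		/* final guest frame number */
	uint64_t pte_access;	/* accumulated W/U permissions */
};

/* Walk a 4-level toy table; return 0 on success, -1 on a "guest fault". */
static int toy_walk_addr(uint64_t root_pfn, uint64_t addr, struct toy_walker *w)
{
	uint64_t pfn = root_pfn;

	/* Start fully permissive and AND each level in, like pt_access/pte_access. */
	w->pte_access = TOY_PTE_WRITABLE | TOY_PTE_USER;

	for (int level = 3; level >= 0; level--) {
		unsigned index = (addr >> (12 + 9 * level)) & 0x1ff;
		uint64_t pte;

		if (pfn >= TOY_PAGES)
			return -1;
		pte = gmem[pfn][index];		/* stands in for the gfn->hva read */
		if (!(pte & TOY_PTE_PRESENT))
			return -1;		/* would become a guest #PF */
		w->pte_access &= pte;		/* permissions can only narrow */
		pfn = (pte & TOY_PFN_MASK) >> 12;	/* next table, or data frame */
	}
	w->gfn = pfn;
	return 0;
}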
446 struct kvm_vcpu *vcpu, gva_t addr, u32 access)
448 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
454 struct kvm_vcpu *vcpu, gva_t addr,
457 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
463 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
470 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
476 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
478 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
487 mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
493 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
498 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
501 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
514 r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
518 r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
524 static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
538 return __direct_pte_prefetch(vcpu, sp, sptep);
550 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
560 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
572 top_level = vcpu->arch.mmu.root_level;
581 if (FNAME(gpte_changed)(vcpu, gw, top_level))
584 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
587 for (shadow_walk_init(&it, vcpu, addr);
593 drop_large_spte(vcpu, it.sptep);
598 sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
606 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
619 validate_direct_spte(vcpu, it.sptep, direct_access);
621 drop_large_spte(vcpu, it.sptep);
628 sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
634 mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
636 FNAME(pte_prefetch)(vcpu, gw, it.sptep);
665 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
674 (!is_write_protection(vcpu) && !user_fault)))
701 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
717 r = handle_mmio_page_fault(vcpu, addr, error_code,
718 mmu_is_nested(vcpu));
730 r = mmu_topup_memory_caches(vcpu);
737 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
745 inject_page_fault(vcpu, &walker.fault);
750 vcpu->arch.write_fault_to_shadow_pgtable = false;
752 is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
753 &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
756 force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
761 level = min(walker.level, mapping_level(vcpu, walker.gfn));
765 mmu_seq = vcpu->kvm->mmu_notifier_seq;
768 if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
772 if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
781 !is_write_protection(vcpu) && !user_fault &&
792 if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
796 spin_lock(&vcpu->kvm->mmu_lock);
797 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
800 kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
801 make_mmu_pages_available(vcpu);
803 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
804 r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
806 ++vcpu->stat.pf_fixed;
807 kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
808 spin_unlock(&vcpu->kvm->mmu_lock);
813 spin_unlock(&vcpu->kvm->mmu_lock);
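
Lines 701-813 are FNAME(page_fault): walk the guest tables, inject a guest fault if the walk fails, resolve the host pfn outside mmu_lock (possibly asynchronously), then take the lock, recheck mmu_notifier_seq with mmu_notifier_retry() and build the shadow entries with FNAME(fetch). The sample-sequence/recheck-under-lock pattern around mmu_notifier_seq looks roughly like the sketch below; the structure, lock type, and helper are placeholders, not KVM's.

#include <pthread.h>
#include <stdbool.h>

struct toy_mmu {
	pthread_mutex_t lock;
	unsigned long   notifier_seq;	/* bumped by invalidation callbacks */
};

/* One attempt at handling a fault; returns false if it must be retried. */
static bool toy_fault_once(struct toy_mmu *mmu)
{
	unsigned long seq = __atomic_load_n(&mmu->notifier_seq, __ATOMIC_ACQUIRE);

	/* Slow work (pfn lookup, possibly sleeping) happens here, outside the lock. */

	pthread_mutex_lock(&mmu->lock);
	if (seq != __atomic_load_n(&mmu->notifier_seq, __ATOMIC_RELAXED)) {
		/* An invalidation ran in between: back off and retry. */
		pthread_mutex_unlock(&mmu->lock);
		return false;
	}
	/* ... safe to install shadow PTEs here ... */
	pthread_mutex_unlock(&mmu->lock);
	return true;
}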
830 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
837 vcpu_clear_mmio_info(vcpu, gva);
843 mmu_topup_memory_caches(vcpu);
845 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
850 spin_lock(&vcpu->kvm->mmu_lock);
851 for_each_shadow_entry(vcpu, gva, iterator) {
866 if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
867 kvm_flush_remote_tlbs(vcpu->kvm);
869 if (!rmap_can_add(vcpu))
872 if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
876 FNAME(update_pte)(vcpu, sp, sptep, &gpte);
882 spin_unlock(&vcpu->kvm->mmu_lock);
885 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
892 r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
904 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
912 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
937 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
959 if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
963 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
964 vcpu->kvm->tlbs_dirty++;
970 pte_access &= FNAME(gpte_access)(vcpu, gpte);
973 if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
978 drop_spte(vcpu->kvm, &sp->spt[i]);
979 vcpu->kvm->tlbs_dirty++;
987 set_spte(vcpu, &sp->spt[i], pte_access,
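
Finally, lines 937-987 are FNAME(sync_page), which revalidates an unsync shadow page: each guest PTE is re-read with kvm_read_guest_atomic(), and the corresponding spte is either dropped (bumping tlbs_dirty) or refreshed with set_spte(). A hedged outline of that loop follows; the toy PTE bits and the flat-array stand-ins for the guest read and the spte helpers are invented for illustration.

#include <stdint.h>

#define TOY_PTE_PRESENT  (1ULL << 0)
#define TOY_PTE_WRITABLE (1ULL << 1)
#define TOY_PTE_USER     (1ULL << 2)

/* Re-derive every shadow entry of one page from the current guest PTEs. */
static void toy_sync_page(const uint64_t *gpt, uint64_t *spt, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++) {
		uint64_t gpte = gpt[i];	/* stands in for kvm_read_guest_atomic() */

		if (!(gpte & TOY_PTE_PRESENT)) {
			spt[i] = 0;	/* drop the stale shadow entry */
			continue;
		}
		/* Refresh the shadow entry's permissions from the guest PTE. */
		spt[i] = gpte & (TOY_PTE_PRESENT | TOY_PTE_WRITABLE | TOY_PTE_USER);
	}
}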