Lines Matching refs:gva
1701 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in nonpaging_invlpg() argument
2846 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn() argument
2858 vcpu_cache_mmio_info(vcpu, gva, gfn, access); in handle_abnormal_pfn()
2923 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, in fast_page_fault() argument
2938 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) in fast_page_fault()
2993 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, in fast_page_fault()
3001 gva_t gva, pfn_t *pfn, bool write, bool *writable);
3382 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, in nonpaging_page_fault() argument
3388 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); in nonpaging_page_fault()
3391 r = handle_mmio_page_fault(vcpu, gva, error_code, true); in nonpaging_page_fault()
3403 gfn = gva >> PAGE_SHIFT; in nonpaging_page_fault()
3405 return nonpaging_map(vcpu, gva & PAGE_MASK, in nonpaging_page_fault()
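
The nonpaging_page_fault hits at 3382-3405 show the address split for the non-paging case: after the MMIO path is tried (handle_mmio_page_fault), the guest frame number is taken from the high bits of the faulting address and the page-aligned address is handed on to nonpaging_map. With the guest running without paging, that address is effectively a guest physical address, so the frame number is just the shift. Below is a minimal, self-contained sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12); the SKETCH_* macros and the typedefs are stand-ins for illustration, not the kernel's own definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's PAGE_SHIFT/PAGE_MASK on x86 (4 KiB pages). */
#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1ULL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK  (~(SKETCH_PAGE_SIZE - 1))

typedef uint64_t gva_t;   /* guest virtual address */
typedef uint64_t gfn_t;   /* guest frame number    */

int main(void)
{
	gva_t gva = 0x7f12345678ULL;

	/* Mirrors "gfn = gva >> PAGE_SHIFT" (hit at 3403). */
	gfn_t gfn = gva >> SKETCH_PAGE_SHIFT;

	/* Mirrors "gva & PAGE_MASK" (hit at 3405): the page-aligned
	 * base address passed on to nonpaging_map(). */
	gva_t page_base = gva & SKETCH_PAGE_MASK;

	printf("gva       = 0x%llx\n", (unsigned long long)gva);
	printf("gfn       = 0x%llx\n", (unsigned long long)gfn);
	printf("page base = 0x%llx\n", (unsigned long long)page_base);
	return 0;
}
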
3409 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf() argument
3418 return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch); in kvm_arch_setup_async_pf()
3431 gva_t gva, pfn_t *pfn, bool write, bool *writable) in try_async_pf() argument
3441 trace_kvm_try_async_get_page(gva, gfn); in try_async_pf()
3443 trace_kvm_async_pf_doublefault(gva, gfn); in try_async_pf()
3446 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) in try_async_pf()
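
The try_async_pf hits at 3431-3446 outline the asynchronous page fault decision: when the pfn is not immediately available, the attempt is traced, and the code either reports a "doublefault" (another fault on a gfn whose async fault is already in flight) or queues a new async fault via kvm_arch_setup_async_pf, which packages the gva/gfn pair for kvm_setup_async_pf (hit at 3418). The following is a hedged, user-space mock of that control flow only; every mock_* name is invented for the sketch and is not a KVM API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gfn_t;

/* Mock state: one pending async fault per vCPU in this sketch. */
struct mock_vcpu {
	bool  async_pf_pending;
	gfn_t pending_gfn;
};

/* Mock of the "queue an async fault" step (kvm_arch_setup_async_pf in the listing). */
static bool mock_setup_async_pf(struct mock_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	vcpu->async_pf_pending = true;
	vcpu->pending_gfn = gfn;
	printf("async pf queued: gva=0x%llx gfn=0x%llx\n",
	       (unsigned long long)gva, (unsigned long long)gfn);
	return true;
}

/* Mock of the decision try_async_pf makes when the page is not immediately
 * there; returns true when the fault will be completed asynchronously. */
static bool mock_try_async_pf(struct mock_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
	printf("try_async_get_page: gva=0x%llx gfn=0x%llx\n",
	       (unsigned long long)gva, (unsigned long long)gfn);

	if (vcpu->async_pf_pending && vcpu->pending_gfn == gfn) {
		/* A second fault on a gfn already being brought in:
		 * the doublefault trace point in the listing (hit at 3443). */
		printf("async_pf_doublefault: gva=0x%llx gfn=0x%llx\n",
		       (unsigned long long)gva, (unsigned long long)gfn);
		return true;
	}

	return mock_setup_async_pf(vcpu, gva, gfn);
}

int main(void)
{
	struct mock_vcpu vcpu = { 0 };
	gva_t gva = 0x7f0000123000ULL;

	mock_try_async_pf(&vcpu, gva, gva >> 12);  /* first fault: queues the async pf */
	mock_try_async_pf(&vcpu, gva, gva >> 12);  /* repeat fault: doublefault path   */
	return 0;
}
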
4237 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_unprotect_page_virt() argument
4245 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); in kvm_mmu_unprotect_page_virt()
4313 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_invlpg() argument
4315 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
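
The last two hits (4313-4315), together with nonpaging_invlpg at 1701, show the dispatch pattern for INVLPG emulation: kvm_mmu_invlpg simply forwards the gva to whichever invlpg callback the current MMU mode installed, and in nonpaging mode there is no per-gva shadow translation to flush, so that callback has nothing to do. Below is a hedged, self-contained illustration of that callback pattern; the struct layout and all mock_* names are invented for the sketch and do not match the kernel's struct kvm_mmu.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gva_t;

struct mock_vcpu;

/* Per-MMU-mode operations; the real struct kvm_mmu carries such an invlpg hook. */
struct mock_mmu {
	void (*invlpg)(struct mock_vcpu *vcpu, gva_t gva);
};

struct mock_vcpu {
	struct mock_mmu mmu;
};

/* Counterpart of nonpaging_invlpg (hit at 1701): with the guest not using
 * paging there is no per-gva shadow translation to invalidate, so the
 * callback is effectively a no-op. */
static void mock_nonpaging_invlpg(struct mock_vcpu *vcpu, gva_t gva)
{
	(void)vcpu;
	(void)gva;
}

/* Counterpart of a paging-mode callback: here it just reports the flush. */
static void mock_paging_invlpg(struct mock_vcpu *vcpu, gva_t gva)
{
	(void)vcpu;
	printf("flush shadow mapping for gva 0x%llx\n", (unsigned long long)gva);
}

/* Counterpart of kvm_mmu_invlpg (hits at 4313-4315): forward to the hook. */
static void mock_kvm_mmu_invlpg(struct mock_vcpu *vcpu, gva_t gva)
{
	vcpu->mmu.invlpg(vcpu, gva);
}

int main(void)
{
	struct mock_vcpu vcpu = { .mmu = { .invlpg = mock_nonpaging_invlpg } };

	mock_kvm_mmu_invlpg(&vcpu, 0x1000);  /* no-op in nonpaging mode */

	vcpu.mmu.invlpg = mock_paging_invlpg;
	mock_kvm_mmu_invlpg(&vcpu, 0x1000);  /* flushes in paging mode  */
	return 0;
}
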