Lines matching refs: gva
1772 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in nonpaging_invlpg() argument
2822 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn() argument
2834 vcpu_cache_mmio_info(vcpu, gva, gfn, access); in handle_abnormal_pfn()
2899 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, in fast_page_fault() argument
2914 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) in fast_page_fault()
2969 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, in fast_page_fault()
2977 gva_t gva, pfn_t *pfn, bool write, bool *writable);
3397 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, in nonpaging_page_fault() argument
3403 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); in nonpaging_page_fault()
3406 r = handle_mmio_page_fault(vcpu, gva, true); in nonpaging_page_fault()
3418 gfn = gva >> PAGE_SHIFT; in nonpaging_page_fault()
3420 return nonpaging_map(vcpu, gva & PAGE_MASK, in nonpaging_page_fault()
3424 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf() argument
3433 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); in kvm_arch_setup_async_pf()
3446 gva_t gva, pfn_t *pfn, bool write, bool *writable) in try_async_pf() argument
3458 trace_kvm_try_async_get_page(gva, gfn); in try_async_pf()
3460 trace_kvm_async_pf_doublefault(gva, gfn); in try_async_pf()
3463 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) in try_async_pf()
4358 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_unprotect_page_virt() argument
4366 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); in kvm_mmu_unprotect_page_virt()
4434 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_invlpg() argument
4436 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
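
The hits at lines 1772, 4434, and 4436 together show the dispatch pattern behind these references: kvm_mmu_invlpg() forwards the guest virtual address to the per-MMU invlpg callback, and nonpaging_invlpg() is the handler installed when the guest runs without paging. The standalone sketch below models that callback dispatch outside the kernel; the demo_* type and function names are hypothetical stand-ins for illustration, not the real KVM definitions.

/*
 * Minimal userspace model of the per-MMU invlpg dispatch seen above:
 * kvm_mmu_invlpg() calls vcpu->arch.mmu.invlpg(vcpu, gva).
 * All names prefixed demo_ are illustrative stand-ins, not KVM code.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t gva_t;                 /* guest virtual address (model only) */

struct demo_vcpu;

struct demo_mmu {
	/* per-MMU-mode INVLPG handler, installed at MMU setup time */
	void (*invlpg)(struct demo_vcpu *vcpu, gva_t gva);
};

struct demo_vcpu {
	struct demo_mmu mmu;
};

/* Nonpaging mode keeps no per-gva shadow mappings, so there is nothing to flush. */
static void demo_nonpaging_invlpg(struct demo_vcpu *vcpu, gva_t gva)
{
	(void)vcpu;
	(void)gva;
}

/* A shadow-paging handler would walk the shadow entries for @gva and zap them. */
static void demo_shadow_invlpg(struct demo_vcpu *vcpu, gva_t gva)
{
	(void)vcpu;
	printf("zapping shadow entries for gva 0x%llx\n",
	       (unsigned long long)gva);
}

/* Mirrors kvm_mmu_invlpg(): forward the gva to whichever handler is installed. */
static void demo_mmu_invlpg(struct demo_vcpu *vcpu, gva_t gva)
{
	vcpu->mmu.invlpg(vcpu, gva);
}

int main(void)
{
	struct demo_vcpu vcpu = { .mmu = { .invlpg = demo_nonpaging_invlpg } };

	demo_mmu_invlpg(&vcpu, 0x1000);       /* no-op in nonpaging mode */

	vcpu.mmu.invlpg = demo_shadow_invlpg; /* switch handler, as MMU setup would */
	demo_mmu_invlpg(&vcpu, 0x2000);

	return 0;
}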