/linux-4.4.14/include/trace/events/ |
D | kvm.h |
  275  TP_PROTO(u64 gva, u64 gfn),
  277  TP_ARGS(gva, gfn),
  280  __field(__u64, gva)
  285  __entry->gva = gva;
  289  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
  294  TP_PROTO(u64 gva, u64 gfn),
  296  TP_ARGS(gva, gfn)
  301  TP_PROTO(u64 gva, u64 gfn),
  303  TP_ARGS(gva, gfn)
  308  TP_PROTO(u64 token, u64 gva),
  [all …]
|
/linux-4.4.14/arch/x86/kvm/ |
D | x86.h |
  88   gva_t gva, gfn_t gfn, unsigned access)  in vcpu_cache_mmio_info() argument
  90   vcpu->arch.mmio_gva = gva & PAGE_MASK;  in vcpu_cache_mmio_info()
  107  static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)  in vcpu_clear_mmio_info() argument
  109  if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))  in vcpu_clear_mmio_info()
  115  static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)  in vcpu_match_mmio_gva() argument
  118  vcpu->arch.mmio_gva == (gva & PAGE_MASK))  in vcpu_match_mmio_gva()
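The x86.h helpers above cache the last MMIO-faulting gva at page granularity and match later accesses against it. A minimal user-space sketch of that page-masked caching pattern, with simplified names and only the gva/gfn pair kept (the real helpers also take an access mask, as the line-88 signature shows):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK     (~0xfffUL)        /* 4 KiB pages, as on x86 */
#define MMIO_GVA_ANY  (~0UL)            /* wildcard accepted by the clear helper */

struct vcpu_mmio_cache {
	unsigned long mmio_gva;         /* page-aligned gva of the last MMIO access */
	uint64_t      mmio_gfn;         /* guest frame number it resolved to */
};

/* Remember that this gva (rounded down to its page) hit MMIO. */
static void cache_mmio_info(struct vcpu_mmio_cache *c, unsigned long gva, uint64_t gfn)
{
	c->mmio_gva = gva & PAGE_MASK;
	c->mmio_gfn = gfn;
}

/* Drop the cached entry, either for one specific page or unconditionally. */
static void clear_mmio_info(struct vcpu_mmio_cache *c, unsigned long gva)
{
	if (gva != MMIO_GVA_ANY && c->mmio_gva != (gva & PAGE_MASK))
		return;                 /* different page: nothing to clear */
	c->mmio_gva = 0;
	c->mmio_gfn = 0;
}

/* Does this gva fall on the cached MMIO page? */
static bool match_mmio_gva(const struct vcpu_mmio_cache *c, unsigned long gva)
{
	return c->mmio_gva && c->mmio_gva == (gva & PAGE_MASK);
}

int main(void)
{
	struct vcpu_mmio_cache c = { 0 };

	cache_mmio_info(&c, 0xfee00030UL, 0xfee00);
	printf("same page:   %d\n", match_mmio_gva(&c, 0xfee00ff0UL)); /* 1 */
	printf("other page:  %d\n", match_mmio_gva(&c, 0xfee01000UL)); /* 0 */
	clear_mmio_info(&c, MMIO_GVA_ANY);
	printf("after clear: %d\n", match_mmio_gva(&c, 0xfee00030UL)); /* 0 */
	return 0;
}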
|
D | mmutrace.h |
  249  TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
  251  TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
  255  __field(gva_t, gva)
  265  __entry->gva = gva;
  275  __entry->gva, __print_flags(__entry->error_code, "|",
|
D | trace.h |
  765  TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
  766  TP_ARGS(gva, gpa, write, gpa_match),
  769  __field(gva_t, gva)
  776  __entry->gva = gva;
  782  TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
|
D | mmu.c |
  1772  static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)  in nonpaging_invlpg() argument
  2822  static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,  in handle_abnormal_pfn() argument
  2834  vcpu_cache_mmio_info(vcpu, gva, gfn, access);  in handle_abnormal_pfn()
  2899  static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,  in fast_page_fault() argument
  2914  for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)  in fast_page_fault()
  2969  trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,  in fast_page_fault()
  2977  gva_t gva, pfn_t *pfn, bool write, bool *writable);
  3397  static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,  in nonpaging_page_fault() argument
  3403  pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);  in nonpaging_page_fault()
  3406  r = handle_mmio_page_fault(vcpu, gva, true);  in nonpaging_page_fault()
  [all …]
|
D | paging_tmpl.h |
  819  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)  in FNAME()
  826  vcpu_clear_mmio_info(vcpu, gva);  in FNAME()
  840  for_each_shadow_entry(vcpu, gva, iterator) {  in FNAME()
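The paging_tmpl.h hits are attributed to FNAME() because that file is a template: it is compiled several times with FNAME() mapping each definition to a differently named set of functions (paging64_*, paging32_*, ...). A toy, standalone illustration of the same token-pasting trick (names here are purely illustrative):

#include <stdio.h>

/* first "inclusion" of the template */
#define FNAME(name) paging64_##name
static void FNAME(invlpg)(unsigned long gva)      /* defines paging64_invlpg */
{
	printf("paging64 invlpg of gva %#lx\n", gva);
}
#undef FNAME

/* second "inclusion" of the template */
#define FNAME(name) paging32_##name
static void FNAME(invlpg)(unsigned long gva)      /* defines paging32_invlpg */
{
	printf("paging32 invlpg of gva %#lx\n", gva);
}
#undef FNAME

int main(void)
{
	paging64_invlpg(0x7f0000000000UL);
	paging32_invlpg(0xbfff0000UL);
	return 0;
}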
|
D | x86.c |
  4121  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_read() argument
  4125  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  in kvm_mmu_gva_to_gpa_read()
  4128  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_fetch() argument
  4133  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  in kvm_mmu_gva_to_gpa_fetch()
  4136  gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_write() argument
  4141  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  in kvm_mmu_gva_to_gpa_write()
  4145  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_system() argument
  4148  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);  in kvm_mmu_gva_to_gpa_system()
  4272  static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,  in vcpu_mmio_gva_to_gpa() argument
  4279  if (vcpu_match_mmio_gva(vcpu, gva)  in vcpu_mmio_gva_to_gpa()
  [all …]
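The x86.c hits show four thin wrappers that all funnel into the same walk_mmu->gva_to_gpa callback, differing only in the access mask they pass (the system variant passes 0, as line 4148 shows). A hedged standalone sketch of that dispatch pattern, with simplified types and access bits and an identity translation standing in for the real page walk:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;
typedef unsigned long gva_t;

/* Illustrative access bits; the kernel builds these from PFERR_* masks and the CPL. */
#define ACC_WRITE (1u << 1)
#define ACC_USER  (1u << 2)
#define ACC_FETCH (1u << 4)

struct mmu_ctx {
	/* One translation callback per MMU mode, installed at setup time. */
	gpa_t (*gva_to_gpa)(struct mmu_ctx *mmu, gva_t gva, uint32_t access);
};

/* Stand-in "page walk": identity-map the address and report the access bits. */
static gpa_t fake_walk(struct mmu_ctx *mmu, gva_t gva, uint32_t access)
{
	(void)mmu;
	printf("walk gva=%#lx access=%#x\n", gva, access);
	return (gpa_t)gva;
}

/* The four wrappers only differ in the access mask they add. */
static gpa_t gva_to_gpa_read(struct mmu_ctx *m, gva_t gva, uint32_t base)
{
	return m->gva_to_gpa(m, gva, base);
}
static gpa_t gva_to_gpa_fetch(struct mmu_ctx *m, gva_t gva, uint32_t base)
{
	return m->gva_to_gpa(m, gva, base | ACC_FETCH);
}
static gpa_t gva_to_gpa_write(struct mmu_ctx *m, gva_t gva, uint32_t base)
{
	return m->gva_to_gpa(m, gva, base | ACC_WRITE);
}
static gpa_t gva_to_gpa_system(struct mmu_ctx *m, gva_t gva)
{
	return m->gva_to_gpa(m, gva, 0);   /* system access ignores the caller's privilege */
}

int main(void)
{
	struct mmu_ctx walk_mmu = { .gva_to_gpa = fake_walk };
	uint32_t user = ACC_USER;          /* as if the guest were running at CPL 3 */

	gva_to_gpa_read(&walk_mmu, 0x7f0000001000UL, user);
	gva_to_gpa_fetch(&walk_mmu, 0x400000UL, user);
	gva_to_gpa_write(&walk_mmu, 0x7f0000002000UL, user);
	gva_to_gpa_system(&walk_mmu, 0xffff880000000000UL);
	return 0;
}

The kvm_host.h entry further down declares gva_to_gpa, page_fault and invlpg as function pointers for exactly this reason: each MMU mode plugs in its own implementations behind a common interface.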
|
D | vmx.c |
  1272  static inline void __invvpid(int ext, u16 vpid, gva_t gva)  in __invvpid() argument
  1277  u64 gva;  in __invvpid() member
  1278  } operand = { vpid, 0, gva };  in __invvpid()
  6639  gva_t gva;  in nested_vmx_check_vmptr() local
  6647  vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))  in nested_vmx_check_vmptr()
  6650  if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,  in nested_vmx_check_vmptr()
  7150  gva_t gva = 0;  in handle_vmread() local
  7174  vmx_instruction_info, true, &gva))  in handle_vmread()
  7177  kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,  in handle_vmread()
  7190  gva_t gva;  in handle_vmwrite() local
  [all …]
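In __invvpid(), the gva is the last member of the 128-bit descriptor that the INVVPID instruction reads from memory: a 16-bit VPID, 48 reserved bits, then the 64-bit guest linear address used by the address-specific invalidation types. A standalone sketch of that layout (the inline asm that would actually issue INVVPID is omitted; the event name and values are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the anonymous struct initialized at vmx.c line 1278. */
struct invvpid_desc {
	uint64_t vpid : 16;    /* virtual-processor identifier */
	uint64_t rsvd : 48;    /* reserved, must be zero */
	uint64_t gva;          /* linear address for address-specific invalidation */
};

int main(void)
{
	struct invvpid_desc d = { .vpid = 5, .rsvd = 0, .gva = 0x7f1234560000UL };

	static_assert(sizeof(d) == 16, "INVVPID descriptor is 128 bits");
	printf("vpid=%u gva=%#llx\n", (unsigned)d.vpid, (unsigned long long)d.gva);

	/* The kernel passes the invalidation type and a pointer to this
	 * descriptor to the invvpid instruction; a user-space sketch stops here. */
	return 0;
}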
|
/linux-4.4.14/virt/kvm/ |
D | async_pf.c |
  79   gva_t gva = apf->gva;  in async_pf_execute() local
  95   trace_kvm_async_pf_completed(addr, gva);  in async_pf_execute()
  162  int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,  in kvm_setup_async_pf() argument
  182  work->gva = gva;  in kvm_setup_async_pf()
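async_pf.c records the faulting gva in each queued work item so the completion path can report which guest address was resolved. A rough user-space sketch of that bookkeeping, with simplified structures and a print standing in for the real worker, which faults in the backing host page:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long gva_t;

/* One outstanding async page fault: which guest address faulted and
 * which host address needs to be paged in to satisfy it. */
struct async_pf_work {
	gva_t         gva;     /* guest virtual address the guest faulted on */
	unsigned long hva;     /* host virtual address backing that page */
	uint64_t      token;   /* identifier reported back to the guest */
	struct async_pf_work *next;
};

static struct async_pf_work *queue;

/* Record the fault and park it on a queue; a worker resolves it later. */
static int setup_async_pf(gva_t gva, unsigned long hva, uint64_t token)
{
	struct async_pf_work *w = calloc(1, sizeof(*w));
	if (!w)
		return -1;
	w->gva = gva;
	w->hva = hva;
	w->token = token;
	w->next = queue;
	queue = w;
	return 0;
}

/* Worker side: resolve the host page, then report completion for the gva. */
static void async_pf_execute(struct async_pf_work *w)
{
	printf("completed token=%llu gva=%#lx hva=%#lx\n",
	       (unsigned long long)w->token, w->gva, w->hva);
}

int main(void)
{
	setup_async_pf(0x7f00dead0000UL, 0x55aa00000000UL, 1);
	while (queue) {
		struct async_pf_work *w = queue;
		queue = w->next;
		async_pf_execute(w);
		free(w);
	}
	return 0;
}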
|
/linux-4.4.14/arch/s390/kvm/ |
D | gaccess.c |
  539  static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,  in guest_translate() argument
  543  union vaddress vaddr = {.addr = gva};  in guest_translate()
  544  union raddress raddr = {.addr = gva};  in guest_translate()
  827  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,  in guest_translate_address() argument
  836  gva = kvm_s390_logical_to_effective(vcpu, gva);  in guest_translate_address()
  839  tec->addr = gva >> PAGE_SHIFT;  in guest_translate_address()
  842  if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {  in guest_translate_address()
  850  rc = guest_translate(vcpu, gva, gpa, asce, write);  in guest_translate_address()
  858  *gpa = kvm_s390_real_to_abs(vcpu, gva);  in guest_translate_address()
  869  int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,  in check_gva_range() argument
  [all …]
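guest_translate_address() on s390 first normalizes the logical address to an effective one, rejects stores to protected low addresses, and then either walks the DAT tables or treats the gva as a real address. A very rough standalone sketch of that decision flow (the actual protection checks, prefixing, and DAT walk are far more involved; the 512-byte low-address check and identity walk below are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in: only treat the first 512 bytes as a protected low address. */
static bool is_low_address(unsigned long gva)
{
	return gva < 512;
}

/* Stand-in for the DAT table walk: identity translation. */
static int guest_translate(unsigned long gva, uint64_t *gpa)
{
	*gpa = gva;
	return 0;
}

static int guest_translate_address(unsigned long gva, bool dat_on,
				   bool low_prot, bool write, uint64_t *gpa)
{
	/* 1. low-address protection applies to stores only */
	if (write && low_prot && is_low_address(gva)) {
		fprintf(stderr, "protection exception, page %#lx\n",
			gva >> PAGE_SHIFT);
		return -1;
	}
	/* 2. with DAT enabled, walk the guest's translation tables ... */
	if (dat_on)
		return guest_translate(gva, gpa);
	/* 3. ... otherwise the address is already real (the kernel then
	 *    still applies prefixing via kvm_s390_real_to_abs). */
	*gpa = gva;
	return 0;
}

int main(void)
{
	uint64_t gpa;

	if (guest_translate_address(0x10, true, true, true, &gpa) < 0)
		printf("store to low address rejected\n");
	if (guest_translate_address(0x2000, false, false, false, &gpa) == 0)
		printf("DAT off: gpa=%#llx\n", (unsigned long long)gpa);
	return 0;
}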
|
D | gaccess.h |
  158  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
  160  int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
|
/linux-4.4.14/arch/x86/include/asm/ |
D | kvm_host.h |
  273   int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
  277   gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
  283   void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
  1073  int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
  1080  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
  1082  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
  1084  gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
  1086  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
  1091  int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
  1093  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
|
/linux-4.4.14/arch/mips/kvm/ |
D | tlb.c |
  169  unsigned long gva)  in kvm_mips_translate_guest_kseg0_to_hpa() argument
  172  uint32_t offset = gva & ~PAGE_MASK;  in kvm_mips_translate_guest_kseg0_to_hpa()
  175  if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {  in kvm_mips_translate_guest_kseg0_to_hpa()
  177  __builtin_return_address(0), gva);  in kvm_mips_translate_guest_kseg0_to_hpa()
  181  gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);  in kvm_mips_translate_guest_kseg0_to_hpa()
  185  gva);  in kvm_mips_translate_guest_kseg0_to_hpa()
|
D | trap_emul.c |
  22  static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)  in kvm_trap_emul_gva_to_gpa_cb() argument
  25  uint32_t kseg = KSEGX(gva);  in kvm_trap_emul_gva_to_gpa_cb()
  28  gpa = CPHYSADDR(gva);  in kvm_trap_emul_gva_to_gpa_cb()
  30  kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);  in kvm_trap_emul_gva_to_gpa_cb()
  35  kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);  in kvm_trap_emul_gva_to_gpa_cb()
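The MIPS trap-and-emulate callback converts a gva that lies in an unmapped segment into a gpa simply by dropping the segment bits. A small standalone sketch of that CPHYSADDR-style conversion, covering only the 32-bit compatibility segments (constants follow the standard MIPS memory map):

#include <stdint.h>
#include <stdio.h>

#define KSEGX(a)     ((uint32_t)(a) & 0xe0000000u)   /* which 512 MiB segment */
#define KSEG0        0x80000000u                     /* cached, unmapped */
#define KSEG1        0xa0000000u                     /* uncached, unmapped */
#define CPHYSADDR(a) ((uint32_t)(a) & 0x1fffffffu)   /* strip the segment bits */

/* kseg0/kseg1 are fixed windows onto the low 512 MiB of physical memory,
 * so a guest virtual address there converts to a guest physical address by
 * masking; mapped segments (useg, kseg2) would need a TLB lookup instead. */
static int gva_to_gpa(uint32_t gva, uint32_t *gpa)
{
	uint32_t kseg = KSEGX(gva);

	if (kseg == KSEG0 || kseg == KSEG1) {
		*gpa = CPHYSADDR(gva);
		return 0;
	}
	return -1;    /* not an unmapped segment: no direct conversion */
}

int main(void)
{
	uint32_t gpa;

	if (gva_to_gpa(0x80010000u, &gpa) == 0)
		printf("kseg0 gva 0x80010000 -> gpa %#x\n", gpa);   /* 0x10000 */
	if (gva_to_gpa(0x00400000u, &gpa) != 0)
		printf("useg address needs a TLB lookup\n");
	return 0;
}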
|
/linux-4.4.14/Documentation/virtual/kvm/ |
D | mmu.txt |
  36   gva  guest virtual address
  65   guest physical addresses, to host physical addresses (gva->gpa->hpa)
  122  paging: gva->gpa->hpa
  123  paging, tdp: (gva->)gpa->hpa
  300  (gva->gpa or ngpa->gpa)
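mmu.txt describes guest memory translation as a composition of stages: gva->gpa through the guest's page tables, then gpa->hpa through the host's. A toy sketch of that composition, with both stages reduced to single-level lookup tables (all values illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define NPAGES     8

/* Toy single-level "page tables": index = page number, value = target frame,
 * UINT64_MAX meaning not present. */
static uint64_t guest_pt[NPAGES] = { 3, 1, UINT64_MAX, 0, 2, 5, 4, 7 }; /* gva -> gpa */
static uint64_t host_pt[NPAGES]  = { 6, 2, 7, 0, UINT64_MAX, 3, 1, 5 }; /* gpa -> hpa */

static int lookup(const uint64_t *pt, uint64_t addr, uint64_t *out)
{
	uint64_t pfn = addr >> PAGE_SHIFT;

	if (pfn >= NPAGES || pt[pfn] == UINT64_MAX)
		return -1;                          /* would raise a fault */
	*out = (pt[pfn] << PAGE_SHIFT) | (addr & ((1 << PAGE_SHIFT) - 1));
	return 0;
}

int main(void)
{
	uint64_t gva = (4UL << PAGE_SHIFT) | 0x123, gpa, hpa;

	/* gva -> gpa via the guest's tables, gpa -> hpa via the host's:
	 * shadow paging caches the combined gva->hpa result, while TDP keeps
	 * the two stages separate in hardware. */
	if (lookup(guest_pt, gva, &gpa) == 0 && lookup(host_pt, gpa, &hpa) == 0)
		printf("gva %#llx -> gpa %#llx -> hpa %#llx\n",
		       (unsigned long long)gva, (unsigned long long)gpa,
		       (unsigned long long)hpa);
	return 0;
}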
|
/linux-4.4.14/arch/mips/include/asm/ |
D | kvm_host.h |
  617  gpa_t (*gva_to_gpa)(gva_t gva);
  695  unsigned long gva);
|
/linux-4.4.14/include/linux/ |
D | kvm_host.h |
  195  gva_t gva;  member
  203  int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
|