Searched for refs:gva (results 1 - 17 of 17), sorted by relevance

/linux-4.4.14/include/trace/events/
kvm.h
  275  TP_PROTO(u64 gva, u64 gfn),
  277  TP_ARGS(gva, gfn),
  280  __field(__u64, gva)
  285  __entry->gva = gva;
  289  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
  294  TP_PROTO(u64 gva, u64 gfn),
  296  TP_ARGS(gva, gfn)
  301  TP_PROTO(u64 gva, u64 gfn),
  303  TP_ARGS(gva, gfn)
  308  TP_PROTO(u64 token, u64 gva),
  310  TP_ARGS(token, gva),
  314  __field(__u64, gva)
  319  __entry->gva = gva;
  322  TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
  328  TP_PROTO(u64 token, u64 gva),
  330  TP_ARGS(token, gva)
  335  TP_PROTO(u64 token, u64 gva),
  337  TP_ARGS(token, gva)
  342  TP_PROTO(unsigned long address, u64 gva),
  343  TP_ARGS(address, gva),
  347  __field(u64, gva)
  352  __entry->gva = gva;
  355  TP_printk("gva %#llx address %#lx", __entry->gva,
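
These hits are all fragments of KVM's tracepoint definitions. A minimal sketch of that pattern follows (hypothetical event name kvm_example_gva; the real events live in include/trace/events/kvm.h):

    /* Sketch of a TRACE_EVENT recording a gva/gfn pair, as a trace header would. */
    #include <linux/tracepoint.h>

    TRACE_EVENT(kvm_example_gva,
            TP_PROTO(u64 gva, u64 gfn),
            TP_ARGS(gva, gfn),

            TP_STRUCT__entry(
                    __field(__u64, gva)     /* guest virtual address */
                    __field(__u64, gfn)     /* guest frame number */
            ),

            TP_fast_assign(
                    __entry->gva = gva;
                    __entry->gfn = gfn;
            ),

            TP_printk("gva = %#llx, gfn = %#llx",
                      __entry->gva, __entry->gfn)
    );
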
/linux-4.4.14/arch/x86/kvm/
x86.h
   87  vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
   88  	gva_t gva, gfn_t gfn, unsigned access)
   90  	vcpu->arch.mmio_gva = gva & PAGE_MASK; vcpu_cache_mmio_info()
  102   * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
  107  static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
  109  	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) vcpu_clear_mmio_info()
  115  static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
  118  	vcpu->arch.mmio_gva == (gva & PAGE_MASK)) vcpu_match_mmio_gva()
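
The x86.h helpers above cache one MMIO translation per vcpu at page granularity. A minimal standalone sketch of the match/clear logic, with simplified types in place of the kernel's structs:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_MASK    (~(uint64_t)0xfff)  /* 4 KiB pages assumed */
    #define MMIO_GVA_ANY (~(uint64_t)0)      /* wildcard, as in x86.h */

    struct mmio_cache { uint64_t mmio_gva; };

    /* Hit only if the cached gva covers the same guest page. */
    static bool match_mmio_gva(const struct mmio_cache *c, uint64_t gva)
    {
            return c->mmio_gva && c->mmio_gva == (gva & PAGE_MASK);
    }

    /* Clear one page's entry, or everything for MMIO_GVA_ANY. */
    static void clear_mmio_info(struct mmio_cache *c, uint64_t gva)
    {
            if (gva != MMIO_GVA_ANY && c->mmio_gva != (gva & PAGE_MASK))
                    return;
            c->mmio_gva = 0;
    }
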
mmutrace.h
  249  TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
  251  TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
  255  __field(gva_t, gva)
  265  __entry->gva = gva;
  273  TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
  275  __entry->gva, __print_flags(__entry->error_code, "|",
trace.h
  765  TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
  766  TP_ARGS(gva, gpa, write, gpa_match),
  769  __field(gva_t, gva)
  776  __entry->gva = gva;
  782  TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
mmu.c
 1772  static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 2822  static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 2834  	vcpu_cache_mmio_info(vcpu, gva, gfn, access); handle_abnormal_pfn()
 2899  static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 2914  	for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
 2969  	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
 2977  	gva_t gva, pfn_t *pfn, bool write, bool *writable);
 3397  static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 3403  	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); nonpaging_page_fault()
 3406  	r = handle_mmio_page_fault(vcpu, gva, true); nonpaging_page_fault()
 3418  	gfn = gva >> PAGE_SHIFT; nonpaging_page_fault()
 3420  	return nonpaging_map(vcpu, gva & PAGE_MASK, nonpaging_page_fault()
 3424  static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 3433  	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); kvm_arch_setup_async_pf()
 3445  try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 3446  	gva_t gva, pfn_t *pfn, bool write, bool *writable)
 3458  	trace_kvm_try_async_get_page(gva, gfn); try_async_pf()
 3460  	trace_kvm_async_pf_doublefault(gva, gfn); try_async_pf()
 3463  	} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) try_async_pf()
 4358  int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 4366  	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); kvm_mmu_unprotect_page_virt()
 4434  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 4436  	vcpu->arch.mmu.invlpg(vcpu, gva); kvm_mmu_invlpg()
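
In nonpaging_page_fault() above the guest runs without paging, so the gva is already a guest physical address and the split into frame number and page offset is pure arithmetic. A small standalone illustration (PAGE_SHIFT is 12 for the 4 KiB pages assumed here):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    int main(void)
    {
            uint64_t gva = 0x12345678;
            uint64_t gfn = gva >> PAGE_SHIFT;      /* guest frame number */
            uint64_t off = gva & (PAGE_SIZE - 1);  /* offset within the page */

            printf("gva %#llx -> gfn %#llx offset %#llx\n",
                   (unsigned long long)gva, (unsigned long long)gfn,
                   (unsigned long long)off);
            return 0;
    }
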
paging_tmpl.h
  819  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
  826  	vcpu_clear_mmio_info(vcpu, gva); invlpg()
  840  	for_each_shadow_entry(vcpu, gva, iterator) { for_each_shadow_entry()
x86.c
 4121  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 4125  	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); kvm_mmu_gva_to_gpa_read()
 4128  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
 4133  	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); kvm_mmu_gva_to_gpa_fetch()
 4136  gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 4141  	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); kvm_mmu_gva_to_gpa_write()
 4145  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 4148  	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); kvm_mmu_gva_to_gpa_system()
 4272  static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 4279  	if (vcpu_match_mmio_gva(vcpu, gva) vcpu_mmio_gva_to_gpa()
 4283  		(gva & (PAGE_SIZE - 1)); vcpu_mmio_gva_to_gpa()
 4284  		trace_vcpu_match_mmio(gva, *gpa, write, false); vcpu_mmio_gva_to_gpa()
 4288  	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); vcpu_mmio_gva_to_gpa()
 4298  	trace_vcpu_match_mmio(gva, *gpa, write, true); vcpu_mmio_gva_to_gpa()
 8083  	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); kvm_arch_async_page_ready()
 8159  	trace_kvm_async_pf_not_present(work->arch.token, work->gva); kvm_arch_async_page_not_present()
 8181  	trace_kvm_async_pf_ready(work->arch.token, work->gva); kvm_arch_async_page_present()
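
vcpu_mmio_gva_to_gpa() above first tries the cached MMIO translation and only falls back to a full page-table walk on a miss. A standalone sketch of that fast path (hypothetical field names; the kernel keeps these in vcpu->arch):

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    struct mmio_fast_cache {
            uint64_t mmio_gva;  /* page-aligned cached gva */
            uint64_t mmio_gfn;  /* guest frame number it maps to */
    };

    /* Returns 1 on a cache hit and fills *gpa; 0 means walk the tables. */
    static int mmio_gva_to_gpa_fast(const struct mmio_fast_cache *c,
                                    uint64_t gva, uint64_t *gpa)
    {
            if (!c->mmio_gva || c->mmio_gva != (gva & ~(PAGE_SIZE - 1)))
                    return 0;
            /* rebuild the gpa from the cached frame plus the page offset */
            *gpa = (c->mmio_gfn << PAGE_SHIFT) | (gva & (PAGE_SIZE - 1));
            return 1;
    }
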
vmx.c
 1272  static inline void __invvpid(int ext, u16 vpid, gva_t gva)
 1277  		u64 gva; __invvpid()
 1278  	} operand = { vpid, 0, gva }; __invvpid()
 6639  	gva_t gva; nested_vmx_check_vmptr()
 6647  			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) nested_vmx_check_vmptr()
 6650  	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, nested_vmx_check_vmptr()
 7150  	gva_t gva = 0; handle_vmread()
 7174  			vmx_instruction_info, true, &gva)) handle_vmread()
 7177  		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, handle_vmread()
 7190  	gva_t gva; handle_vmwrite()
 7211  			vmx_instruction_info, false, &gva)) handle_vmwrite()
 7213  	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, handle_vmwrite()
 7321  	gva_t gva; handle_invept()
 7358  			vmx_instruction_info, false, &gva)) handle_invept()
 7360  	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, handle_invept()
 7387  	gva_t gva; handle_invvpid()
 7417  			vmx_instruction_info, false, &gva)) handle_invvpid()
 7419  	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid, handle_invvpid()
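
__invvpid() above builds the 128-bit INVVPID descriptor: the VPID in the low 16 bits, 48 reserved (zero) bits, then the 64-bit linear address. A standalone sketch of that layout (per the Intel SDM; for illustration only):

    #include <stdint.h>

    struct invvpid_desc {
            uint64_t vpid : 16;  /* target VPID */
            uint64_t rsvd : 48;  /* must be zero */
            uint64_t gva;        /* linear address, used by type-0 invalidation */
    } __attribute__((packed));

    /* Mirrors the initializer seen above: } operand = { vpid, 0, gva }; */
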
/linux-4.4.14/virt/kvm/
async_pf.c
   79  	gva_t gva = apf->gva; async_pf_execute()
   95  	trace_kvm_async_pf_completed(addr, gva); async_pf_execute()
  162  int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
  182  	work->gva = gva; kvm_setup_async_pf()
/linux-4.4.14/include/linux/
kvm_types.h
   40   * gva - guest virtual address
kvm_host.h
  195  	gva_t gva; /* member of struct kvm_async_pf */
  203  int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
/linux-4.4.14/arch/s390/kvm/
gaccess.c
  522   * @gva: guest virtual address
  539  static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
  543  	union vaddress vaddr = {.addr = gva}; guest_translate()
  544  	union raddress raddr = {.addr = gva}; guest_translate()
  827  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  836  	gva = kvm_s390_logical_to_effective(vcpu, gva); guest_translate_address()
  839  	tec->addr = gva >> PAGE_SHIFT; guest_translate_address()
  842  	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { guest_translate_address()
  850  		rc = guest_translate(vcpu, gva, gpa, asce, write); guest_translate_address()
  858  		*gpa = kvm_s390_real_to_abs(vcpu, gva); guest_translate_address()
  869  int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  878  		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE)); check_gva_range()
  879  		rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write); check_gva_range()
  880  		gva += currlen; check_gva_range()
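
check_gva_range() above validates a guest range page by page, never letting one step cross a page boundary. A standalone sketch of that loop shape (translate() is a hypothetical stand-in for guest_translate_address()):

    #include <stdint.h>

    #define PAGE_SIZE 4096ULL

    int check_range(uint64_t gva, uint64_t length,
                    int (*translate)(uint64_t gva, uint64_t *gpa))
    {
            uint64_t gpa;
            int rc = 0;

            while (length && !rc) {
                    /* cover at most the remainder of the current page */
                    uint64_t rest = PAGE_SIZE - (gva % PAGE_SIZE);
                    uint64_t currlen = length < rest ? length : rest;

                    rc = translate(gva, &gpa);
                    gva += currlen;
                    length -= currlen;
            }
            return rc;
    }
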
gaccess.h
  158  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
  160  int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
/linux-4.4.14/arch/mips/kvm/
tlb.c
  168  kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
  169  				       unsigned long gva)
  172  	uint32_t offset = gva & ~PAGE_MASK; kvm_mips_translate_guest_kseg0_to_hpa()
  175  	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) { kvm_mips_translate_guest_kseg0_to_hpa()
  176  		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__, kvm_mips_translate_guest_kseg0_to_hpa()
  177  			__builtin_return_address(0), gva); kvm_mips_translate_guest_kseg0_to_hpa()
  181  	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT); kvm_mips_translate_guest_kseg0_to_hpa()
  185  		gva); kvm_mips_translate_guest_kseg0_to_hpa()
trap_emul.c
   22  static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
   25  	uint32_t kseg = KSEGX(gva); kvm_trap_emul_gva_to_gpa_cb()
   28  		gpa = CPHYSADDR(gva); kvm_trap_emul_gva_to_gpa_cb()
   30  		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); kvm_trap_emul_gva_to_gpa_cb()
   35  	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); kvm_trap_emul_gva_to_gpa_cb()
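
The MIPS callbacks above translate unmapped-segment addresses by masking alone: KSEG0/KSEG1 map linearly to physical memory, so no page tables are consulted. A standalone sketch using the usual MIPS segment macros:

    #include <stdint.h>

    #define KSEGX(a)     ((uint32_t)(a) & 0xe0000000) /* segment selector bits */
    #define KSEG0        0x80000000                   /* unmapped, cached */
    #define KSEG1        0xa0000000                   /* unmapped, uncached */
    #define CPHYSADDR(a) ((uint32_t)(a) & 0x1fffffff) /* strip segment bits */

    /* Returns the physical address, or -1 for a mapped segment. */
    static int64_t kseg_gva_to_gpa(uint32_t gva)
    {
            uint32_t kseg = KSEGX(gva);

            if (kseg == KSEG0 || kseg == KSEG1)
                    return CPHYSADDR(gva);
            return -1;  /* would need a TLB lookup instead */
    }
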
/linux-4.4.14/arch/x86/include/asm/
kvm_host.h
  273  	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
  277  	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
  283  	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 1073  int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 1080  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 1082  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
 1084  gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 1086  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 1091  int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
 1093  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
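
The first three declarations above form a per-mode MMU callback table: KVM fills these function pointers in for the guest's current paging mode, so one call site serves nonpaging, 32-bit, PAE and 64-bit guests alike. A simplified standalone sketch of the table shape:

    #include <stdint.h>

    typedef uint64_t gva_t;
    typedef uint64_t gpa_t;

    struct vcpu;  /* opaque here */

    struct mmu_ops {
            int   (*page_fault)(struct vcpu *v, gva_t gva, uint32_t err);
            gpa_t (*gva_to_gpa)(struct vcpu *v, gva_t gva, uint32_t access);
            void  (*invlpg)(struct vcpu *v, gva_t gva);
    };
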
/linux-4.4.14/arch/mips/include/asm/
kvm_host.h
  617  	gpa_t (*gva_to_gpa)(gva_t gva);
  695  				unsigned long gva);

Completed in 583 milliseconds