gva 800 arch/mips/include/asm/kvm_host.h gpa_t (*gva_to_gpa)(gva_t gva);
gva 870 arch/mips/include/asm/kvm_host.h unsigned long gva,
gva 889 arch/mips/include/asm/kvm_host.h int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
gva 937 arch/mips/include/asm/kvm_host.h unsigned long gva,
gva 1042 arch/mips/kvm/mmu.c unsigned long gva,
gva 1048 arch/mips/kvm/mmu.c unsigned int idx = TLB_LO_IDX(*tlb, gva);
gva 1058 arch/mips/kvm/mmu.c if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
gva 1078 arch/mips/kvm/mmu.c ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
gva 1080 arch/mips/kvm/mmu.c kvm_err("No ptep for gva %lx\n", gva);
gva 1089 arch/mips/kvm/mmu.c kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
gva 1197 arch/mips/kvm/mmu.c unsigned long gva,
gva 1204 arch/mips/kvm/mmu.c if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
gva 1205 arch/mips/kvm/mmu.c if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
gva 1207 arch/mips/kvm/mmu.c } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
gva 1208 arch/mips/kvm/mmu.c KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
gva 1210 arch/mips/kvm/mmu.c index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
gva 1217 arch/mips/kvm/mmu.c if (!TLB_IS_VALID(*tlb, gva))
gva 1219 arch/mips/kvm/mmu.c if (write && !TLB_IS_DIRTY(*tlb, gva))
gva 1222 arch/mips/kvm/mmu.c if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
gva 304 arch/mips/kvm/tlb.c int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
gva 322 arch/mips/kvm/tlb.c write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
gva 364 arch/mips/kvm/tlb.c pa = entrylo[!!(gva & pagemaskbit)];
gva 378 arch/mips/kvm/tlb.c pa |= gva & ~(pagemask | pagemaskbit);
gva 23 arch/mips/kvm/trap_emul.c static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
gva 26 arch/mips/kvm/trap_emul.c gva_t kseg = KSEGX(gva);
gva 27 arch/mips/kvm/trap_emul.c gva_t gkseg = KVM_GUEST_KSEGX(gva);
gva 30 arch/mips/kvm/trap_emul.c gpa = CPHYSADDR(gva);
gva 32 arch/mips/kvm/trap_emul.c gpa = KVM_GUEST_CPHYSADDR(gva);
gva 34 arch/mips/kvm/trap_emul.c kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
gva 39 arch/mips/kvm/trap_emul.c kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
gva 183 arch/mips/kvm/vz.c static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
gva 186 arch/mips/kvm/vz.c return gva;
gva 714 arch/mips/kvm/vz.c static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gva 717 arch/mips/kvm/vz.c u32 gva32 = gva;
gva 720 arch/mips/kvm/vz.c if ((long)gva == (s32)gva32) {
gva 775 arch/mips/kvm/vz.c } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
gva 783 arch/mips/kvm/vz.c if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
gva 797 arch/mips/kvm/vz.c *gpa = gva & 0x07ffffffffffffff;
gva 803 arch/mips/kvm/vz.c return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
gva 493 arch/s390/kvm/gaccess.c static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
gva 534 arch/s390/kvm/gaccess.c tec->addr = gva >> PAGE_SHIFT;
gva 614 arch/s390/kvm/gaccess.c static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
gva 618 arch/s390/kvm/gaccess.c union vaddress vaddr = {.addr = gva};
gva 619 arch/s390/kvm/gaccess.c union raddress raddr = {.addr = gva};
gva 904 arch/s390/kvm/gaccess.c int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
gva 912 arch/s390/kvm/gaccess.c gva = kvm_s390_logical_to_effective(vcpu, gva);
gva 913 arch/s390/kvm/gaccess.c rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
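The arch/mips/kvm/trap_emul.c hits above show that trap-and-emulate KVM resolves a GVA without any table walk: the address is classified by MIPS segment and the segment bits are masked off. A minimal sketch of that shape, assuming the conventional MIPS32 segment constants rather than the kernel's own KSEGX/CPHYSADDR definitions:

/*
 * Hedged sketch of segment-based GVA->GPA mapping, modeled on the
 * kvm_trap_emul_gva_to_gpa_cb() lines listed above.  The constants are
 * the conventional MIPS32 values, assumed here for illustration.
 */
#include <stdint.h>

#define KSEG0        0x80000000u                    /* unmapped, cached */
#define KSEG1        0xa0000000u                    /* unmapped, uncached */
#define KSEGX(a)     ((uint32_t)(a) & 0xe0000000u)
#define CPHYSADDR(a) ((uint32_t)(a) & 0x1fffffffu)  /* drop segment bits */

static uint64_t gva_to_gpa(uint32_t gva)
{
	uint32_t kseg = KSEGX(gva);

	/* Unmapped segments translate by masking; no TLB is consulted. */
	if (kseg == KSEG0 || kseg == KSEG1)
		return CPHYSADDR(gva);

	return (uint64_t)-1; /* mapped segments need a TLB lookup instead */
}

Mapped segments (useg, kseg2/3) instead go through the guest TLB lookup paths also listed above, e.g. kvm_mips_guest_tlb_lookup().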
gva 916 arch/s390/kvm/gaccess.c if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
gva 918 arch/s390/kvm/gaccess.c return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
gva 923 arch/s390/kvm/gaccess.c rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
gva 925 arch/s390/kvm/gaccess.c return trans_exc(vcpu, rc, gva, 0, mode, prot);
gva 927 arch/s390/kvm/gaccess.c *gpa = kvm_s390_real_to_abs(vcpu, gva);
gva 929 arch/s390/kvm/gaccess.c return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
gva 938 arch/s390/kvm/gaccess.c int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
gva 947 arch/s390/kvm/gaccess.c currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
gva 948 arch/s390/kvm/gaccess.c rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
gva 949 arch/s390/kvm/gaccess.c gva += currlen;
gva 161 arch/s390/kvm/gaccess.h int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
gva 163 arch/s390/kvm/gaccess.h int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
gva 393 arch/x86/include/asm/kvm_host.h void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
gva 1433 arch/x86/include/asm/kvm_host.h int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
gva 1442 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
gva 1444 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
gva 1446 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
gva 1448 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
gva 1457 arch/x86/include/asm/kvm_host.h void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
gva 1458 arch/x86/include/asm/kvm_host.h void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
gva 2230 arch/x86/kvm/mmu.c static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
gva 3431 arch/x86/kvm/mmu.c static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
gva 3441 arch/x86/kvm/mmu.c vcpu_cache_mmio_info(vcpu, gva, gfn,
gva 5499 arch/x86/kvm/mmu.c int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
gva 5507 arch/x86/kvm/mmu.c gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
gva 5610 arch/x86/kvm/mmu.c void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
gva 5616 arch/x86/kvm/mmu.c if (is_noncanonical_address(gva, vcpu))
gva 5619 arch/x86/kvm/mmu.c mmu->invlpg(vcpu, gva, mmu->root_hpa);
gva 5634 arch/x86/kvm/mmu.c mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
gva 5636 arch/x86/kvm/mmu.c kvm_x86_ops->tlb_flush_gva(vcpu, gva);
gva 5641 arch/x86/kvm/mmu.c void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
gva 5648 arch/x86/kvm/mmu.c mmu->invlpg(vcpu, gva, mmu->root_hpa);
gva 5655 arch/x86/kvm/mmu.c mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
gva 5661 arch/x86/kvm/mmu.c kvm_x86_ops->tlb_flush_gva(vcpu, gva);
gva 892 arch/x86/kvm/paging_tmpl.h static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
gva 899 arch/x86/kvm/paging_tmpl.h vcpu_clear_mmio_info(vcpu, gva);
gva 913 arch/x86/kvm/paging_tmpl.h for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
gva 5537 arch/x86/kvm/svm.c static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
gva 5541 arch/x86/kvm/svm.c invlpga(gva, svm->vmcb->control.asid);
gva 772 arch/x86/kvm/trace.h TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
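The s390 check_gva_range() lines above (gaccess.c:938-949) show a common pattern: a guest range is validated by translating the first byte of each page it touches, clamping each step at a page boundary. A standalone rendering of that loop, with translate_one() as a hypothetical stand-in for guest_translate_address():

/*
 * Sketch of the page-granular range check visible in the s390
 * check_gva_range() lines above: translate the first byte of every page
 * the range touches.  translate_one() is a hypothetical stand-in for
 * guest_translate_address().
 */
#define PAGE_SIZE 4096ul

int translate_one(unsigned long gva, unsigned long *gpa);	/* hypothetical */

int check_range(unsigned long gva, unsigned long length)
{
	unsigned long gpa, currlen;
	int rc = 0;

	while (length > 0 && !rc) {
		/* Clamp the step to the end of the current page. */
		currlen = PAGE_SIZE - (gva % PAGE_SIZE);
		if (currlen > length)
			currlen = length;
		rc = translate_one(gva, &gpa);
		gva += currlen;
		length -= currlen;
	}
	return rc;
}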
gva 773 arch/x86/kvm/trace.h TP_ARGS(gva, gpa, write, gpa_match),
gva 776 arch/x86/kvm/trace.h __field(gva_t, gva)
gva 783 arch/x86/kvm/trace.h __entry->gva = gva;
gva 789 arch/x86/kvm/trace.h TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
gva 4333 arch/x86/kvm/vmx/nested.c gva_t gva;
gva 4338 arch/x86/kvm/vmx/nested.c sizeof(*vmpointer), &gva))
gva 4341 arch/x86/kvm/vmx/nested.c if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
gva 4607 arch/x86/kvm/vmx/nested.c gva_t gva = 0;
gva 4650 arch/x86/kvm/vmx/nested.c vmx_instruction_info, true, len, &gva))
gva 4653 arch/x86/kvm/vmx/nested.c if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e)) {
gva 4690 arch/x86/kvm/vmx/nested.c gva_t gva;
gva 4725 arch/x86/kvm/vmx/nested.c vmx_instruction_info, false, len, &gva))
gva 4727 arch/x86/kvm/vmx/nested.c if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
gva 4880 arch/x86/kvm/vmx/nested.c gva_t gva;
gva 4889 arch/x86/kvm/vmx/nested.c true, sizeof(gpa_t), &gva))
gva 4892 arch/x86/kvm/vmx/nested.c if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
gva 4906 arch/x86/kvm/vmx/nested.c gva_t gva;
gva 4935 arch/x86/kvm/vmx/nested.c vmx_instruction_info, false, sizeof(operand), &gva))
gva 4937 arch/x86/kvm/vmx/nested.c if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
gva 4963 arch/x86/kvm/vmx/nested.c gva_t gva;
gva 4995 arch/x86/kvm/vmx/nested.c vmx_instruction_info, false, sizeof(operand), &gva))
gva 4997 arch/x86/kvm/vmx/nested.c if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
gva 21 arch/x86/kvm/vmx/ops.h void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
gva 251 arch/x86/kvm/vmx/ops.h static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
gva 256 arch/x86/kvm/vmx/ops.h u64 gva;
gva 257 arch/x86/kvm/vmx/ops.h } operand = { vpid, 0, gva };
gva 259 arch/x86/kvm/vmx/ops.h vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
gva 381 arch/x86/kvm/vmx/vmx.c noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
gva 384 arch/x86/kvm/vmx/vmx.c ext, vpid, gva);
gva 5389 arch/x86/kvm/vmx/vmx.c gva_t gva;
gva 5416 arch/x86/kvm/vmx/vmx.c sizeof(operand), &gva))
gva 5419 arch/x86/kvm/vmx/vmx.c if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
gva 5356 arch/x86/kvm/x86.c gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
gva 5360 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
gva 5363 arch/x86/kvm/x86.c gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
gva 5368 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
gva 5371 arch/x86/kvm/x86.c gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
gva 5376 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
gva 5380 arch/x86/kvm/x86.c gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
gva 5383 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
gva 5565 arch/x86/kvm/x86.c static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gva 5573 arch/x86/kvm/x86.c trace_vcpu_match_mmio(gva, gpa, write, true);
gva 5580 arch/x86/kvm/x86.c static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gva 5592 arch/x86/kvm/x86.c if (vcpu_match_mmio_gva(vcpu, gva)
gva 5596 arch/x86/kvm/x86.c (gva & (PAGE_SIZE - 1));
gva 5597 arch/x86/kvm/x86.c trace_vcpu_match_mmio(gva, *gpa, write, false);
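The kvm_mmu_gva_to_gpa_{read,fetch,write,system}() wrappers above (x86.c:5356-5383) all funnel into the same walk_mmu->gva_to_gpa() callback and differ only in the access mask they pass; the _system variant passes 0. A hedged sketch of that dispatch shape, with ACC_WRITE/ACC_FETCH as assumed stand-ins for the kernel's PFERR_*_MASK bits:

/*
 * Sketch of the dispatch shape shared by the kvm_mmu_gva_to_gpa_*()
 * wrappers listed above.  ACC_WRITE/ACC_FETCH are hypothetical bit
 * positions, not the kernel's PFERR_*_MASK values.
 */
#include <stdint.h>

#define ACC_WRITE (1u << 1)	/* assumed bit positions */
#define ACC_FETCH (1u << 4)

typedef uint64_t gpa_t;
typedef unsigned long gva_t;

struct mmu {
	gpa_t (*gva_to_gpa)(struct mmu *mmu, gva_t gva, uint32_t access);
};

static gpa_t gva_to_gpa_read(struct mmu *mmu, gva_t gva, uint32_t base)
{
	return mmu->gva_to_gpa(mmu, gva, base);		/* no extra rights */
}

static gpa_t gva_to_gpa_write(struct mmu *mmu, gva_t gva, uint32_t base)
{
	return mmu->gva_to_gpa(mmu, gva, base | ACC_WRITE);
}

static gpa_t gva_to_gpa_fetch(struct mmu *mmu, gva_t gva, uint32_t base)
{
	return mmu->gva_to_gpa(mmu, gva, base | ACC_FETCH);
}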
gva 5601 arch/x86/kvm/x86.c *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
gva 5606 arch/x86/kvm/x86.c return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
gva 187 arch/x86/kvm/x86.h gva_t gva, gfn_t gfn, unsigned access)
gva 198 arch/x86/kvm/x86.h vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
gva 215 arch/x86/kvm/x86.h static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
gva 217 arch/x86/kvm/x86.h if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
gva 223 arch/x86/kvm/x86.h static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
gva 226 arch/x86/kvm/x86.h vcpu->arch.mmio_gva == (gva & PAGE_MASK))
gva 285 include/trace/events/kvm.h TP_PROTO(u64 gva, u64 gfn),
gva 287 include/trace/events/kvm.h TP_ARGS(gva, gfn),
gva 290 include/trace/events/kvm.h __field(__u64, gva)
gva 295 include/trace/events/kvm.h __entry->gva = gva;
gva 299 include/trace/events/kvm.h TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
gva 304 include/trace/events/kvm.h TP_PROTO(u64 gva, u64 gfn),
gva 306 include/trace/events/kvm.h TP_ARGS(gva, gfn)
gva 311 include/trace/events/kvm.h TP_PROTO(u64 gva, u64 gfn),
gva 313 include/trace/events/kvm.h TP_ARGS(gva, gfn)
gva 318 include/trace/events/kvm.h TP_PROTO(u64 token, u64 gva),
gva 320 include/trace/events/kvm.h TP_ARGS(token, gva),
gva 324 include/trace/events/kvm.h __field(__u64, gva)
gva 329 include/trace/events/kvm.h __entry->gva = gva;
gva 332 include/trace/events/kvm.h TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
gva 338 include/trace/events/kvm.h TP_PROTO(u64 token, u64 gva),
gva 340 include/trace/events/kvm.h TP_ARGS(token, gva)
gva 345 include/trace/events/kvm.h TP_PROTO(u64 token, u64 gva),
gva 347 include/trace/events/kvm.h TP_ARGS(token, gva)
gva 352 include/trace/events/kvm.h TP_PROTO(unsigned long address, u64 gva),
gva 353 include/trace/events/kvm.h TP_ARGS(address, gva),
gva 357 include/trace/events/kvm.h __field(u64, gva)
gva 362 include/trace/events/kvm.h __entry->gva = gva;
gva 365 include/trace/events/kvm.h TP_printk("gva %#llx address %#lx", __entry->gva,
gva 80 tools/testing/selftests/kvm/include/kvm_util.h int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
gva 109 tools/testing/selftests/kvm/include/kvm_util.h void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
gva 111 tools/testing/selftests/kvm/include/kvm_util.h vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
gva 24 tools/testing/selftests/kvm/lib/aarch64/processor.c static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
gva 29 tools/testing/selftests/kvm/lib/aarch64/processor.c return (gva >> shift) & mask;
gva 32 tools/testing/selftests/kvm/lib/aarch64/processor.c static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
gva 40 tools/testing/selftests/kvm/lib/aarch64/processor.c return (gva >> shift) & mask;
gva 43 tools/testing/selftests/kvm/lib/aarch64/processor.c static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
gva 51 tools/testing/selftests/kvm/lib/aarch64/processor.c return (gva >> shift) & mask;
gva 54 tools/testing/selftests/kvm/lib/aarch64/processor.c static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
gva 57 tools/testing/selftests/kvm/lib/aarch64/processor.c return (gva >> vm->page_shift) & mask;
gva 148 tools/testing/selftests/kvm/lib/aarch64/processor.c vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
gva 155 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
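The aarch64 selftest helpers above (pgd_index() through pte_index()) all extract a level's table index as (gva >> shift) & mask. A sketch assuming 4K pages with 9 index bits per level, so the level shifts are 12, 21, 30 and 39; the real library derives shift and mask from struct kvm_vm fields such as page_shift and the configured VA size:

/*
 * Per-level index extraction in the style of the aarch64 selftest
 * helpers listed above.  The constants assume a 4-level, 4K-granule
 * layout; they are illustrative, not the library's own fields.
 */
#include <stdint.h>

#define PAGE_SHIFT	12
#define BITS_PER_LEVEL	9
#define LEVEL_MASK	((1ull << BITS_PER_LEVEL) - 1)

/* level 0 = PTE, 1 = PMD, 2 = PUD, 3 = PGD */
static uint64_t table_index(uint64_t gva, int level)
{
	unsigned int shift = PAGE_SHIFT + level * BITS_PER_LEVEL;

	return (gva >> shift) & LEVEL_MASK;
}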
gva 161 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
gva 166 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
gva 171 tools/testing/selftests/kvm/lib/aarch64/processor.c ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
gva 179 tools/testing/selftests/kvm/lib/aarch64/processor.c return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
gva 183 tools/testing/selftests/kvm/lib/aarch64/processor.c "gva: 0x%lx", gva);
gva 99 tools/testing/selftests/kvm/lib/aarch64/ucall.c vm_vaddr_t gva;
gva 103 tools/testing/selftests/kvm/lib/aarch64/ucall.c memcpy(&gva, run->mmio.data, sizeof(gva));
gva 104 tools/testing/selftests/kvm/lib/aarch64/ucall.c memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
gva 506 tools/testing/selftests/kvm/lib/kvm_util.c int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
gva 521 tools/testing/selftests/kvm/lib/kvm_util.c uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
gva 1615 tools/testing/selftests/kvm/lib/kvm_util.c void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
gva 1617 tools/testing/selftests/kvm/lib/kvm_util.c return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
gva 70 tools/testing/selftests/kvm/lib/s390x/processor.c void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
gva 76 tools/testing/selftests/kvm/lib/s390x/processor.c TEST_ASSERT((gva % vm->page_size) == 0,
gva 79 tools/testing/selftests/kvm/lib/s390x/processor.c gva, vm->page_size);
gva 81 tools/testing/selftests/kvm/lib/s390x/processor.c (gva >> vm->page_shift)),
gva 83 tools/testing/selftests/kvm/lib/s390x/processor.c gva);
gva 87 tools/testing/selftests/kvm/lib/s390x/processor.c gva, vm->page_size);
gva 91 tools/testing/selftests/kvm/lib/s390x/processor.c gva, vm->max_gfn, vm->page_size);
gva 96 tools/testing/selftests/kvm/lib/s390x/processor.c idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
gva 103 tools/testing/selftests/kvm/lib/s390x/processor.c idx = (gva >> 12) & 0x0ffu; /* page index */
gva 130 tools/testing/selftests/kvm/lib/s390x/processor.c vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
gva 140 tools/testing/selftests/kvm/lib/s390x/processor.c idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
gva 143 tools/testing/selftests/kvm/lib/s390x/processor.c gva);
gva 147 tools/testing/selftests/kvm/lib/s390x/processor.c idx = (gva >> 12) & 0x0ffu; /* page index */
gva 150 tools/testing/selftests/kvm/lib/s390x/processor.c "No page mapping for vm virtual address 0x%lx", gva);
gva 152 tools/testing/selftests/kvm/lib/s390x/processor.c return (entry[idx] & ~0xffful) + (gva & 0xffful);
gva 542 tools/testing/selftests/kvm/lib/x86_64/processor.c vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
gva 553 tools/testing/selftests/kvm/lib/x86_64/processor.c index[0] = (gva >> 12) & 0x1ffu;
gva 554 tools/testing/selftests/kvm/lib/x86_64/processor.c index[1] = (gva >> 21) & 0x1ffu;
gva 555 tools/testing/selftests/kvm/lib/x86_64/processor.c index[2] = (gva >> 30) & 0x1ffu;
gva 556 tools/testing/selftests/kvm/lib/x86_64/processor.c index[3] = (gva >> 39) & 0x1ffu;
gva 576 tools/testing/selftests/kvm/lib/x86_64/processor.c return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);
gva 580 tools/testing/selftests/kvm/lib/x86_64/processor.c "gva: 0x%lx", gva);
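The x86_64 addr_gva2gpa() lines above give the full 4-level index math and the final offset combination. A sketch of that walk under the same assumptions (4K pages, no huge-page or present-bit handling); read_entry() is a hypothetical accessor standing in for the library's addr_gpa2hva() dereference:

/*
 * Sketch of the 4-level walk implied by the x86_64 selftest lines above.
 * read_entry() is hypothetical; the index math and the final offset
 * combination follow the listing.
 */
#include <stdint.h>

#define FRAME_MASK 0x000ffffffffff000ull	/* bits 51:12 of an entry */

uint64_t read_entry(uint64_t table_gpa, unsigned int idx);	/* hypothetical */

static uint64_t gva2gpa(uint64_t pml4_gpa, uint64_t gva)
{
	unsigned int index[4];
	uint64_t table = pml4_gpa;
	int level;

	index[0] = (gva >> 12) & 0x1ffu;	/* PTE */
	index[1] = (gva >> 21) & 0x1ffu;	/* PDE */
	index[2] = (gva >> 30) & 0x1ffu;	/* PDPTE */
	index[3] = (gva >> 39) & 0x1ffu;	/* PML4E */

	/* Descend from the PML4 to the PTE, one table per level. */
	for (level = 3; level >= 0; level--)
		table = read_entry(table, index[level]) & FRAME_MASK;

	return table + (gva & 0xfffu);
}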