gfn 471 arch/mips/kvm/mmu.c int (*handler)(struct kvm *kvm, gfn_t gfn,
gfn 486 arch/mips/kvm/mmu.c gfn_t gfn, gfn_end;
gfn 498 arch/mips/kvm/mmu.c gfn = hva_to_gfn_memslot(hva_start, memslot);
gfn 501 arch/mips/kvm/mmu.c ret |= handler(kvm, gfn, gfn_end, memslot, data);
gfn 508 arch/mips/kvm/mmu.c static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
gfn 511 arch/mips/kvm/mmu.c kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
gfn 523 arch/mips/kvm/mmu.c static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
gfn 526 arch/mips/kvm/mmu.c gpa_t gpa = gfn << PAGE_SHIFT;
gfn 565 arch/mips/kvm/mmu.c static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
gfn 568 arch/mips/kvm/mmu.c return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
gfn 571 arch/mips/kvm/mmu.c static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
gfn 574 arch/mips/kvm/mmu.c gpa_t gpa = gfn << PAGE_SHIFT;
gfn 615 arch/mips/kvm/mmu.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 646 arch/mips/kvm/mmu.c mark_page_dirty(kvm, gfn);
gfn 692 arch/mips/kvm/mmu.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 733 arch/mips/kvm/mmu.c pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
gfn 761 arch/mips/kvm/mmu.c mark_page_dirty(kvm, gfn);
gfn 216 arch/powerpc/include/asm/kvm_book3s.h unsigned long gfn);
gfn 218 arch/powerpc/include/asm/kvm_book3s.h unsigned long gfn);
gfn 220 arch/powerpc/include/asm/kvm_book3s.h unsigned long gfn);
gfn 243 arch/powerpc/include/asm/kvm_book3s.h unsigned long gfn, unsigned long psize);
gfn 430 arch/powerpc/kvm/book3s.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 448 arch/powerpc/kvm/book3s.c return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
gfn 88 arch/powerpc/kvm/book3s_64_mmu_host.c unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
gfn 127 arch/powerpc/kvm/book3s_64_mmu_host.c mark_page_dirty(vcpu->kvm, gfn);
gfn 500 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gpa, gfn, hva, pfn;
gfn 562 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn = gpa >> PAGE_SHIFT;
gfn 563 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot = gfn_to_memslot(kvm, gfn);
gfn 591 arch/powerpc/kvm/book3s_64_mmu_hv.c hva = gfn_to_hva_memslot(memslot, gfn);
gfn 767 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn);
gfn 782 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn_t gfn, gfn_end;
gfn 793 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn = hva_to_gfn_memslot(hva_start, memslot);
gfn 796 arch/powerpc/kvm/book3s_64_mmu_hv.c for (; gfn < gfn_end; ++gfn) {
gfn 797 arch/powerpc/kvm/book3s_64_mmu_hv.c ret = handler(kvm, memslot, gfn);
gfn 814 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long *rmapp, unsigned long gfn)
gfn 838 arch/powerpc/kvm/book3s_64_mmu_hv.c hpte_rpn(ptel, psize) == gfn) {
gfn 846 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_update_dirty_map(memslot, gfn, psize);
gfn 855 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn)
gfn 861 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
gfn 884 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
gfn 903 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn;
gfn 907 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn = memslot->base_gfn;
gfn 914 arch/powerpc/kvm/book3s_64_mmu_hv.c for (n = memslot->npages; n; --n, ++gfn) {
gfn 922 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm_unmap_rmapp(kvm, memslot, gfn);
gfn 928 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn)
gfn 936 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
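The MIPS and Book3S HV entries above share one pattern: an HVA range is clamped to each memslot, converted to a [gfn, gfn_end) span with hva_to_gfn_memslot(), and a per-range handler is invoked. A minimal standalone C sketch of that arithmetic follows; the struct and helper here are simplified stand-ins, not the kernel definitions.

```c
/* Sketch (not kernel code): HVA -> gfn span conversion as used by the
 * kvm_handle_hva_range()-style walkers referenced above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;            /* first guest frame covered by the slot */
	uint64_t npages;           /* number of guest pages in the slot */
	uint64_t userspace_addr;   /* HVA where the slot is mapped */
};

/* hva_to_gfn_memslot(): offset within the slot's HVA range, page-shifted,
 * added to the slot's base gfn. */
static gfn_t hva_to_gfn_memslot(uint64_t hva, const struct memslot *slot)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0x100, .npages = 512,
				.userspace_addr = 0x7f0000000000ull };
	uint64_t hva_start = slot.userspace_addr + 3 * 4096;
	uint64_t hva_end = hva_start + 5 * 4096;

	gfn_t gfn = hva_to_gfn_memslot(hva_start, &slot);
	/* round the end up so a partial last page is still covered */
	gfn_t gfn_end = hva_to_gfn_memslot(hva_end + 4095, &slot);

	/* a handler would now act on [gfn, gfn_end), e.g. flush those GPAs */
	printf("gfn span: %#llx..%#llx\n",
	       (unsigned long long)gfn, (unsigned long long)gfn_end);
	return 0;
}
```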
gfn 991 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn)
gfn 999 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
gfn 1133 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn;
gfn 1137 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn = vpa->gpa >> PAGE_SHIFT;
gfn 1138 arch/powerpc/kvm/book3s_64_mmu_hv.c if (gfn < memslot->base_gfn ||
gfn 1139 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn >= memslot->base_gfn + memslot->npages)
gfn 1144 arch/powerpc/kvm/book3s_64_mmu_hv.c __set_bit_le(gfn - memslot->base_gfn, map);
gfn 1174 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn = gpa >> PAGE_SHIFT;
gfn 1181 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot = gfn_to_memslot(kvm, gfn);
gfn 1184 arch/powerpc/kvm/book3s_64_mmu_hv.c hva = gfn_to_hva_memslot(memslot, gfn);
gfn 1206 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn;
gfn 1215 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn = gpa >> PAGE_SHIFT;
gfn 1217 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot = gfn_to_memslot(kvm, gfn);
gfn 1219 arch/powerpc/kvm/book3s_64_mmu_hv.c set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
gfn 1290 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long gfn = hpte_rpn(guest_rpte, apsize);
gfn 1293 arch/powerpc/kvm/book3s_64_mmu_hv.c __gfn_to_memslot(kvm_memslots(kvm), gfn);
gfn 1297 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
gfn 1300 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
gfn 382 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gfn = gpa >> PAGE_SHIFT;
gfn 394 arch/powerpc/kvm/book3s_64_mmu_radix.c memslot = gfn_to_memslot(kvm, gfn);
gfn 411 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_update_dirty_map(memslot, gfn, page_size);
gfn 775 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long hva, gfn = gpa >> PAGE_SHIFT;
gfn 793 arch/powerpc/kvm/book3s_64_mmu_radix.c hva = gfn_to_hva_memslot(memslot, gfn);
gfn 800 arch/powerpc/kvm/book3s_64_mmu_radix.c pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
gfn 893 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gpa, gfn;
gfn 914 arch/powerpc/kvm/book3s_64_mmu_radix.c gfn = gpa >> PAGE_SHIFT;
gfn 919 arch/powerpc/kvm/book3s_64_mmu_radix.c memslot = gfn_to_memslot(kvm, gfn);
gfn 969 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gfn)
gfn 972 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gpa = gfn << PAGE_SHIFT;
gfn 984 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gfn)
gfn 987 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gpa = gfn << PAGE_SHIFT;
gfn 998 arch/powerpc/kvm/book3s_64_mmu_radix.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
gfn 1009 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gfn)
gfn 1012 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gpa = gfn << PAGE_SHIFT;
gfn 1026 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gfn = memslot->base_gfn + pagenum;
gfn 1027 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long gpa = gfn << PAGE_SHIFT;
gfn 1043 arch/powerpc/kvm/book3s_64_mmu_radix.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
gfn 336 arch/powerpc/kvm/book3s_64_vio.c unsigned long gfn = tce >> PAGE_SHIFT;
gfn 339 arch/powerpc/kvm/book3s_64_vio.c memslot = search_memslots(kvm_memslots(kvm), gfn);
gfn 343 arch/powerpc/kvm/book3s_64_vio.c *ua = __gfn_to_hva_memslot(memslot, gfn) |
gfn 81 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long gfn = tce >> PAGE_SHIFT;
gfn 84 arch/powerpc/kvm/book3s_64_vio_hv.c memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
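Several entries above record dirty pages by setting bit (gfn - base_gfn) in a per-slot bitmap (__set_bit_le(), set_bit_le(), set_dirty_bits_atomic()). The sketch below shows that bookkeeping in standalone C; little-endian bit ordering and atomicity, which the kernel helpers handle, are deliberately ignored here.

```c
/* Sketch (not kernel code): per-memslot dirty bitmap marking. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static void mark_page_dirty_in_slot(unsigned long *dirty_bitmap,
				    uint64_t base_gfn, uint64_t gfn)
{
	uint64_t rel = gfn - base_gfn;  /* bit index relative to the slot */

	dirty_bitmap[rel / (8 * sizeof(unsigned long))] |=
		1UL << (rel % (8 * sizeof(unsigned long)));
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };
	uint64_t gpa = 0x123456;                /* guest physical address */
	uint64_t gfn = gpa >> PAGE_SHIFT;       /* guest frame number */

	mark_page_dirty_in_slot(bitmap, 0x100, gfn);
	printf("word0=%#lx\n", bitmap[0]);
	return 0;
}
```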
gfn 88 arch/powerpc/kvm/book3s_64_vio_hv.c *ua = __gfn_to_hva_memslot(memslot, gfn) |
gfn 92 arch/powerpc/kvm/book3s_64_vio_hv.c *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
gfn 882 arch/powerpc/kvm/book3s_hv_nested.c unsigned long gfn, end_gfn;
gfn 887 arch/powerpc/kvm/book3s_hv_nested.c gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
gfn 888 arch/powerpc/kvm/book3s_hv_nested.c end_gfn = gfn + (nbytes >> PAGE_SHIFT);
gfn 893 arch/powerpc/kvm/book3s_hv_nested.c for (; gfn < end_gfn; gfn++) {
gfn 894 arch/powerpc/kvm/book3s_hv_nested.c unsigned long *rmap = &memslot->arch.rmap[gfn];
gfn 1273 arch/powerpc/kvm/book3s_hv_nested.c unsigned long n_gpa, gpa, gfn, perm = 0UL;
gfn 1331 arch/powerpc/kvm/book3s_hv_nested.c gfn = gpa >> PAGE_SHIFT;
gfn 1335 arch/powerpc/kvm/book3s_hv_nested.c memslot = gfn_to_memslot(kvm, gfn);
gfn 1383 arch/powerpc/kvm/book3s_hv_nested.c gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
gfn 1416 arch/powerpc/kvm/book3s_hv_nested.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
gfn 110 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long gfn, unsigned long psize)
gfn 117 arch/powerpc/kvm/book3s_hv_rm_mmu.c gfn -= memslot->base_gfn;
gfn 118 arch/powerpc/kvm/book3s_hv_rm_mmu.c set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
gfn 126 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long gfn;
gfn 130 arch/powerpc/kvm/book3s_hv_rm_mmu.c gfn = hpte_rpn(hpte_gr, psize);
gfn 131 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
gfn 133 arch/powerpc/kvm/book3s_hv_rm_mmu.c kvmppc_update_dirty_map(memslot, gfn, psize);
gfn 144 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long gfn;
gfn 146 arch/powerpc/kvm/book3s_hv_rm_mmu.c gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
gfn 147 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
gfn 151 arch/powerpc/kvm/book3s_hv_rm_mmu.c *gfnp = gfn;
gfn 155 arch/powerpc/kvm/book3s_hv_rm_mmu.c rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
gfn 169 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long gfn;
gfn 173 arch/powerpc/kvm/book3s_hv_rm_mmu.c rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
gfn 192 arch/powerpc/kvm/book3s_hv_rm_mmu.c kvmppc_update_dirty_map(memslot, gfn,
gfn 201 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long i, pa, gpa, gfn, psize;
gfn 231 arch/powerpc/kvm/book3s_hv_rm_mmu.c gfn = gpa >> PAGE_SHIFT;
gfn 232 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
gfn 246 arch/powerpc/kvm/book3s_hv_rm_mmu.c slot_fn = gfn - memslot->base_gfn;
gfn 250 arch/powerpc/kvm/book3s_hv_rm_mmu.c hva = __gfn_to_hva_memslot(memslot, gfn);
gfn 897 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
gfn 902 arch/powerpc/kvm/book3s_hv_rm_mmu.c gfn = gpa >> PAGE_SHIFT;
gfn 903 arch/powerpc/kvm/book3s_hv_rm_mmu.c memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
gfn 908 arch/powerpc/kvm/book3s_hv_rm_mmu.c hva = __gfn_to_hva_memslot(memslot, gfn);
gfn 403 arch/powerpc/kvm/book3s_pr.c gfn_t gfn, gfn_end;
gfn 414 arch/powerpc/kvm/book3s_pr.c gfn = hva_to_gfn_memslot(hva_start, memslot);
gfn 417 arch/powerpc/kvm/book3s_pr.c kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
gfn 565 arch/powerpc/kvm/book3s_xive_native.c gfn_t gfn;
gfn 639 arch/powerpc/kvm/book3s_xive_native.c gfn = gpa_to_gfn(kvm_eq.qaddr);
gfn 641 arch/powerpc/kvm/book3s_xive_native.c page_size = kvm_host_page_size(vcpu, gfn);
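The book3s_64_vio entries above resolve a TCE to a userspace address: shift to get the gfn, locate the slot, compute the page's HVA, and OR back the sub-page offset (the truncated "... | " in the snippet is assumed to merge the low bits of the original address). A hedged standalone sketch of that lookup, with simplified types:

```c
/* Sketch (not kernel code): guest address -> userspace address, keeping
 * the offset within the page, mirroring the TCE lookup above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ull << PAGE_SHIFT)

struct memslot { uint64_t base_gfn, npages, userspace_addr; };

static int gpa_to_ua(const struct memslot *slot, uint64_t gpa, uint64_t *ua)
{
	uint64_t gfn = gpa >> PAGE_SHIFT;

	if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
		return -1;              /* address not covered by this slot */

	*ua = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
	*ua |= gpa & (PAGE_SIZE - 1);   /* keep the offset within the page */
	return 0;
}

int main(void)
{
	struct memslot slot = { 0x100, 256, 0x7f0000000000ull };
	uint64_t ua;

	if (!gpa_to_ua(&slot, 0x100000ull + 0x42, &ua))
		printf("ua = %#llx\n", (unsigned long long)ua);
	return 0;
}
```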
gfn 648 arch/powerpc/kvm/book3s_xive_native.c page = gfn_to_page(kvm, gfn);
gfn 1239 arch/powerpc/kvm/booke.c gfn_t gfn;
gfn 1268 arch/powerpc/kvm/booke.c gfn = gpaddr >> PAGE_SHIFT;
gfn 1270 arch/powerpc/kvm/booke.c if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
gfn 1296 arch/powerpc/kvm/booke.c gfn_t gfn;
gfn 1316 arch/powerpc/kvm/booke.c gfn = gpaddr >> PAGE_SHIFT;
gfn 1318 arch/powerpc/kvm/booke.c if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
gfn 323 arch/powerpc/kvm/e500_mmu_host.c u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
gfn 353 arch/powerpc/kvm/e500_mmu_host.c slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
gfn 354 arch/powerpc/kvm/e500_mmu_host.c hva = gfn_to_hva_memslot(slot, gfn);
gfn 381 arch/powerpc/kvm/e500_mmu_host.c slot_start = pfn - (gfn - slot->base_gfn);
gfn 409 arch/powerpc/kvm/e500_mmu_host.c gfn_start = gfn & ~(tsize_pages - 1);
gfn 412 arch/powerpc/kvm/e500_mmu_host.c if (gfn_start + pfn - gfn < start)
gfn 414 arch/powerpc/kvm/e500_mmu_host.c if (gfn_end + pfn - gfn > end)
gfn 416 arch/powerpc/kvm/e500_mmu_host.c if ((gfn & (tsize_pages - 1)) !=
gfn 449 arch/powerpc/kvm/e500_mmu_host.c pfn = gfn_to_pfn_memslot(slot, gfn);
gfn 453 arch/powerpc/kvm/e500_mmu_host.c __func__, (long)gfn);
gfn 488 arch/powerpc/kvm/e500_mmu_host.c __func__, (long)gfn, pfn);
gfn 560 arch/powerpc/kvm/e500_mmu_host.c u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
gfn 567 arch/powerpc/kvm/e500_mmu_host.c r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
gfn 612 arch/powerpc/kvm/e500_mmu_host.c gfn_t gfn = gpaddr >> PAGE_SHIFT;
gfn 613 arch/powerpc/kvm/e500_mmu_host.c kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
gfn 1916 arch/s390/kvm/kvm-s390.c static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
gfn 1922 arch/s390/kvm/kvm-s390.c if (gfn >= memslots[slot].base_gfn &&
gfn 1923 arch/s390/kvm/kvm-s390.c gfn < memslots[slot].base_gfn + memslots[slot].npages)
gfn 1929 arch/s390/kvm/kvm-s390.c if (gfn >= memslots[slot].base_gfn)
gfn 1938 arch/s390/kvm/kvm-s390.c if (gfn >= memslots[start].base_gfn &&
gfn 1939 arch/s390/kvm/kvm-s390.c gfn < memslots[start].base_gfn + memslots[start].npages) {
gfn 1125 arch/s390/kvm/priv.c unsigned long gfn, hva, res, pgstev, ptev;
gfn 1134 arch/s390/kvm/priv.c gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
gfn 1135 arch/s390/kvm/priv.c hva = gfn_to_hva(vcpu->kvm, gfn);
gfn 1170 arch/s390/kvm/priv.c cbrlo[entries] = gfn << PAGE_SHIFT;
gfn 1174 arch/s390/kvm/priv.c struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
gfn 1177 arch/s390/kvm/priv.c if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
gfn 120 arch/x86/include/asm/kvm_host.h static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
gfn 123 arch/x86/include/asm/kvm_host.h return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
gfn 327 arch/x86/include/asm/kvm_host.h gfn_t gfn;
gfn 1107 arch/x86/include/asm/kvm_host.h u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
gfn 1225 arch/x86/include/asm/kvm_host.h gfn_t gfn;
gfn 1406 arch/x86/include/asm/kvm_host.h gfn_t gfn, void *data, int offset, int len,
gfn 1432 arch/x86/include/asm/kvm_host.h int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
gfn 1590 arch/x86/include/asm/kvm_host.h extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 58 arch/x86/include/asm/kvm_page_track.h struct kvm_memory_slot *slot, gfn_t gfn,
gfn 61 arch/x86/include/asm/kvm_page_track.h struct kvm_memory_slot *slot, gfn_t gfn,
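The x86 kvm_host.h entries above show gfn_to_index(), which maps a gfn to its slot-relative index at a given hugepage level. A standalone sketch follows; the 9-bits-per-level shift matches x86's KVM_HPAGE_GFN_SHIFT(), but other architectures may differ.

```c
/* Sketch (not kernel code): slot-relative (huge)page index for a gfn. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define KVM_HPAGE_GFN_SHIFT(level) (((level) - 1) * 9) /* x86-style */

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* index of the (huge)page containing gfn, relative to the slot */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	/* level 1 = 4K pages, level 2 = 2M pages */
	printf("4K idx %#llx, 2M idx %#llx\n",
	       (unsigned long long)gfn_to_index(0x10345, 0x10000, 1),
	       (unsigned long long)gfn_to_index(0x10345, 0x10000, 2));
	return 0;
}
```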
gfn 63 arch/x86/include/asm/kvm_page_track.h bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 260 arch/x86/include/asm/xen/page.h static inline unsigned long gfn_to_pfn(unsigned long gfn)
gfn 263 arch/x86/include/asm/xen/page.h return gfn;
gfn 265 arch/x86/include/asm/xen/page.h return mfn_to_pfn(gfn);
gfn 939 arch/x86/kvm/hyperv.c u64 gfn;
gfn 951 arch/x86/kvm/hyperv.c gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
gfn 956 arch/x86/kvm/hyperv.c if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
gfn 965 arch/x86/kvm/hyperv.c if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
gfn 974 arch/x86/kvm/hyperv.c if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
gfn 988 arch/x86/kvm/hyperv.c kvm_write_guest(kvm, gfn_to_gpa(gfn),
gfn 1008 arch/x86/kvm/hyperv.c u64 gfn;
gfn 1019 arch/x86/kvm/hyperv.c gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
gfn 1020 arch/x86/kvm/hyperv.c addr = gfn_to_hva(kvm, gfn);
gfn 1028 arch/x86/kvm/hyperv.c mark_page_dirty(kvm, gfn);
gfn 1111 arch/x86/kvm/hyperv.c u64 gfn;
gfn 1120 arch/x86/kvm/hyperv.c gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
gfn 1121 arch/x86/kvm/hyperv.c addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
gfn 1133 arch/x86/kvm/hyperv.c kvm_vcpu_mark_page_dirty(vcpu, gfn);
gfn 1135 arch/x86/kvm/hyperv.c gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
gfn 456 arch/x86/kvm/mmu.c static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
gfn 461 arch/x86/kvm/mmu.c u64 gpa = gfn << PAGE_SHIFT;
gfn 469 arch/x86/kvm/mmu.c trace_mark_mmio_spte(sptep, gfn, access, gen);
gfn 488 arch/x86/kvm/mmu.c static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
gfn 492 arch/x86/kvm/mmu.c mark_mmio_spte(vcpu, sptep, gfn, access);
gfn 1160 arch/x86/kvm/mmu.c return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
gfn 1163 arch/x86/kvm/mmu.c static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
gfn 1166 arch/x86/kvm/mmu.c sp->gfns[index] = gfn;
gfn 1170 arch/x86/kvm/mmu.c if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
gfn 1173 arch/x86/kvm/mmu.c sp->gfn,
gfn 1174 arch/x86/kvm/mmu.c kvm_mmu_page_get_gfn(sp, index), gfn);
gfn 1181 arch/x86/kvm/mmu.c static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
gfn 1187 arch/x86/kvm/mmu.c idx = gfn_to_index(gfn, slot->base_gfn, level);
gfn 1192 arch/x86/kvm/mmu.c gfn_t gfn, int count)
gfn 1198 arch/x86/kvm/mmu.c linfo = lpage_info_slot(gfn, slot, i);
gfn 1204 arch/x86/kvm/mmu.c void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
gfn 1206 arch/x86/kvm/mmu.c update_gfn_disallow_lpage_count(slot, gfn, 1);
gfn 1209 arch/x86/kvm/mmu.c void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
gfn 1211 arch/x86/kvm/mmu.c update_gfn_disallow_lpage_count(slot, gfn, -1);
gfn 1218 arch/x86/kvm/mmu.c gfn_t gfn;
gfn 1221 arch/x86/kvm/mmu.c gfn = sp->gfn;
gfn 1223 arch/x86/kvm/mmu.c slot = __gfn_to_memslot(slots, gfn);
gfn 1227 arch/x86/kvm/mmu.c return kvm_slot_page_track_add_page(kvm, slot, gfn,
gfn 1230 arch/x86/kvm/mmu.c kvm_mmu_gfn_disallow_lpage(slot, gfn);
gfn 1248 arch/x86/kvm/mmu.c gfn_t gfn;
gfn 1251 arch/x86/kvm/mmu.c gfn = sp->gfn;
gfn 1253 arch/x86/kvm/mmu.c slot = __gfn_to_memslot(slots, gfn);
gfn 1255 arch/x86/kvm/mmu.c return kvm_slot_page_track_remove_page(kvm, slot, gfn,
gfn 1258 arch/x86/kvm/mmu.c kvm_mmu_gfn_allow_lpage(slot, gfn);
gfn 1268 arch/x86/kvm/mmu.c static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
gfn 1274 arch/x86/kvm/mmu.c linfo = lpage_info_slot(gfn, slot, level);
gfn 1281 arch/x86/kvm/mmu.c static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 1286 arch/x86/kvm/mmu.c slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 1287 arch/x86/kvm/mmu.c return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
gfn 1290 arch/x86/kvm/mmu.c static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 1295 arch/x86/kvm/mmu.c page_size = kvm_host_page_size(vcpu, gfn);
gfn 1319 arch/x86/kvm/mmu.c gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 1324 arch/x86/kvm/mmu.c slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 1469 arch/x86/kvm/mmu.c static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
gfn 1474 arch/x86/kvm/mmu.c idx = gfn_to_index(gfn, slot->base_gfn, level);
gfn 1478 arch/x86/kvm/mmu.c static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
gfn 1485 arch/x86/kvm/mmu.c slot = __gfn_to_memslot(slots, gfn);
gfn 1486 arch/x86/kvm/mmu.c return __gfn_to_rmap(gfn, sp->role.level, slot);
gfn 1497 arch/x86/kvm/mmu.c static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
gfn 1503 arch/x86/kvm/mmu.c kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
gfn 1504 arch/x86/kvm/mmu.c rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
gfn 1511 arch/x86/kvm/mmu.c gfn_t gfn;
gfn 1515 arch/x86/kvm/mmu.c gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
gfn 1516 arch/x86/kvm/mmu.c rmap_head = gfn_to_rmap(kvm, gfn, sp);
gfn 1621 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
gfn 1831 arch/x86/kvm/mmu.c struct kvm_memory_slot *slot, u64 gfn)
gfn 1838 arch/x86/kvm/mmu.c rmap_head = __gfn_to_rmap(gfn, i, slot);
gfn 1845 arch/x86/kvm/mmu.c static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
gfn 1849 arch/x86/kvm/mmu.c slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 1850 arch/x86/kvm/mmu.c return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
gfn 1870 arch/x86/kvm/mmu.c struct kvm_memory_slot *slot, gfn_t gfn, int level,
gfn 1877 arch/x86/kvm/mmu.c struct kvm_memory_slot *slot, gfn_t gfn, int level,
gfn 1893 arch/x86/kvm/mmu.c sptep, *sptep, gfn, level);
gfn 1915 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
gfn 1931 arch/x86/kvm/mmu.c gfn_t gfn;
gfn 1943 arch/x86/kvm/mmu.c iterator->gfn = iterator->start_gfn;
gfn 1944 arch/x86/kvm/mmu.c iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
gfn 1971 arch/x86/kvm/mmu.c iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
gfn 1997 arch/x86/kvm/mmu.c gfn_t gfn,
gfn 2030 arch/x86/kvm/mmu.c iterator.gfn, iterator.level, data);
gfn 2042 arch/x86/kvm/mmu.c gfn_t gfn, int level,
gfn 2059 arch/x86/kvm/mmu.c struct kvm_memory_slot *slot, gfn_t gfn, int level,
gfn 2069 arch/x86/kvm/mmu.c trace_kvm_age_page(gfn, level, slot, young);
gfn 2074 arch/x86/kvm/mmu.c struct kvm_memory_slot *slot, gfn_t gfn,
gfn 2088 arch/x86/kvm/mmu.c static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
gfn 2095 arch/x86/kvm/mmu.c rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
gfn 2097 arch/x86/kvm/mmu.c kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
gfn 2098 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
gfn 2151 arch/x86/kvm/mmu.c static unsigned kvm_page_table_hashfn(gfn_t gfn)
gfn 2153 arch/x86/kvm/mmu.c return hash_64(gfn, KVM_MMU_HASH_SHIFT);
gfn 2348 arch/x86/kvm/mmu.c if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
gfn 2414 arch/x86/kvm/mmu.c static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
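The kvm_page_table_hashfn() and for_each_gfn_indirect_valid_sp entries above show how x86 KVM buckets shadow pages by a hash of their gfn and filters the bucket on the exact gfn. The sketch below reproduces that shape in standalone C; hash_64() is replaced by a Fibonacci-style multiplicative hash, which is only an assumption for illustration.

```c
/* Sketch (not kernel code): gfn-hashed shadow-page buckets with
 * exact-gfn filtering, as in mmu_page_hash above. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define HASH_SHIFT 4                  /* 16 buckets for the example */
#define HASH_SIZE  (1u << HASH_SHIFT)

struct shadow_page {
	uint64_t gfn;
	struct shadow_page *next;     /* hash-chain link */
};

static unsigned page_table_hashfn(uint64_t gfn)
{
	return (unsigned)((gfn * 0x9e3779b97f4a7c15ull) >> (64 - HASH_SHIFT));
}

static struct shadow_page *find_sp(struct shadow_page **hash, uint64_t gfn)
{
	/* walk one bucket, skipping entries whose gfn does not match */
	for (struct shadow_page *sp = hash[page_table_hashfn(gfn)];
	     sp; sp = sp->next)
		if (sp->gfn == gfn)
			return sp;
	return NULL;
}

int main(void)
{
	struct shadow_page *hash[HASH_SIZE] = { 0 };
	struct shadow_page sp = { .gfn = 0x1234, .next = NULL };

	hash[page_table_hashfn(sp.gfn)] = &sp;
	printf("found: %d\n", find_sp(hash, 0x1234) == &sp);
	return 0;
}
```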
gfn 2420 arch/x86/kvm/mmu.c for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
gfn 2517 arch/x86/kvm/mmu.c protected |= rmap_write_protect(vcpu, sp->gfn);
gfn 2551 arch/x86/kvm/mmu.c gfn_t gfn,
gfn 2577 arch/x86/kvm/mmu.c for_each_valid_sp(vcpu->kvm, sp, gfn) {
gfn 2578 arch/x86/kvm/mmu.c if (sp->gfn != gfn) {
gfn 2612 arch/x86/kvm/mmu.c sp->gfn = gfn;
gfn 2615 arch/x86/kvm/mmu.c &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
gfn 2624 arch/x86/kvm/mmu.c rmap_write_protect(vcpu, gfn))
gfn 2625 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
gfn 2628 arch/x86/kvm/mmu.c flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
gfn 2744 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
gfn 2932 arch/x86/kvm/mmu.c int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
gfn 2938 arch/x86/kvm/mmu.c pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
gfn 2941 arch/x86/kvm/mmu.c for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
gfn 2942 arch/x86/kvm/mmu.c pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
gfn 2963 arch/x86/kvm/mmu.c static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 2968 arch/x86/kvm/mmu.c if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
gfn 2971 arch/x86/kvm/mmu.c for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
gfn 3051 arch/x86/kvm/mmu.c gfn_t gfn, kvm_pfn_t pfn, bool speculative,
gfn 3058 arch/x86/kvm/mmu.c if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
gfn 3093 arch/x86/kvm/mmu.c spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
gfn 3115 arch/x86/kvm/mmu.c mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
gfn 3129 arch/x86/kvm/mmu.c if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
gfn 3131 arch/x86/kvm/mmu.c __func__, gfn);
gfn 3139 arch/x86/kvm/mmu.c kvm_vcpu_mark_page_dirty(vcpu, gfn);
gfn 3154 arch/x86/kvm/mmu.c int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
gfn 3164 arch/x86/kvm/mmu.c *sptep, write_fault, gfn);
gfn 3188 arch/x86/kvm/mmu.c set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
gfn 3197 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
gfn 3204 arch/x86/kvm/mmu.c trace_kvm_mmu_set_spte(level, gfn, sptep);
gfn 3210 arch/x86/kvm/mmu.c rmap_count = rmap_add(vcpu, sptep, gfn);
gfn 3212 arch/x86/kvm/mmu.c rmap_recycle(vcpu, sptep, gfn);
gfn 3219 arch/x86/kvm/mmu.c static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 3224 arch/x86/kvm/mmu.c slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
gfn 3228 arch/x86/kvm/mmu.c return gfn_to_pfn_memslot_atomic(slot, gfn);
gfn 3239 arch/x86/kvm/mmu.c gfn_t gfn;
gfn 3241 arch/x86/kvm/mmu.c gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
gfn 3242 arch/x86/kvm/mmu.c slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
gfn 3246 arch/x86/kvm/mmu.c ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
gfn 3250 arch/x86/kvm/mmu.c for (i = 0; i < ret; i++, gfn++, start++) {
gfn 3251 arch/x86/kvm/mmu.c mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
gfn 3303 arch/x86/kvm/mmu.c gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
gfn 3320 arch/x86/kvm/mmu.c *pfnp |= gfn & page_mask;
gfn 3332 arch/x86/kvm/mmu.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 3333 arch/x86/kvm/mmu.c gfn_t base_gfn = gfn;
gfn 3344 arch/x86/kvm/mmu.c disallowed_hugepage_adjust(it, gfn, &pfn, &level);
gfn 3346 arch/x86/kvm/mmu.c base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
gfn 3374 arch/x86/kvm/mmu.c static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
gfn 3385 arch/x86/kvm/mmu.c kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
gfn 3393 arch/x86/kvm/mmu.c gfn_t gfn, kvm_pfn_t *pfnp,
gfn 3408 arch/x86/kvm/mmu.c !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
gfn 3421 arch/x86/kvm/mmu.c VM_BUG_ON((gfn & mask) != (pfn & mask));
gfn 3431 arch/x86/kvm/mmu.c static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
gfn 3436 arch/x86/kvm/mmu.c *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
gfn 3441 arch/x86/kvm/mmu.c vcpu_cache_mmio_info(vcpu, gva, gfn,
gfn 3488 arch/x86/kvm/mmu.c gfn_t gfn;
gfn 3512 arch/x86/kvm/mmu.c gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
gfn 3513 arch/x86/kvm/mmu.c kvm_vcpu_mark_page_dirty(vcpu, gfn);
gfn 3641 arch/x86/kvm/mmu.c static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gfn 3647 arch/x86/kvm/mmu.c gfn_t gfn, bool prefault)
gfn 3659 arch/x86/kvm/mmu.c level = mapping_level(vcpu, gfn, &force_pt_level);
gfn 3669 arch/x86/kvm/mmu.c gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
gfn 3678 arch/x86/kvm/mmu.c if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
gfn 3681 arch/x86/kvm/mmu.c if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
gfn 3691 arch/x86/kvm/mmu.c transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
gfn 4100 arch/x86/kvm/mmu.c gfn_t gfn = get_mmio_spte_gfn(spte);
gfn 4109 arch/x86/kvm/mmu.c trace_handle_mmio_page_fault(addr, gfn, access);
gfn 4110 arch/x86/kvm/mmu.c vcpu_cache_mmio_info(vcpu, addr, gfn, access);
gfn 4122 arch/x86/kvm/mmu.c u32 error_code, gfn_t gfn)
gfn 4135 arch/x86/kvm/mmu.c if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
gfn 4161 arch/x86/kvm/mmu.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 4167 arch/x86/kvm/mmu.c if (page_fault_handle_page_track(vcpu, error_code, gfn))
gfn 4178 arch/x86/kvm/mmu.c error_code, gfn, prefault);
gfn 4182 arch/x86/kvm/mmu.c gfn_t gfn)
gfn 4187 arch/x86/kvm/mmu.c arch.gfn = gfn;
gfn 4192 arch/x86/kvm/mmu.c kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
gfn 4195 arch/x86/kvm/mmu.c static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gfn 4205 arch/x86/kvm/mmu.c if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
gfn 4210 arch/x86/kvm/mmu.c slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 4212 arch/x86/kvm/mmu.c *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
gfn 4217 arch/x86/kvm/mmu.c trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
gfn 4218 arch/x86/kvm/mmu.c if (kvm_find_async_pf_gfn(vcpu, gfn)) {
gfn 4219 arch/x86/kvm/mmu.c trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
gfn 4222 arch/x86/kvm/mmu.c } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
gfn 4226 arch/x86/kvm/mmu.c *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
gfn 4269 arch/x86/kvm/mmu.c check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
gfn 4273 arch/x86/kvm/mmu.c gfn &= ~(page_num - 1);
gfn 4275 arch/x86/kvm/mmu.c return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
gfn 4285 arch/x86/kvm/mmu.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 4294 arch/x86/kvm/mmu.c if (page_fault_handle_page_track(vcpu, error_code, gfn))
gfn 4303 arch/x86/kvm/mmu.c !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
gfn 4304 arch/x86/kvm/mmu.c level = mapping_level(vcpu, gfn, &force_pt_level);
gfn 4307 arch/x86/kvm/mmu.c !check_hugepage_cache_consistency(vcpu, gfn, level))
gfn 4309 arch/x86/kvm/mmu.c gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
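The transparent_hugepage_adjust() entries above enforce one invariant, visible in VM_BUG_ON((gfn & mask) != (pfn & mask)): for a hugepage mapping at a given level, the low bits of gfn and pfn must agree. A standalone sketch of that realignment, assuming x86-style 9-bits-per-level frames:

```c
/* Sketch (not kernel code): realign a pfn to the hugepage frame so its
 * sub-frame offset matches the gfn's, then assert the invariant. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_HPAGE(level) (1ull << (((level) - 1) * 9)) /* x86-style */

static uint64_t adjust_pfn_for_level(uint64_t gfn, uint64_t pfn, int level)
{
	uint64_t mask = PAGES_PER_HPAGE(level) - 1;

	/* drop the pfn's low bits, then re-apply the gfn's sub-hugepage
	 * offset so both sides point at the same slot in the frame */
	pfn &= ~mask;
	pfn |= gfn & mask;
	assert((gfn & mask) == (pfn & mask));
	return pfn;
}

int main(void)
{
	/* 2M mapping (level 2): 512-page frames must line up */
	printf("pfn = %#llx\n",
	       (unsigned long long)adjust_pfn_for_level(0x10203, 0x88000, 2));
	return 0;
}
```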
gfn 4318 arch/x86/kvm/mmu.c if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
gfn 4321 arch/x86/kvm/mmu.c if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
gfn 4331 arch/x86/kvm/mmu.c transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
gfn 4463 arch/x86/kvm/mmu.c static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
gfn 4467 arch/x86/kvm/mmu.c if (gfn != get_mmio_spte_gfn(*sptep)) {
gfn 4473 arch/x86/kvm/mmu.c mark_mmio_spte(vcpu, sptep, gfn, access);
gfn 5435 arch/x86/kvm/mmu.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 5467 arch/x86/kvm/mmu.c for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
gfn 5707 arch/x86/kvm/mmu.c iterator.gfn - start_gfn + 1);
gfn 6039 arch/x86/kvm/mmu.c kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
gfn 208 arch/x86/kvm/mmu.h void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
gfn 209 arch/x86/kvm/mmu.h void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
gfn 211 arch/x86/kvm/mmu.h struct kvm_memory_slot *slot, u64 gfn);
gfn 96 arch/x86/kvm/mmu_audit.c gfn_t gfn;
gfn 113 arch/x86/kvm/mmu_audit.c gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
gfn 114 arch/x86/kvm/mmu_audit.c pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);
gfn 133 arch/x86/kvm/mmu_audit.c gfn_t gfn;
gfn 136 arch/x86/kvm/mmu_audit.c gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
gfn 139 arch/x86/kvm/mmu_audit.c slot = __gfn_to_memslot(slots, gfn);
gfn 143 arch/x86/kvm/mmu_audit.c audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
gfn 145 arch/x86/kvm/mmu_audit.c (long int)(sptep - rev_sp->spt), rev_sp->gfn);
gfn 150 arch/x86/kvm/mmu_audit.c rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
gfn 202 arch/x86/kvm/mmu_audit.c slot = __gfn_to_memslot(slots, sp->gfn);
gfn 203 arch/x86/kvm/mmu_audit.c rmap_head = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
gfn 209 arch/x86/kvm/mmu_audit.c sp->gfn, sp->role.word);
gfn 13 arch/x86/kvm/mmutrace.h __field(__u64, gfn) \
gfn 20 arch/x86/kvm/mmutrace.h __entry->gfn = sp->gfn; \
gfn 37 arch/x86/kvm/mmutrace.h __entry->gfn, role.level, \
gfn 205 arch/x86/kvm/mmutrace.h TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
gfn 206 arch/x86/kvm/mmutrace.h TP_ARGS(sptep, gfn, access, gen),
gfn 210 arch/x86/kvm/mmutrace.h __field(gfn_t, gfn)
gfn 217 arch/x86/kvm/mmutrace.h __entry->gfn = gfn;
gfn 223 arch/x86/kvm/mmutrace.h __entry->gfn, __entry->access, __entry->gen)
gfn 228 arch/x86/kvm/mmutrace.h TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
gfn 229 arch/x86/kvm/mmutrace.h TP_ARGS(addr, gfn, access),
gfn 233 arch/x86/kvm/mmutrace.h __field(gfn_t, gfn)
gfn 239 arch/x86/kvm/mmutrace.h __entry->gfn = gfn;
gfn 243 arch/x86/kvm/mmutrace.h TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
gfn 331 arch/x86/kvm/mmutrace.h TP_PROTO(int level, gfn_t gfn, u64 *sptep),
gfn 332 arch/x86/kvm/mmutrace.h TP_ARGS(level, gfn, sptep),
gfn 335 arch/x86/kvm/mmutrace.h __field(u64, gfn)
gfn 346 arch/x86/kvm/mmutrace.h __entry->gfn = gfn;
gfn 356 arch/x86/kvm/mmutrace.h __entry->gfn, __entry->spte,
gfn 371 arch/x86/kvm/mmutrace.h __field(u64, gfn)
gfn 377 arch/x86/kvm/mmutrace.h __entry->gfn = addr >> PAGE_SHIFT;
gfn 378 arch/x86/kvm/mmutrace.h __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
gfn 383 arch/x86/kvm/mmutrace.h __entry->gfn, __entry->pfn, __entry->level
gfn 619 arch/x86/kvm/mtrr.c u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 628 arch/x86/kvm/mtrr.c start = gfn_to_gpa(gfn);
gfn 695 arch/x86/kvm/mtrr.c bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 703 arch/x86/kvm/mtrr.c start = gfn_to_gpa(gfn);
gfn 704 arch/x86/kvm/mtrr.c end = gfn_to_gpa(gfn + page_num);
gfn 63 arch/x86/kvm/page_track.c static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 68 arch/x86/kvm/page_track.c index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
gfn 91 arch/x86/kvm/page_track.c struct kvm_memory_slot *slot, gfn_t gfn,
gfn 98 arch/x86/kvm/page_track.c update_gfn_track(slot, gfn, mode, 1);
gfn 104 arch/x86/kvm/page_track.c kvm_mmu_gfn_disallow_lpage(slot, gfn);
gfn 107 arch/x86/kvm/page_track.c if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
gfn 126 arch/x86/kvm/page_track.c struct kvm_memory_slot *slot, gfn_t gfn,
gfn 132 arch/x86/kvm/page_track.c update_gfn_track(slot, gfn, mode, -1);
gfn 138 arch/x86/kvm/page_track.c kvm_mmu_gfn_allow_lpage(slot, gfn);
gfn 145 arch/x86/kvm/page_track.c bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 154 arch/x86/kvm/page_track.c slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 158 arch/x86/kvm/page_track.c index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
gfn 95 arch/x86/kvm/paging_tmpl.h gfn_t gfn;
gfn 316 arch/x86/kvm/paging_tmpl.h gfn_t gfn;
gfn 421 arch/x86/kvm/paging_tmpl.h gfn = gpte_to_gfn_lvl(pte, walker->level);
gfn 422 arch/x86/kvm/paging_tmpl.h gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
gfn 425 arch/x86/kvm/paging_tmpl.h gfn += pse36_gfn_delta(pte);
gfn 427 arch/x86/kvm/paging_tmpl.h real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
gfn 431 arch/x86/kvm/paging_tmpl.h walker->gfn = real_gpa >> PAGE_SHIFT;
gfn 520 arch/x86/kvm/paging_tmpl.h gfn_t gfn;
gfn 528 arch/x86/kvm/paging_tmpl.h gfn = gpte_to_gfn(gpte);
gfn 531 arch/x86/kvm/paging_tmpl.h pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
gfn 540 arch/x86/kvm/paging_tmpl.h mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
gfn 624 arch/x86/kvm/paging_tmpl.h gfn_t gfn, base_gfn;
gfn 673 arch/x86/kvm/paging_tmpl.h gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
gfn 674 arch/x86/kvm/paging_tmpl.h base_gfn = gfn;
gfn 685 arch/x86/kvm/paging_tmpl.h disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
gfn 687 arch/x86/kvm/paging_tmpl.h base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
gfn 745 arch/x86/kvm/paging_tmpl.h gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
gfn 747 arch/x86/kvm/paging_tmpl.h self_changed |= !(gfn & mask);
gfn 748 arch/x86/kvm/paging_tmpl.h *write_fault_to_shadow_pgtable |= !gfn;
gfn 811 arch/x86/kvm/paging_tmpl.h if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
gfn 822 arch/x86/kvm/paging_tmpl.h level = mapping_level(vcpu, walker.gfn, &force_pt_level);
gfn 825 arch/x86/kvm/paging_tmpl.h walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
gfn 833 arch/x86/kvm/paging_tmpl.h if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
gfn 837 arch/x86/kvm/paging_tmpl.h if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
gfn 869 arch/x86/kvm/paging_tmpl.h transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
gfn 889 arch/x86/kvm/paging_tmpl.h return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
gfn 930 arch/x86/kvm/paging_tmpl.h sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
gfn 959 arch/x86/kvm/paging_tmpl.h gpa = gfn_to_gpa(walker.gfn);
gfn 985 arch/x86/kvm/paging_tmpl.h gpa = gfn_to_gpa(walker.gfn);
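The page_track.c entries above reveal the scheme behind kvm_page_track: each tracked page has a short reference counter, update_gfn_track() adjusts it by +1/-1, and a page is "active" while the counter is non-zero. A standalone sketch of that counting, with a toy fixed-size slot:

```c
/* Sketch (not kernel code): per-page write-track reference counters,
 * the shape behind update_gfn_track()/kvm_page_track_is_active(). */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SLOT_PAGES 64

struct tracked_slot {
	uint64_t base_gfn;
	short write_track[SLOT_PAGES];  /* one counter per 4K page */
};

static void update_gfn_track(struct tracked_slot *s, uint64_t gfn, int count)
{
	size_t index = gfn - s->base_gfn;     /* 4K-level index into the slot */

	s->write_track[index] += count;
	assert(s->write_track[index] >= 0);   /* never more removes than adds */
}

static bool page_track_is_active(struct tracked_slot *s, uint64_t gfn)
{
	return s->write_track[gfn - s->base_gfn] != 0;
}

int main(void)
{
	struct tracked_slot slot = { .base_gfn = 0x100 };

	update_gfn_track(&slot, 0x105, 1);    /* start write tracking */
	printf("active: %d\n", page_track_is_active(&slot, 0x105));
	update_gfn_track(&slot, 0x105, -1);   /* stop */
	printf("active: %d\n", page_track_is_active(&slot, 0x105));
	return 0;
}
```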
gfn 1023 arch/x86/kvm/paging_tmpl.h gfn_t gfn;
gfn 1045 arch/x86/kvm/paging_tmpl.h gfn = gpte_to_gfn(gpte);
gfn 1050 arch/x86/kvm/paging_tmpl.h if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
gfn 1054 arch/x86/kvm/paging_tmpl.h if (gfn != sp->gfns[i]) {
gfn 1071 arch/x86/kvm/paging_tmpl.h gfn, spte_to_pfn(sp->spt[i]),
gfn 5938 arch/x86/kvm/svm.c static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
gfn 3388 arch/x86/kvm/vmx/nested.c gfn_t gfn;
gfn 3396 arch/x86/kvm/vmx/nested.c gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
gfn 3397 arch/x86/kvm/vmx/nested.c kvm_vcpu_mark_page_dirty(vcpu, gfn);
gfn 3401 arch/x86/kvm/vmx/nested.c gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
gfn 3402 arch/x86/kvm/vmx/nested.c kvm_vcpu_mark_page_dirty(vcpu, gfn);
gfn 3749 arch/x86/kvm/vmx/vmx.c WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
gfn 6863 arch/x86/kvm/vmx/vmx.c static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
gfn 6899 arch/x86/kvm/vmx/vmx.c cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
gfn 682 arch/x86/kvm/x86.c static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 685 arch/x86/kvm/x86.c return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
gfn 738 arch/x86/kvm/x86.c gfn_t gfn;
gfn 748 arch/x86/kvm/x86.c gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
gfn 750 arch/x86/kvm/x86.c r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
gfn 10082 arch/x86/kvm/x86.c static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
gfn 10084 arch/x86/kvm/x86.c return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
gfn 10092 arch/x86/kvm/x86.c static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 10094 arch/x86/kvm/x86.c u32 key = kvm_async_pf_hash_fn(gfn);
gfn 10099 arch/x86/kvm/x86.c vcpu->arch.apf.gfns[key] = gfn;
gfn 10102 arch/x86/kvm/x86.c static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 10105 arch/x86/kvm/x86.c u32 key = kvm_async_pf_hash_fn(gfn);
gfn 10108 arch/x86/kvm/x86.c (vcpu->arch.apf.gfns[key] != gfn &&
gfn 10115 arch/x86/kvm/x86.c bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 10117 arch/x86/kvm/x86.c return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
gfn 10120 arch/x86/kvm/x86.c static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 10124 arch/x86/kvm/x86.c i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
gfn 10193 arch/x86/kvm/x86.c kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
gfn 10226 arch/x86/kvm/x86.c kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
gfn 187 arch/x86/kvm/x86.h gva_t gva, gfn_t gfn, unsigned access)
gfn 200 arch/x86/kvm/x86.h vcpu->arch.mmio_gfn = gfn;
gfn 282 arch/x86/kvm/x86.h u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 286 arch/x86/kvm/x86.h bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 355 drivers/block/xen-blkfront.c unsigned long gfn,
gfn 373 drivers/block/xen-blkfront.c gfn, 0);
gfn 584 drivers/block/xen-blkfront.c static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
gfn 625 drivers/block/xen-blkfront.c gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
gfn 1369 drivers/block/xen-blkfront.c static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
gfn 661 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
gfn 690 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
gfn 746 drivers/gpu/drm/i915/gvt/gtt.c if (spt->guest_page.gfn) {
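The async page fault entries above (kvm_async_pf_hash_fn and friends) describe a small open-addressed table: hash_32() of the gfn picks a start slot, and insertion/lookup probe linearly. The sketch below reproduces add and find in standalone C; the hash is a stand-in multiplier, and the kernel's deletion path (which re-hashes the cluster) is deliberately omitted.

```c
/* Sketch (not kernel code): open-addressed gfn table in the style of
 * vcpu->arch.apf.gfns above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASYNC_PF_PER_VCPU 64            /* table size, a power of two */
#define HASH_BITS 6                     /* log2(ASYNC_PF_PER_VCPU) */

static uint64_t gfns[ASYNC_PF_PER_VCPU];

static uint32_t hash_fn(uint64_t gfn)
{
	/* stand-in for hash_32(gfn & 0xffffffff, order_base_2(...)) */
	return ((uint32_t)gfn * 0x61c88647u) >> (32 - HASH_BITS);
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	while (gfns[key] != 0)          /* 0 marks a free slot here */
		key = (key + 1) & (ASYNC_PF_PER_VCPU - 1);
	gfns[key] = gfn;
}

static bool find_gfn(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	while (gfns[key] != 0 && gfns[key] != gfn)
		key = (key + 1) & (ASYNC_PF_PER_VCPU - 1);
	return gfns[key] == gfn;
}

int main(void)
{
	add_gfn(0x1234);
	printf("hit=%d miss=%d\n", find_gfn(0x1234), find_gfn(0x9999));
	return 0;
}
```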
gfn 750 drivers/gpu/drm/i915/gvt/gtt.c intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
gfn 798 drivers/gpu/drm/i915/gvt/gtt.c struct intel_vgpu *vgpu, unsigned long gfn)
gfn 802 drivers/gpu/drm/i915/gvt/gtt.c track = intel_vgpu_find_page_track(vgpu, gfn);
gfn 871 drivers/gpu/drm/i915/gvt/gtt.c unsigned long gfn, bool guest_pde_ips)
gfn 883 drivers/gpu/drm/i915/gvt/gtt.c ret = intel_vgpu_register_page_track(vgpu, gfn,
gfn 891 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn = gfn;
gfn 894 drivers/gpu/drm/i915/gvt/gtt.c trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
gfn 1000 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn, spt->shadow_page.type);
gfn 1036 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn, spt->shadow_page.type);
gfn 1105 drivers/gpu/drm/i915/gvt/gtt.c ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
gfn 1113 drivers/gpu/drm/i915/gvt/gtt.c trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
gfn 1250 drivers/gpu/drm/i915/gvt/gtt.c unsigned long gfn, page_size = PAGE_SIZE;
gfn 1257 drivers/gpu/drm/i915/gvt/gtt.c gfn = pte_ops->get_pfn(ge);
gfn 1288 drivers/gpu/drm/i915/gvt/gtt.c ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
gfn 1305 drivers/gpu/drm/i915/gvt/gtt.c unsigned long gfn, i;
gfn 1309 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn, spt->shadow_page.type);
gfn 1322 drivers/gpu/drm/i915/gvt/gtt.c gfn = ops->get_pfn(&ge);
gfn 1323 drivers/gpu/drm/i915/gvt/gtt.c if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
gfn 1442 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn << PAGE_SHIFT, vgpu);
gfn 1490 drivers/gpu/drm/i915/gvt/gtt.c spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
gfn 1510 drivers/gpu/drm/i915/gvt/gtt.c ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
gfn 1556 drivers/gpu/drm/i915/gvt/gtt.c return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
gfn 2214 drivers/gpu/drm/i915/gvt/gtt.c unsigned long gma, gfn;
gfn 2282 drivers/gpu/drm/i915/gvt/gtt.c gfn = ops->get_pfn(&e);
gfn 2289 drivers/gpu/drm/i915/gvt/gtt.c if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
gfn 2294 drivers/gpu/drm/i915/gvt/gtt.c ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
gfn 247 drivers/gpu/drm/i915/gvt/gtt.h unsigned long gfn;
gfn 122 drivers/gpu/drm/i915/gvt/gvt.h u32 gfn[INTEL_GVT_OPREGION_PAGES];
gfn 53 drivers/gpu/drm/i915/gvt/hypercall.h int (*enable_page_track)(unsigned long handle, u64 gfn);
gfn 54 drivers/gpu/drm/i915/gvt/hypercall.h int (*disable_page_track)(unsigned long handle, u64 gfn);
gfn 59 drivers/gpu/drm/i915/gvt/hypercall.h unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
gfn 61 drivers/gpu/drm/i915/gvt/hypercall.h int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
gfn 65 drivers/gpu/drm/i915/gvt/hypercall.h int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
gfn 73 drivers/gpu/drm/i915/gvt/hypercall.h bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
gfn 87 drivers/gpu/drm/i915/gvt/kvmgt.c gfn_t gfn;
gfn 105 drivers/gpu/drm/i915/gvt/kvmgt.c gfn_t gfn;
gfn 120 drivers/gpu/drm/i915/gvt/kvmgt.c static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
gfn 130 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned long cur_gfn = gfn + npage;
gfn 138 drivers/gpu/drm/i915/gvt/kvmgt.c static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
gfn 152 drivers/gpu/drm/i915/gvt/kvmgt.c unsigned long cur_gfn = gfn + npage;
gfn 183 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
gfn 187 drivers/gpu/drm/i915/gvt/kvmgt.c static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
gfn 194 drivers/gpu/drm/i915/gvt/kvmgt.c ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
gfn 203 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_unpin_guest_page(vgpu, gfn, size);
gfn 210 drivers/gpu/drm/i915/gvt/kvmgt.c static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
gfn 216 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_unpin_guest_page(vgpu, gfn, size);
gfn 238 drivers/gpu/drm/i915/gvt/kvmgt.c static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
gfn 246 drivers/gpu/drm/i915/gvt/kvmgt.c if (gfn < itr->gfn)
gfn 248 drivers/gpu/drm/i915/gvt/kvmgt.c else if (gfn > itr->gfn)
gfn 256 drivers/gpu/drm/i915/gvt/kvmgt.c static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
gfn 267 drivers/gpu/drm/i915/gvt/kvmgt.c new->gfn = gfn;
gfn 278 drivers/gpu/drm/i915/gvt/kvmgt.c if (gfn < itr->gfn)
gfn 327 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
gfn 359 drivers/gpu/drm/i915/gvt/kvmgt.c __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
gfn 363 drivers/gpu/drm/i915/gvt/kvmgt.c hash_for_each_possible(info->ptable, p, hnode, gfn) {
gfn 364 drivers/gpu/drm/i915/gvt/kvmgt.c if (gfn == p->gfn) {
gfn 374 drivers/gpu/drm/i915/gvt/kvmgt.c gfn_t gfn)
gfn 378 drivers/gpu/drm/i915/gvt/kvmgt.c p = __kvmgt_protect_table_find(info, gfn);
gfn 382 drivers/gpu/drm/i915/gvt/kvmgt.c static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
gfn 386 drivers/gpu/drm/i915/gvt/kvmgt.c if (kvmgt_gfn_is_write_protected(info, gfn))
gfn 390 drivers/gpu/drm/i915/gvt/kvmgt.c if (WARN(!p, "gfn: 0x%llx\n", gfn))
gfn 393 drivers/gpu/drm/i915/gvt/kvmgt.c p->gfn = gfn;
gfn 394 drivers/gpu/drm/i915/gvt/kvmgt.c hash_add(info->ptable, &p->hnode, gfn);
gfn 398 drivers/gpu/drm/i915/gvt/kvmgt.c gfn_t gfn)
gfn 402 drivers/gpu/drm/i915/gvt/kvmgt.c p = __kvmgt_protect_table_find(info, gfn);
gfn 717 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
gfn 1634 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
gfn 1648 drivers/gpu/drm/i915/gvt/kvmgt.c slot = gfn_to_memslot(kvm, gfn);
gfn 1656 drivers/gpu/drm/i915/gvt/kvmgt.c if (kvmgt_gfn_is_write_protected(info, gfn))
gfn 1659 drivers/gpu/drm/i915/gvt/kvmgt.c kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
gfn 1660 drivers/gpu/drm/i915/gvt/kvmgt.c kvmgt_protect_table_add(info, gfn);
gfn 1668 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
gfn 1682 drivers/gpu/drm/i915/gvt/kvmgt.c slot = gfn_to_memslot(kvm, gfn);
gfn 1690 drivers/gpu/drm/i915/gvt/kvmgt.c if (!kvmgt_gfn_is_write_protected(info, gfn))
gfn 1693 drivers/gpu/drm/i915/gvt/kvmgt.c kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
gfn 1694 drivers/gpu/drm/i915/gvt/kvmgt.c kvmgt_protect_table_del(info, gfn);
gfn 1719 drivers/gpu/drm/i915/gvt/kvmgt.c gfn_t gfn;
gfn 1725 drivers/gpu/drm/i915/gvt/kvmgt.c gfn = slot->base_gfn + i;
gfn 1726 drivers/gpu/drm/i915/gvt/kvmgt.c if (kvmgt_gfn_is_write_protected(info, gfn)) {
gfn 1727 drivers/gpu/drm/i915/gvt/kvmgt.c kvm_slot_page_track_remove_page(kvm, slot, gfn,
gfn 1729 drivers/gpu/drm/i915/gvt/kvmgt.c kvmgt_protect_table_del(info, gfn);
gfn 1867 drivers/gpu/drm/i915/gvt/kvmgt.c static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
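The kvmgt protect table above is a hashtable keyed by gfn (hash_for_each_possible over info->ptable), recording which guest pages are write-protected. The standalone sketch below keeps the same add / find shape with simple chained buckets; the bucket function is a stand-in for the kernel's hashtable macros.

```c
/* Sketch (not kernel code): gfn-keyed set of write-protected pages,
 * modelled on kvmgt_protect_table_add()/__kvmgt_protect_table_find(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTABLE_BITS 6
#define PTABLE_SIZE (1u << PTABLE_BITS)

struct gfn_node { uint64_t gfn; struct gfn_node *next; };
static struct gfn_node *ptable[PTABLE_SIZE];

static unsigned bucket(uint64_t gfn) { return gfn & (PTABLE_SIZE - 1); }

static bool gfn_is_write_protected(uint64_t gfn)
{
	for (struct gfn_node *p = ptable[bucket(gfn)]; p; p = p->next)
		if (p->gfn == gfn)
			return true;
	return false;
}

static void protect_table_add(uint64_t gfn)
{
	if (gfn_is_write_protected(gfn))
		return;                         /* already tracked */

	struct gfn_node *p = malloc(sizeof(*p));
	if (!p)
		return;
	p->gfn = gfn;
	p->next = ptable[bucket(gfn)];          /* push onto the chain */
	ptable[bucket(gfn)] = p;
}

int main(void)
{
	protect_table_add(0x42);
	printf("protected: %d\n", gfn_is_write_protected(0x42));
	return 0;
}
```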
gfn 1877 drivers/gpu/drm/i915/gvt/kvmgt.c pfn = gfn_to_pfn(info->kvm, gfn);
gfn 1884 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
gfn 1900 drivers/gpu/drm/i915/gvt/kvmgt.c entry = __gvt_cache_find_gfn(info->vgpu, gfn);
gfn 1902 drivers/gpu/drm/i915/gvt/kvmgt.c ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
gfn 1906 drivers/gpu/drm/i915/gvt/kvmgt.c ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
gfn 1911 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
gfn 1914 drivers/gpu/drm/i915/gvt/kvmgt.c ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
gfn 1918 drivers/gpu/drm/i915/gvt/kvmgt.c ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
gfn 1930 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
gfn 1940 drivers/gpu/drm/i915/gvt/kvmgt.c gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
gfn 2012 drivers/gpu/drm/i915/gvt/kvmgt.c static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
gfn 2026 drivers/gpu/drm/i915/gvt/kvmgt.c ret = kvm_is_visible_gfn(kvm, gfn);
gfn 163 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, unsigned long gfn)
gfn 165 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
gfn 177 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, unsigned long gfn)
gfn 179 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
gfn 223 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, unsigned long gfn)
gfn 225 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
gfn 239 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
gfn 242 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
gfn 269 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, unsigned long gfn,
gfn 277 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
gfn 372 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, unsigned long gfn)
gfn 377 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
gfn 272 drivers/gpu/drm/i915/gvt/opregion.c vgpu_opregion(vgpu)->gfn[i],
gfn 305 drivers/gpu/drm/i915/gvt/opregion.c vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
gfn 316 drivers/gpu/drm/i915/gvt/opregion.c vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
gfn 482 drivers/gpu/drm/i915/gvt/opregion.c scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
gfn 484 drivers/gpu/drm/i915/gvt/opregion.c parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
gfn 35 drivers/gpu/drm/i915/gvt/page_track.c struct intel_vgpu *vgpu, unsigned long gfn)
gfn 37 drivers/gpu/drm/i915/gvt/page_track.c return radix_tree_lookup(&vgpu->page_track_tree, gfn);
gfn 50 drivers/gpu/drm/i915/gvt/page_track.c int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
gfn 56 drivers/gpu/drm/i915/gvt/page_track.c track = intel_vgpu_find_page_track(vgpu, gfn);
gfn 67 drivers/gpu/drm/i915/gvt/page_track.c ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
gfn 83 drivers/gpu/drm/i915/gvt/page_track.c unsigned long gfn)
gfn 87 drivers/gpu/drm/i915/gvt/page_track.c track = radix_tree_delete(&vgpu->page_track_tree, gfn);
gfn 90 drivers/gpu/drm/i915/gvt/page_track.c intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
gfn 103 drivers/gpu/drm/i915/gvt/page_track.c int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
gfn 108 drivers/gpu/drm/i915/gvt/page_track.c track = intel_vgpu_find_page_track(vgpu, gfn);
gfn 115 drivers/gpu/drm/i915/gvt/page_track.c ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
gfn 130 drivers/gpu/drm/i915/gvt/page_track.c int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
gfn 135 drivers/gpu/drm/i915/gvt/page_track.c track = intel_vgpu_find_page_track(vgpu, gfn);
gfn 142 drivers/gpu/drm/i915/gvt/page_track.c ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
gfn 42 drivers/gpu/drm/i915/gvt/page_track.h struct intel_vgpu *vgpu, unsigned long gfn);
gfn 45 drivers/gpu/drm/i915/gvt/page_track.h unsigned long gfn, gvt_page_track_handler_t handler,
gfn 48 drivers/gpu/drm/i915/gvt/page_track.h unsigned long gfn);
gfn 50 drivers/gpu/drm/i915/gvt/page_track.h int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
gfn 51 drivers/gpu/drm/i915/gvt/page_track.h int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
gfn 153 drivers/gpu/drm/i915/gvt/trace.h TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
gfn 156 drivers/gpu/drm/i915/gvt/trace.h TP_ARGS(id, action, spt, gfn, type),
gfn 165 drivers/gpu/drm/i915/gvt/trace.h id, action, spt, gfn, type);
gfn 428 drivers/net/xen-netfront.c static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
gfn 446 drivers/net/xen-netfront.c gfn, GNTMAP_readonly);
gfn 478 drivers/net/xen-netfront.c static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
gfn 485 drivers/net/xen-netfront.c xennet_tx_setup_grant(gfn, offset, len, data);
gfn 715 drivers/pinctrl/tegra/pinctrl-tegra.c int fn, gn, gfn;
gfn 747 drivers/pinctrl/tegra/pinctrl-tegra.c for (gfn = 0; gfn < 4; gfn++)
gfn 748 drivers/pinctrl/tegra/pinctrl-tegra.c if (g->funcs[gfn] == fn)
gfn 750 drivers/pinctrl/tegra/pinctrl-tegra.c if (gfn == 4)
gfn 1337 drivers/scsi/bfa/bfa_fcbuild.c fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
gfn 1344 drivers/scsi/bfa/bfa_fcbuild.c memset(gfn, 0, sizeof(fcgs_gfn_req_t));
gfn 1345 drivers/scsi/bfa/bfa_fcbuild.c gfn->wwn = wwn;
gfn 191 drivers/tty/hvc/hvc_xen.c unsigned long gfn;
gfn 220 drivers/tty/hvc/hvc_xen.c gfn = v;
gfn 221 drivers/tty/hvc/hvc_xen.c info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
gfn 837 drivers/xen/gntdev.c bool writeable, unsigned long *gfn)
gfn 851 drivers/xen/gntdev.c *gfn = pfn_to_gfn(xen_pfn);
gfn 928 drivers/xen/gntdev.c unsigned long gfn;
gfn 952 drivers/xen/gntdev.c ret = gntdev_get_page(batch, virt, false, &gfn);
gfn 956 drivers/xen/gntdev.c op->source.u.gmfn = gfn;
gfn 971 drivers/xen/gntdev.c ret = gntdev_get_page(batch, virt, true, &gfn);
gfn 975 drivers/xen/gntdev.c op->dest.u.gmfn = gfn;
gfn 376 drivers/xen/privcmd.c xen_pfn_t gfn;
gfn 378 drivers/xen/privcmd.c ret = get_user(gfn, st->user_gfn);
gfn 386 drivers/xen/privcmd.c gfn |= (err == -ENOENT) ?
gfn 389 drivers/xen/privcmd.c return __put_user(gfn, st->user_gfn++);
gfn 536 drivers/xen/xenbus/xenbus_client.c static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
gfn 542 drivers/xen/xenbus/xenbus_client.c unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
gfn 809 drivers/xen/xenbus/xenbus_client.c static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
gfn 816 drivers/xen/xenbus/xenbus_client.c info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
gfn 45 drivers/xen/xlate_mmu.c typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
gfn 84 drivers/xen/xlate_mmu.c static void setup_hparams(unsigned long gfn, void *data)
gfn 89 drivers/xen/xlate_mmu.c info->h_gpfns[info->h_iter] = gfn;
gfn 145 drivers/xen/xlate_mmu.c xen_pfn_t *gfn, int nr,
gfn 158 drivers/xen/xlate_mmu.c data.fgfn = gfn;
gfn 174 drivers/xen/xlate_mmu.c static void unmap_gfn(unsigned long gfn, void *data)
gfn 179 drivers/xen/xlate_mmu.c xrp.gpfn = gfn;
gfn 197 drivers/xen/xlate_mmu.c static void setup_balloon_gfn(unsigned long gfn, void *data)
gfn 201 drivers/xen/xlate_mmu.c info->pfns[info->idx++] = gfn;
gfn 241 include/linux/kvm_host.h kvm_pfn_t gfn;
gfn 702 include/linux/kvm_host.h int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 705 include/linux/kvm_host.h struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
gfn 706 include/linux/kvm_host.h unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
gfn 707 include/linux/kvm_host.h unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
gfn 708 include/linux/kvm_host.h unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
gfn 709 include/linux/kvm_host.h unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 715 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
gfn 716 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
gfn 717 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
gfn 719 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
gfn 720 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
gfn 721 include/linux/kvm_host.h kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 732 include/linux/kvm_host.h int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
gfn 739 include/linux/kvm_host.h int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
gfn 750 include/linux/kvm_host.h int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
gfn 752 include/linux/kvm_host.h struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
gfn 753 include/linux/kvm_host.h bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
gfn 754 include/linux/kvm_host.h unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 755 include/linux/kvm_host.h void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
gfn 758 include/linux/kvm_host.h struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 759 include/linux/kvm_host.h kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 760 include/linux/kvm_host.h kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 762 include/linux/kvm_host.h int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
gfn 764 include/linux/kvm_host.h struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 768 include/linux/kvm_host.h unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 769 include/linux/kvm_host.h unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
gfn 770 include/linux/kvm_host.h int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
gfn 776 include/linux/kvm_host.h int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
gfn 780 include/linux/kvm_host.h void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
gfn 1011 include/linux/kvm_host.h search_memslots(struct kvm_memslots *slots, gfn_t gfn)
gfn 1017 include/linux/kvm_host.h if (gfn >= memslots[slot].base_gfn &&
gfn 1018 include/linux/kvm_host.h gfn < memslots[slot].base_gfn + memslots[slot].npages)
gfn 1024 include/linux/kvm_host.h if (gfn >= memslots[slot].base_gfn)
gfn 1030 include/linux/kvm_host.h if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
gfn 1031 include/linux/kvm_host.h gfn < memslots[start].base_gfn + memslots[start].npages) {
gfn 1040 include/linux/kvm_host.h __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
gfn 1042 include/linux/kvm_host.h return search_memslots(slots, gfn);
gfn 1046 include/linux/kvm_host.h __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
gfn 1048 include/linux/kvm_host.h return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
gfn 1051 include/linux/kvm_host.h static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
gfn 1053 include/linux/kvm_host.h return gfn_to_memslot(kvm, gfn)->id;
gfn 1064 include/linux/kvm_host.h static inline gpa_t gfn_to_gpa(gfn_t gfn)
gfn 1066 include/linux/kvm_host.h return (gpa_t)gfn << PAGE_SHIFT;
gfn 54 include/linux/kvm_types.h gfn_t gfn;
gfn 259 include/trace/events/kvm.h TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
gfn 260 include/trace/events/kvm.h TP_ARGS(gfn, level, slot, ref),
gfn 264 include/trace/events/kvm.h __field( u64, gfn )
gfn 270 include/trace/events/kvm.h __entry->gfn = gfn;
gfn 272 include/trace/events/kvm.h __entry->hva = ((gfn - slot->base_gfn) <<
gfn 278 include/trace/events/kvm.h __entry->hva, __entry->gfn, __entry->level,
gfn 285 include/trace/events/kvm.h TP_PROTO(u64 gva, u64 gfn),
gfn 287 include/trace/events/kvm.h TP_ARGS(gva, gfn),
gfn 291 include/trace/events/kvm.h __field(u64, gfn)
gfn 296 include/trace/events/kvm.h __entry->gfn = gfn;
gfn 299 include/trace/events/kvm.h TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
gfn 304 include/trace/events/kvm.h TP_PROTO(u64 gva, u64 gfn),
gfn 306 include/trace/events/kvm.h TP_ARGS(gva, gfn)
gfn 311 include/trace/events/kvm.h TP_PROTO(u64 gva, u64 gfn),
gfn 313 include/trace/events/kvm.h TP_ARGS(gva, gfn)
gfn 52 include/xen/arm/page.h static inline unsigned long gfn_to_pfn(unsigned long gfn)
gfn 54 include/xen/arm/page.h return gfn;
gfn 270 include/xen/grant_table.h typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
gfn 84 include/xen/xen-ops.h xen_pfn_t *gfn, int nr,
gfn 97 include/xen/xen-ops.h xen_pfn_t *gfn, int nr,
gfn 134 include/xen/xen-ops.h xen_pfn_t *gfn, int nr,
gfn 140 include/xen/xen-ops.h return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
gfn 148 include/xen/xen-ops.h return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
gfn 196 include/xen/xen-ops.h xen_pfn_t gfn, int nr,
gfn 203 include/xen/xen-ops.h return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
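The kvm_host.h entries above contain the core gfn lookup machinery almost in full: search_memslots() binary-searches slots kept sorted by base_gfn in descending order (as the comparisons in the snippet imply), then bounds-checks the candidate; __gfn_to_hva_memslot() and gfn_to_gpa() are plain arithmetic. The standalone sketch below ties the three together over a toy slot array:

```c
/* Sketch (not kernel code): gfn -> memslot -> hva/gpa, following
 * search_memslots()/__gfn_to_hva_memslot()/gfn_to_gpa() above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ull << PAGE_SHIFT)

struct memslot { uint64_t base_gfn, npages, userspace_addr; };

static const struct memslot *search_memslots(const struct memslot *slots,
					     int used_slots, uint64_t gfn)
{
	int start = 0, end = used_slots, slot;

	while (start < end) {
		slot = start + (end - start) / 2;
		if (gfn >= slots[slot].base_gfn)   /* descending base_gfn */
			end = slot;
		else
			start = slot + 1;
	}
	if (start < used_slots && gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return &slots[start];
	return NULL;
}

int main(void)
{
	/* sorted by base_gfn, highest first */
	struct memslot slots[] = { { 0x800, 256, 0x7f1000000000ull },
				   { 0x000, 512, 0x7f0000000000ull } };
	uint64_t gfn = 0x801;
	const struct memslot *s = search_memslots(slots, 2, gfn);

	if (s)
		printf("gpa=%#llx hva=%#llx\n",
		       (unsigned long long)(gfn << PAGE_SHIFT),
		       (unsigned long long)(s->userspace_addr +
				(gfn - s->base_gfn) * PAGE_SIZE));
	return 0;
}
```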
gfn 1379 virt/kvm/arm/mmu.c gfn_t gfn = *ipap >> PAGE_SHIFT;
gfn 1408 virt/kvm/arm/mmu.c VM_BUG_ON((gfn & mask) != (pfn & mask));
gfn 1677 virt/kvm/arm/mmu.c gfn_t gfn = fault_ipa >> PAGE_SHIFT;
gfn 1721 virt/kvm/arm/mmu.c gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
gfn 1742 virt/kvm/arm/mmu.c pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
gfn 1838 virt/kvm/arm/mmu.c mark_page_dirty(kvm, gfn);
gfn 1915 virt/kvm/arm/mmu.c gfn_t gfn;
gfn 1953 virt/kvm/arm/mmu.c gfn = fault_ipa >> PAGE_SHIFT;
gfn 1954 virt/kvm/arm/mmu.c memslot = gfn_to_memslot(vcpu->kvm, gfn);
gfn 1955 virt/kvm/arm/mmu.c hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
gfn 904 virt/kvm/arm/vgic/vgic-its.c gfn_t gfn;
gfn 928 virt/kvm/arm/vgic/vgic-its.c gfn = addr >> PAGE_SHIFT;
gfn 959 virt/kvm/arm/vgic/vgic-its.c gfn = indirect_ptr >> PAGE_SHIFT;
gfn 966 virt/kvm/arm/vgic/vgic-its.c ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
gfn 147 virt/kvm/kvm_main.c static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
gfn 1380 virt/kvm/kvm_main.c struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
gfn 1382 virt/kvm/kvm_main.c return __gfn_to_memslot(kvm_memslots(kvm), gfn);
gfn 1386 virt/kvm/kvm_main.c struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 1388 virt/kvm/kvm_main.c return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
gfn 1391 virt/kvm/kvm_main.c bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
gfn 1393 virt/kvm/kvm_main.c struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
gfn 1403 virt/kvm/kvm_main.c unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 1410 virt/kvm/kvm_main.c addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
gfn 1432 virt/kvm/kvm_main.c static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 1442 virt/kvm/kvm_main.c *nr_pages = slot->npages - (gfn - slot->base_gfn);
gfn 1444 virt/kvm/kvm_main.c return __gfn_to_hva_memslot(slot, gfn);
gfn 1447 virt/kvm/kvm_main.c static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 1450 virt/kvm/kvm_main.c return __gfn_to_hva_many(slot, gfn, nr_pages, true);
gfn 1454 virt/kvm/kvm_main.c gfn_t gfn)
gfn 1456 virt/kvm/kvm_main.c return gfn_to_hva_many(slot, gfn, NULL);
gfn 1460 virt/kvm/kvm_main.c unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
gfn 1462 virt/kvm/kvm_main.c return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
gfn 1466 virt/kvm/kvm_main.c unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 1468 virt/kvm/kvm_main.c return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
gfn 1481 virt/kvm/kvm_main.c gfn_t gfn, bool *writable)
gfn 1483 virt/kvm/kvm_main.c unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
gfn 1491 virt/kvm/kvm_main.c unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
gfn 1493 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
gfn 1495 virt/kvm/kvm_main.c return gfn_to_hva_memslot_prot(slot, gfn, writable);
gfn 1498 virt/kvm/kvm_main.c unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
gfn 1500 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 1502 virt/kvm/kvm_main.c return gfn_to_hva_memslot_prot(slot, gfn, writable);
gfn 1705 virt/kvm/kvm_main.c kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 1709 virt/kvm/kvm_main.c unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
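The virt/kvm/arm/mmu.c entries at lines 1379 and 1408 belong to the transparent-hugepage fixup: a stage-2 block mapping is only legal when gfn and pfn are congruent modulo the pages-per-block count, which the kernel asserts with VM_BUG_ON((gfn & mask) != (pfn & mask)). Below is a small userspace sketch of that congruence check; the names are illustrative, and 2 MiB blocks over 4 KiB pages are an assumption, not something fixed by the listing.

/* Userspace model of the gfn/pfn alignment check for block mappings. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21  /* assumed: 2 MiB block with 4 KiB pages */
#define PTRS_PER_PMD (1UL << (PMD_SHIFT - PAGE_SHIFT))

/* A block mapping needs gfn and pfn to agree in the low bits. */
static bool can_use_block_mapping(uint64_t gfn, uint64_t pfn)
{
	uint64_t mask = PTRS_PER_PMD - 1;

	return (gfn & mask) == (pfn & mask);
}

int main(void)
{
	printf("aligned:    %d\n", can_use_block_mapping(0x200, 0x5400));
	printf("misaligned: %d\n", can_use_block_mapping(0x201, 0x5400));
	return 0;
}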
gfn 1734 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
gfn 1737 virt/kvm/kvm_main.c return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
gfn 1742 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
gfn 1744 virt/kvm/kvm_main.c return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
gfn 1748 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
gfn 1750 virt/kvm/kvm_main.c return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
gfn 1754 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
gfn 1756 virt/kvm/kvm_main.c return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
gfn 1760 virt/kvm/kvm_main.c kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 1762 virt/kvm/kvm_main.c return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
gfn 1766 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
gfn 1768 virt/kvm/kvm_main.c return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
gfn 1772 virt/kvm/kvm_main.c kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 1774 virt/kvm/kvm_main.c return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
gfn 1778 virt/kvm/kvm_main.c int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 1784 virt/kvm/kvm_main.c addr = gfn_to_hva_many(slot, gfn, &entry);
gfn 1808 virt/kvm/kvm_main.c struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
gfn 1812 virt/kvm/kvm_main.c pfn = gfn_to_pfn(kvm, gfn);
gfn 1824 virt/kvm/kvm_main.c cache->pfn = cache->gfn = 0;
gfn 1832 virt/kvm/kvm_main.c static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 1837 virt/kvm/kvm_main.c cache->pfn = gfn_to_pfn_memslot(slot, gfn);
gfn 1838 virt/kvm/kvm_main.c cache->gfn = gfn;
gfn 1843 virt/kvm/kvm_main.c static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
gfn 1851 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
gfn 1858 virt/kvm/kvm_main.c if (!cache->pfn || cache->gfn != gfn ||
gfn 1862 virt/kvm/kvm_main.c kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
gfn 1868 virt/kvm/kvm_main.c pfn = gfn_to_pfn_memslot(slot, gfn);
gfn 1893 virt/kvm/kvm_main.c map->gfn = gfn;
gfn 1898 virt/kvm/kvm_main.c int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
gfn 1901 virt/kvm/kvm_main.c return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
gfn 1906 virt/kvm/kvm_main.c int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
gfn 1908 virt/kvm/kvm_main.c return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
gfn 1938 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, map->gfn);
gfn 1952 virt/kvm/kvm_main.c __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
gfn 1960 virt/kvm/kvm_main.c __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
gfn 1965 virt/kvm/kvm_main.c struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 1969 virt/kvm/kvm_main.c pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
gfn 2037 virt/kvm/kvm_main.c static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 2043 virt/kvm/kvm_main.c addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
gfn 2052 virt/kvm/kvm_main.c int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
gfn 2055 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
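The __kvm_map_gfn()/kvm_cache_gfn_to_pfn() fragments above (virt/kvm/kvm_main.c lines 1824-1868) implement a one-entry gfn->pfn cache: the cached pfn is reused only if the cached gfn matches and the memslot generation has not changed. A minimal userspace sketch of that caching pattern follows, with invented names and a fake translation function standing in for gfn_to_pfn_memslot().

/* Userspace model of a one-entry gfn -> pfn cache with generation check. */
#include <stdint.h>
#include <stdio.h>

struct gfn_cache {
	uint64_t gfn;
	uint64_t pfn;        /* 0 means "empty", as in the kernel's cache */
	uint64_t generation; /* memslot generation the entry was filled at */
};

/* Stand-in for gfn_to_pfn_memslot(); a real lookup would pin a host page. */
static uint64_t slow_translate(uint64_t gfn)
{
	return gfn + 0x100000; /* arbitrary fake mapping */
}

static uint64_t cached_gfn_to_pfn(struct gfn_cache *c, uint64_t gfn,
				  uint64_t cur_gen)
{
	/* Miss if empty, different gfn, or the memslots changed underneath. */
	if (!c->pfn || c->gfn != gfn || c->generation != cur_gen) {
		c->pfn = slow_translate(gfn);
		c->gfn = gfn;
		c->generation = cur_gen;
	}
	return c->pfn;
}

int main(void)
{
	struct gfn_cache c = { 0, 0, 0 };

	printf("pfn = %#llx\n",
	       (unsigned long long)cached_gfn_to_pfn(&c, 0x42, 1));
	printf("pfn = %#llx (cache hit)\n",
	       (unsigned long long)cached_gfn_to_pfn(&c, 0x42, 1));
	return 0;
}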
gfn 2057 virt/kvm/kvm_main.c return __kvm_read_guest_page(slot, gfn, data, offset, len);
gfn 2061 virt/kvm/kvm_main.c int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
gfn 2064 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 2066 virt/kvm/kvm_main.c return __kvm_read_guest_page(slot, gfn, data, offset, len);
gfn 2072 virt/kvm/kvm_main.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 2078 virt/kvm/kvm_main.c ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
gfn 2084 virt/kvm/kvm_main.c ++gfn;
gfn 2092 virt/kvm/kvm_main.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 2098 virt/kvm/kvm_main.c ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
gfn 2104 virt/kvm/kvm_main.c ++gfn;
gfn 2110 virt/kvm/kvm_main.c static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
gfn 2116 virt/kvm/kvm_main.c addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
gfn 2130 virt/kvm/kvm_main.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 2131 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
gfn 2134 virt/kvm/kvm_main.c return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
gfn 2141 virt/kvm/kvm_main.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 2142 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 2145 virt/kvm/kvm_main.c return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
gfn 2149 virt/kvm/kvm_main.c static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
gfn 2155 virt/kvm/kvm_main.c addr = gfn_to_hva_memslot(memslot, gfn);
gfn 2161 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, gfn);
gfn 2165 virt/kvm/kvm_main.c int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
gfn 2168 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
gfn 2170 virt/kvm/kvm_main.c return __kvm_write_guest_page(slot, gfn, data, offset, len);
gfn 2174 virt/kvm/kvm_main.c int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
gfn 2177 virt/kvm/kvm_main.c struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 2179 virt/kvm/kvm_main.c return __kvm_write_guest_page(slot, gfn, data, offset, len);
gfn 2186 virt/kvm/kvm_main.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 2192 virt/kvm/kvm_main.c ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
gfn 2198 virt/kvm/kvm_main.c ++gfn;
gfn 2207 virt/kvm/kvm_main.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 2213 virt/kvm/kvm_main.c ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
gfn 2219 virt/kvm/kvm_main.c ++gfn;
gfn 2331 virt/kvm/kvm_main.c int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
gfn 2335 virt/kvm/kvm_main.c return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
gfn 2341 virt/kvm/kvm_main.c gfn_t gfn = gpa >> PAGE_SHIFT;
gfn 2347 virt/kvm/kvm_main.c ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
gfn 2352 virt/kvm/kvm_main.c ++gfn;
gfn 2359 virt/kvm/kvm_main.c gfn_t gfn)
gfn 2362 virt/kvm/kvm_main.c unsigned long rel_gfn = gfn - memslot->base_gfn;
gfn 2368 virt/kvm/kvm_main.c void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
gfn 2372 virt/kvm/kvm_main.c memslot = gfn_to_memslot(kvm, gfn);
gfn 2373 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, gfn);
gfn 2377 virt/kvm/kvm_main.c void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
gfn 2381 virt/kvm/kvm_main.c memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
gfn 2382 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, gfn);
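The kvm_read_guest()/kvm_write_guest() fragments above (gpa >> PAGE_SHIFT, a per-page segment, then ++gfn) split a guest-physical access that may straddle page boundaries into page-sized chunks. A self-contained userspace sketch of that segmentation follows; read_one_page() is a hypothetical stand-in for kvm_read_guest_page() that only reports each segment.

/* Userspace model of KVM's per-page segmentation of a guest access. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for kvm_read_guest_page(); a real one copies from the hva. */
static int read_one_page(uint64_t gfn, unsigned int offset, unsigned int len)
{
	printf("gfn %#llx, offset %u, len %u\n",
	       (unsigned long long)gfn, offset, len);
	return 0;
}

static int read_guest(uint64_t gpa, unsigned long len)
{
	uint64_t gfn = gpa >> PAGE_SHIFT;              /* first page touched */
	unsigned int offset = gpa & (PAGE_SIZE - 1);   /* offset into it */

	while (len) {
		/* Never cross a page boundary in a single segment. */
		unsigned int seg = PAGE_SIZE - offset < len ?
				   PAGE_SIZE - offset : len;
		int ret = read_one_page(gfn, offset, seg);

		if (ret < 0)
			return ret;
		offset = 0;  /* later pages are read from their start */
		len -= seg;
		++gfn;
	}
	return 0;
}

int main(void)
{
	/* A 5000-byte read starting 100 bytes before a page boundary. */
	return read_guest(0x2000 - 100, 5000);
}

The same shape recurs in kvm_write_guest() and kvm_clear_guest(), with the write path additionally calling mark_page_dirty_in_slot(), which sets bit (gfn - base_gfn) in the slot's dirty bitmap as shown at lines 2359-2362.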