Lines Matching defs:gfn
231 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte()
762 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn()
774 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot()
789 gfn_t gfn; in account_shadowed() local
807 gfn_t gfn; in unaccount_shadowed() local
821 static int __has_wrprotected_page(gfn_t gfn, int level, in __has_wrprotected_page()
834 static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in has_wrprotected_page()
842 static int host_mapping_level(struct kvm *kvm, gfn_t gfn) in host_mapping_level()
871 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap()
1035 static unsigned long *__gfn_to_rmap(gfn_t gfn, int level, in __gfn_to_rmap()
1047 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp) in gfn_to_rmap()
1065 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add()
1079 gfn_t gfn; in rmap_remove() local
1342 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) in rmap_write_protect()
1377 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_unmap_rmapp()
1384 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_set_pte_rmapp()
1435 gfn_t gfn; member
1501 gfn_t gfn, in kvm_handle_hva_range()
1545 gfn_t gfn, int level, in kvm_handle_hva()
1567 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_age_rmapp()
1588 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_test_age_rmapp()
1614 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle()
1696 static unsigned kvm_page_table_hashfn(gfn_t gfn) in kvm_page_table_hashfn()
1943 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_sync_pages()
2082 gfn_t gfn, in kvm_mmu_get_page()
2415 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page()
2446 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_unsync_pages()
2458 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, in mmu_need_write_protect()
2489 gfn_t gfn, pfn_t pfn, bool speculative, in set_spte()
2569 int level, gfn_t gfn, pfn_t pfn, bool speculative, in mmu_set_spte()
2629 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, in pte_prefetch_gfn_to_pfn()
2649 gfn_t gfn; in direct_pte_prefetch_many() local
2712 int map_writable, int level, gfn_t gfn, pfn_t pfn, in __direct_map()
2762 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page()
2785 gfn_t gfn = *gfnp; in transparent_hugepage_adjust() local
2822 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn()
2866 gfn_t gfn; in fast_pf_fix_direct_spte() local
2981 gfn_t gfn, bool prefault) in nonpaging_map()
3375 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault() local
3400 gfn_t gfn; in nonpaging_page_fault() local
3424 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf()
3445 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, in try_async_pf()
3472 check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in check_hugepage_cache_consistency()
3488 gfn_t gfn = gpa >> PAGE_SHIFT; in tdp_page_fault() local
3577 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte()
4287 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mmu_pte_write() local
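
Every entry above defines or receives a gfn_t, KVM's guest frame number; the listing appears to come from the x86 MMU code (arch/x86/kvm/mmu.c). The index itself shows how a gfn is obtained: tdp_page_fault() (source line 3488) and kvm_mmu_pte_write() (source line 4287) both compute "gfn_t gfn = gpa >> PAGE_SHIFT;", i.e. a guest physical address with its page offset stripped. The sketch below is a minimal standalone illustration of that relationship only, not kernel code: the types and PAGE_SHIFT are redefined locally so it compiles on its own, whereas the kernel gets them (and the equivalent gpa_to_gfn()/gfn_to_gpa() helpers) from its own headers.

/*
 * Standalone illustration of the gfn <-> gpa relationship used throughout
 * the listing above. Assumes 4 KiB pages (PAGE_SHIFT = 12), the usual x86
 * value; everything here is redefined locally for the sake of the example.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;   /* guest frame number     */
typedef uint64_t gpa_t;   /* guest physical address */

#define PAGE_SHIFT 12

/* Drop the page offset: which guest page frame does this address fall in? */
static gfn_t gpa_to_gfn(gpa_t gpa)
{
	return gpa >> PAGE_SHIFT;
}

/* Back to the page-aligned guest physical address of that frame. */
static gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

int main(void)
{
	gpa_t gpa = 0x12345678;

	printf("gpa 0x%llx -> gfn 0x%llx -> gpa 0x%llx\n",
	       (unsigned long long)gpa,
	       (unsigned long long)gpa_to_gfn(gpa),
	       (unsigned long long)gfn_to_gpa(gpa_to_gfn(gpa)));
	return 0;
}

With the values above this prints "gpa 0x12345678 -> gfn 0x12345 -> gpa 0x12345000": the round trip loses the in-page offset, which is exactly why the functions in the listing (rmap lookup, memslot lookup, shadow-page hashing) key on the gfn rather than on the full guest physical address.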