Lines matching refs: gfn
231 static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
238 mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT; in mark_mmio_spte()
240 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
261 static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
265 mark_mmio_spte(kvm, sptep, gfn, access); in set_mmio_spte()
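The mark_mmio_spte()/set_mmio_spte() references above pack the gfn straight into the PTE bits. A minimal, self-contained sketch of that packing (and of the inverse used by the get_mmio_spte_gfn() reference further down): shadow_mmio_mask is treated as opaque, the access bits are assumed to sit below PAGE_SHIFT, and the generation handling of the real code is left out.

#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12

/* Encode: MMIO marker mask | access bits | gfn in the frame-number field. */
static uint64_t sketch_mmio_spte(uint64_t shadow_mmio_mask,
                                 uint64_t gfn, unsigned int access)
{
        return shadow_mmio_mask | access | (gfn << SKETCH_PAGE_SHIFT);
}

/* Decode: strip the marker mask, then shift the low access bits away. */
static uint64_t sketch_mmio_spte_gfn(uint64_t shadow_mmio_mask, uint64_t spte)
{
        return (spte & ~shadow_mmio_mask) >> SKETCH_PAGE_SHIFT;
}
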
759 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
762 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
765 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); in kvm_mmu_page_set_gfn()
767 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
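A sketch of the index-to-gfn arithmetic used by kvm_mmu_page_get_gfn() above: entry `index` of a level-N shadow page spans 512^(N-1) guest frames, so its gfn is the page's base gfn plus the scaled index. PT64_LEVEL_BITS is assumed to be 9 (512 entries per table), as on x86-64.

#include <stdint.h>

#define SKETCH_PT64_LEVEL_BITS 9

static uint64_t sketch_sp_index_to_gfn(uint64_t sp_base_gfn, int index, int level)
{
        return sp_base_gfn +
               ((uint64_t)index << ((level - 1) * SKETCH_PT64_LEVEL_BITS));
}

For a level-1 (4k) page the shift is zero and the result is simply sp->gfn + index; for a level-2 page each entry advances the gfn by 512 frames.
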
774 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot() argument
780 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
784 static void account_shadowed(struct kvm *kvm, gfn_t gfn) in account_shadowed() argument
790 slot = gfn_to_memslot(kvm, gfn); in account_shadowed()
793 linfo = lpage_info_slot(gfn, slot, i); in account_shadowed()
799 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) in unaccount_shadowed() argument
805 slot = gfn_to_memslot(kvm, gfn); in unaccount_shadowed()
808 linfo = lpage_info_slot(gfn, slot, i); in unaccount_shadowed()
816 gfn_t gfn, in has_wrprotected_page() argument
822 slot = gfn_to_memslot(kvm, gfn); in has_wrprotected_page()
824 linfo = lpage_info_slot(gfn, slot, level); in has_wrprotected_page()
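The lpage_info lookups above and the rmap lookups further down both index per-memslot arrays with gfn_to_index(gfn, slot->base_gfn, level), whose body is not part of this listing. The following sketch shows the arithmetic under the assumption that KVM_HPAGE_GFN_SHIFT(level) equals (level - 1) * 9 (level 1 = 4k, level 2 = 2M, level 3 = 1G on x86).

#include <stdint.h>

static uint64_t sketch_gfn_to_index(uint64_t gfn, uint64_t base_gfn, int level)
{
        unsigned int shift = (level - 1) * 9;   /* assumed KVM_HPAGE_GFN_SHIFT(level) */

        return (gfn >> shift) - (base_gfn >> shift);
}
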
831 static int host_mapping_level(struct kvm *kvm, gfn_t gfn) in host_mapping_level() argument
836 page_size = kvm_host_page_size(kvm, gfn); in host_mapping_level()
850 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, in gfn_to_memslot_dirty_bitmap() argument
855 slot = gfn_to_memslot(vcpu->kvm, gfn); in gfn_to_memslot_dirty_bitmap()
1010 static unsigned long *__gfn_to_rmap(gfn_t gfn, int level, in __gfn_to_rmap() argument
1015 idx = gfn_to_index(gfn, slot->base_gfn, level); in __gfn_to_rmap()
1022 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) in gfn_to_rmap() argument
1026 slot = gfn_to_memslot(kvm, gfn); in gfn_to_rmap()
1027 return __gfn_to_rmap(gfn, level, slot); in gfn_to_rmap()
1038 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
1044 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
1045 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_add()
1052 gfn_t gfn; in rmap_remove() local
1056 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
1057 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); in rmap_remove()
1322 static bool rmap_write_protect(struct kvm *kvm, u64 gfn) in rmap_write_protect() argument
1329 slot = gfn_to_memslot(kvm, gfn); in rmap_write_protect()
1333 rmapp = __gfn_to_rmap(gfn, i, slot); in rmap_write_protect()
1341 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_unmap_rmapp() argument
1351 sptep, *sptep, gfn, level); in kvm_unmap_rmapp()
1361 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_set_pte_rmapp() argument
1377 sptep, *sptep, gfn, level); in kvm_set_pte_rmapp()
1411 gfn_t gfn, in kvm_handle_hva_range() argument
1442 gfn_t gfn = gfn_start; in kvm_handle_hva_range() local
1454 ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j))) in kvm_handle_hva_range()
1456 gfn, j, data); in kvm_handle_hva_range()
1467 gfn_t gfn, int level, in kvm_handle_hva() argument
1489 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_age_rmapp() argument
1508 trace_kvm_age_page(gfn, level, slot, young); in kvm_age_rmapp()
1513 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_test_age_rmapp() argument
1543 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1550 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); in rmap_recycle()
1552 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
1625 static unsigned kvm_page_table_hashfn(gfn_t gfn) in kvm_page_table_hashfn() argument
1627 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); in kvm_page_table_hashfn()
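kvm_page_table_hashfn() above is just a mask of the low gfn bits. Sketch below; the shift width is an assumption (KVM_MMU_HASH_SHIFT has historically been 10, i.e. 1024 buckets), while the masking itself is taken from the listing.

#include <stdint.h>

#define SKETCH_KVM_MMU_HASH_SHIFT 10

static unsigned int sketch_page_table_hashfn(uint64_t gfn)
{
        return (unsigned int)(gfn & ((1u << SKETCH_KVM_MMU_HASH_SHIFT) - 1));
}

kvm_mmu_get_page() feeds this value into kvm->arch.mmu_page_hash to pick the bucket a new shadow page is chained on (see the hlist insertion entries below).
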
1818 if ((_sp)->gfn != (_gfn)) {} else
1872 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_sync_pages() argument
1878 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_sync_pages()
1970 protected |= rmap_write_protect(vcpu->kvm, sp->gfn); in mmu_sync_children()
2011 gfn_t gfn, in kvm_mmu_get_page() argument
2035 for_each_gfn_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_get_page()
2063 sp->gfn = gfn; in kvm_mmu_get_page()
2066 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); in kvm_mmu_get_page()
2068 if (rmap_write_protect(vcpu->kvm, gfn)) in kvm_mmu_get_page()
2071 kvm_sync_pages(vcpu, gfn); in kvm_mmu_get_page()
2073 account_shadowed(vcpu->kvm, gfn); in kvm_mmu_get_page()
2254 unaccount_shadowed(kvm, sp->gfn); in kvm_mmu_prepare_zap_page()
2344 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page() argument
2350 pgprintk("%s: looking for gfn %llx\n", __func__, gfn); in kvm_mmu_unprotect_page()
2353 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { in kvm_mmu_unprotect_page()
2354 pgprintk("%s: gfn %llx role %x\n", __func__, gfn, in kvm_mmu_unprotect_page()
2459 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_get_guest_memory_type() argument
2463 mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, in kvm_get_guest_memory_type()
2464 (gfn << PAGE_SHIFT) + PAGE_SIZE); in kvm_get_guest_memory_type()
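kvm_get_guest_memory_type() above consults the guest MTRR state over the guest-physical byte range backing the gfn. The range computation, spelled out as a sketch with PAGE_SHIFT assumed to be 12:

#include <stdint.h>

static void sketch_gfn_to_gpa_range(uint64_t gfn, uint64_t *start, uint64_t *end)
{
        *start = gfn << 12;             /* gfn << PAGE_SHIFT */
        *end = *start + (1ULL << 12);   /* exclusive end of the 4k frame */
}
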
2480 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_unsync_pages() argument
2484 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in kvm_unsync_pages()
2492 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, in mmu_need_write_protect() argument
2498 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { in mmu_need_write_protect()
2509 kvm_unsync_pages(vcpu, gfn); in mmu_need_write_protect()
2515 gfn_t gfn, pfn_t pfn, bool speculative, in set_spte() argument
2521 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access)) in set_spte()
2539 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, in set_spte()
2558 has_wrprotected_page(vcpu->kvm, gfn, level)) in set_spte()
2572 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { in set_spte()
2574 __func__, gfn); in set_spte()
2582 mark_page_dirty(vcpu->kvm, gfn); in set_spte()
2595 int level, gfn_t gfn, pfn_t pfn, bool speculative, in mmu_set_spte() argument
2602 *sptep, write_fault, gfn); in mmu_set_spte()
2626 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2639 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, in mmu_set_spte()
2646 rmap_count = rmap_add(vcpu, sptep, gfn); in mmu_set_spte()
2648 rmap_recycle(vcpu, sptep, gfn); in mmu_set_spte()
2655 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, in pte_prefetch_gfn_to_pfn() argument
2660 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); in pte_prefetch_gfn_to_pfn()
2664 return gfn_to_pfn_memslot_atomic(slot, gfn); in pte_prefetch_gfn_to_pfn()
2674 gfn_t gfn; in direct_pte_prefetch_many() local
2676 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); in direct_pte_prefetch_many()
2677 if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK)) in direct_pte_prefetch_many()
2680 ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start); in direct_pte_prefetch_many()
2684 for (i = 0; i < ret; i++, gfn++, start++) in direct_pte_prefetch_many()
2686 sp->role.level, gfn, page_to_pfn(pages[i]), in direct_pte_prefetch_many()
2736 int map_writable, int level, gfn_t gfn, pfn_t pfn, in __direct_map() argument
2747 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { in __direct_map()
2750 write, &emulate, level, gfn, pfn, in __direct_map()
2786 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) in kvm_handle_bad_page() argument
2798 kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); in kvm_handle_bad_page()
2809 gfn_t gfn = *gfnp; in transparent_hugepage_adjust() local
2821 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { in transparent_hugepage_adjust()
2834 VM_BUG_ON((gfn & mask) != (pfn & mask)); in transparent_hugepage_adjust()
2836 gfn &= ~mask; in transparent_hugepage_adjust()
2837 *gfnp = gfn; in transparent_hugepage_adjust()
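The transparent_hugepage_adjust() entries above round the gfn (and pfn) down to a huge-page boundary before the fault is mapped at the larger level. A sketch of just that alignment step, with the pfn reference-count handling of the real function left out; 512 pages per 2M huge page is the usual x86 value and an assumption here.

#include <assert.h>
#include <stdint.h>

static void sketch_align_to_hpage(uint64_t *gfn, uint64_t *pfn,
                                  uint64_t pages_per_hpage)
{
        uint64_t mask = pages_per_hpage - 1;

        /* mirrors the VM_BUG_ON above: low bits of gfn and pfn must agree */
        assert((*gfn & mask) == (*pfn & mask));
        *gfn &= ~mask;
        *pfn &= ~mask;
}

nonpaging_map() and tdp_page_fault() further down apply the same rounding via gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1) once a mapping level has been chosen.
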
2846 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn() argument
2853 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); in handle_abnormal_pfn()
2858 vcpu_cache_mmio_info(vcpu, gva, gfn, access); in handle_abnormal_pfn()
2890 gfn_t gfn; in fast_pf_fix_direct_spte() local
2898 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in fast_pf_fix_direct_spte()
2913 mark_page_dirty(vcpu->kvm, gfn); in fast_pf_fix_direct_spte()
3000 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3005 gfn_t gfn, bool prefault) in nonpaging_map() argument
3014 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); in nonpaging_map()
3016 level = mapping_level(vcpu, gfn); in nonpaging_map()
3025 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); in nonpaging_map()
3035 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) in nonpaging_map()
3038 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) in nonpaging_map()
3046 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in nonpaging_map()
3047 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, in nonpaging_map()
3350 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault_common() local
3359 trace_handle_mmio_page_fault(addr, gfn, access); in handle_mmio_page_fault_common()
3360 vcpu_cache_mmio_info(vcpu, addr, gfn, access); in handle_mmio_page_fault_common()
3385 gfn_t gfn; in nonpaging_page_fault() local
3403 gfn = gva >> PAGE_SHIFT; in nonpaging_page_fault()
3406 error_code, gfn, prefault); in nonpaging_page_fault()
3409 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf() argument
3414 arch.gfn = gfn; in kvm_arch_setup_async_pf()
3418 return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch); in kvm_arch_setup_async_pf()
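kvm_arch_setup_async_pf() above passes gfn_to_hva(vcpu->kvm, gfn) to kvm_setup_async_pf(). The per-memslot translation behind that call is linear; a sketch with plain integers instead of the kernel's struct kvm_memory_slot (field names kept for recognisability, PAGE_SHIFT assumed 12):

#include <stdint.h>

struct sketch_memslot {
        uint64_t base_gfn;          /* first gfn covered by the slot */
        uint64_t userspace_addr;    /* host VA that backs base_gfn */
};

static uint64_t sketch_gfn_to_hva(const struct sketch_memslot *slot, uint64_t gfn)
{
        return slot->userspace_addr + ((gfn - slot->base_gfn) << 12);
}
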
3430 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, in try_async_pf() argument
3435 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); in try_async_pf()
3441 trace_kvm_try_async_get_page(gva, gfn); in try_async_pf()
3442 if (kvm_find_async_pf_gfn(vcpu, gfn)) { in try_async_pf()
3443 trace_kvm_async_pf_doublefault(gva, gfn); in try_async_pf()
3446 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) in try_async_pf()
3450 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); in try_async_pf()
3462 gfn_t gfn = gpa >> PAGE_SHIFT; in tdp_page_fault() local
3480 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); in tdp_page_fault()
3482 level = mapping_level(vcpu, gfn); in tdp_page_fault()
3483 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); in tdp_page_fault()
3493 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) in tdp_page_fault()
3496 if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) in tdp_page_fault()
3504 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in tdp_page_fault()
3506 level, gfn, pfn, prefault); in tdp_page_fault()
3548 static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
3552 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
3558 mark_mmio_spte(kvm, sptep, gfn, access); in sync_mmio_spte()
4167 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mmu_pte_write() local
4205 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { in kvm_mmu_pte_write()