Lines Matching refs:gfn

500 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
503 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
550 gfn_t gfn;
560 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
562 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
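The hits around 500-562 are the PAE PDPTE reload check: in PAE paging CR3 points at a 32-byte-aligned table of four page-directory-pointer entries, so the code masks CR3 down to that boundary, splits it into a guest frame number plus an in-page offset, and reads the entries through the nested/walk MMU via kvm_read_guest_page_mmu(). A minimal sketch of that gfn/offset computation, assuming the usual KVM helpers; the access mask and the surrounding caching/error handling are taken on trust from the rest of the function:

    /* Sketch: locate the PAE PDPTE table named by CR3 and read its four
     * 64-bit entries through the nested-guest read helper. */
    u64 pdpte[4];
    unsigned long cr3 = kvm_read_cr3(vcpu);
    gfn_t gfn  = (cr3 & ~31u) >> PAGE_SHIFT;       /* frame holding the table    */
    int offset = (cr3 & ~31u) & (PAGE_SIZE - 1);   /* table's offset in the page */
    int r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
                                       PFERR_USER_MASK | PFERR_WRITE_MASK);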
1990 u64 gfn;
2001 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
2002 addr = gfn_to_hva(kvm, gfn);
2010 mark_page_dirty(kvm, gfn);
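Lines 1990-2010 are the HV_X64_MSR_HYPERCALL write handler: the MSR value carries the hypercall page's guest frame number in its upper bits, so the handler shifts it out, translates the gfn to a host virtual address with gfn_to_hva(), patches the hypercall stub into that page, and marks the gfn dirty for dirty logging. A hedged sketch of the gfn plumbing, with the vendor-specific instruction patching reduced to a comment:

    /* Sketch: install the Hyper-V hypercall page at the gfn encoded in the
     * MSR value.  "instructions" stands in for the VMCALL/VMMCALL stub the
     * real handler asks the vendor module to emit. */
    u64 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
    unsigned long addr = gfn_to_hva(kvm, gfn);
    u8 instructions[4];

    if (kvm_is_error_hva(addr))
            return 1;
    /* kvm_x86_ops->patch_hypercall(vcpu, instructions); */
    instructions[3] = 0xc3;                 /* terminate the stub with RET */
    if (copy_to_user((void __user *)addr, instructions, sizeof(instructions)))
            return 1;
    mark_page_dirty(kvm, gfn);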
2014 u64 gfn;
2020 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
2021 if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
2024 mark_page_dirty(kvm, gfn);
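Lines 2014-2024, the HV_X64_MSR_REFERENCE_TSC handler, go the other way: instead of mapping the gfn to a host address, the code rebuilds a guest physical address by shifting the gfn back up and hands the copy to kvm_write_guest(). A sketch, using a zero-initialized local as a stand-in for the reference-TSC page structure the real handler writes:

    /* Sketch: publish a zeroed reference-TSC page at the gfn taken from the
     * MSR value, writing by guest physical address rather than by hva. */
    struct { u32 tsc_sequence; u32 res; u64 tsc_scale; s64 tsc_offset; } tsc_ref = {};
    u64 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;

    if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                        &tsc_ref, sizeof(tsc_ref)))
            return 1;
    mark_page_dirty(kvm, gfn);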
2039 u64 gfn;
2048 gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
2049 addr = gfn_to_hva(vcpu->kvm, gfn);
2055 mark_page_dirty(vcpu->kvm, gfn);
2056 if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
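Lines 2039-2056 set up the Hyper-V APIC assist page: the gfn again comes from the MSR's upper bits, the page is cleared through its host mapping, and the same gfn is converted to a gpa with gfn_to_gpa() when PV EOI is enabled. A sketch of the enable path only (the disable path clears the state and passes 0 to kvm_lapic_enable_pv_eoi()):

    /* Sketch: locate and zero the APIC assist page, then hand its guest
     * physical address, tagged as enabled, to the PV-EOI machinery. */
    u64 gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
    unsigned long addr = gfn_to_hva(vcpu->kvm, gfn);

    if (kvm_is_error_hva(addr))
            return 1;
    if (clear_user((void __user *)addr, PAGE_SIZE))
            return 1;
    mark_page_dirty(vcpu->kvm, gfn);
    if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
            return 1;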
7564 * If the gfn and userspace address are not aligned wrt each
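The comment at line 7564 belongs to the memslot setup path: KVM can only back a slot with large pages if the slot's base gfn and the gfn implied by its userspace address are aligned the same way relative to the huge-page size at each supported level. A sketch of that check, assuming slot->base_gfn and slot->userspace_addr from struct kvm_memory_slot; the helper that actually flags the slot is hypothetical here:

    /* Sketch: huge pages at this level are usable only if the base gfn and
     * the userspace-address-derived gfn agree modulo the huge-page size. */
    unsigned long ugfn = slot->userspace_addr >> PAGE_SHIFT;
    unsigned long mask = KVM_PAGES_PER_HPAGE(level) - 1;

    if ((slot->base_gfn ^ ugfn) & mask)
            disable_large_pages_for_slot(slot, level);  /* hypothetical helper */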
7833 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
7835 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
7843 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7845 u32 key = kvm_async_pf_hash_fn(gfn);
7850 vcpu->arch.apf.gfns[key] = gfn;
7853 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
7856 u32 key = kvm_async_pf_hash_fn(gfn);
7859 (vcpu->arch.apf.gfns[key] != gfn &&
7866 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7868 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
7871 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7875 i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
7907 kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
7932 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
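Lines 7833-7932 implement a small open-addressed hash of gfns with outstanding async page faults: hash_32() over the low 32 bits of the gfn picks a slot in vcpu->arch.apf.gfns[], collisions probe linearly to the next slot, ~0 marks an empty entry, and entries are added when an async fault is queued (line 7907) and removed when it completes (line 7932). Deletion has to re-pack the probe chain, which the compressed sketch below leaves out; the apf_* names and the standalone table are local to the sketch, not the kernel's:

    /* Sketch of the async-PF gfn table: open addressing with linear probing.
     * The table has roundup_pow_of_two(ASYNC_PF_PER_VCPU) slots and must be
     * pre-filled with ~0 ("empty") before use. */
    #include <linux/types.h>
    #include <linux/hash.h>
    #include <linux/log2.h>
    #include <linux/kvm_types.h>

    #define ASYNC_PF_PER_VCPU 64   /* assumed to match the x86 kvm_host.h value */
    #define APF_SLOTS roundup_pow_of_two(ASYNC_PF_PER_VCPU)

    static u32 apf_hash(gfn_t gfn)
    {
            return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
    }

    static u32 apf_next_probe(u32 key)
    {
            return (key + 1) & (APF_SLOTS - 1);     /* wrap around the table */
    }

    static void apf_add_gfn(gfn_t *gfns, gfn_t gfn)
    {
            u32 key = apf_hash(gfn);

            while (gfns[key] != ~0)                 /* probe past collisions */
                    key = apf_next_probe(key);
            gfns[key] = gfn;
    }

    static bool apf_find_gfn(gfn_t *gfns, gfn_t gfn)
    {
            u32 key = apf_hash(gfn);
            int i;

            /* stop on a hit, an empty slot, or after a full sweep */
            for (i = 0; i < APF_SLOTS &&
                        gfns[key] != gfn && gfns[key] != ~0; i++)
                    key = apf_next_probe(key);
            return gfns[key] == gfn;
    }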