ghc              2681 arch/x86/kvm/lapic.c 	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
ghc              2691 arch/x86/kvm/lapic.c 	if (addr == ghc->gpa && len <= ghc->len)
ghc              2692 arch/x86/kvm/lapic.c 		new_len = ghc->len;
ghc              2696 arch/x86/kvm/lapic.c 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
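
The arch/x86/kvm/lapic.c hits above come from the PV EOI MSR setup path: the pv_eoi cache is rebuilt only for a new guest address, and an existing mapping that already covers the requested length is kept rather than shrunk. A minimal sketch of that guard, assuming the surrounding function (not part of the hits) supplies the requested address and length as addr/len; the function name here is hypothetical:

/*
 * Sketch of the guard visible above: if the guest re-enables PV EOI at the
 * same address, keep the existing (possibly longer) cached mapping instead
 * of shrinking it; otherwise rebuild the cache for the new address.
 * 'addr' and 'len' are assumed to come from the surrounding function,
 * which the search hits do not show.
 */
static int pv_eoi_update_cache(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned long len)
{
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len = len;

	if (addr == ghc->gpa && len <= ghc->len)
		new_len = ghc->len;

	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
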
ghc               737 include/linux/kvm_host.h int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc               743 include/linux/kvm_host.h int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc               745 include/linux/kvm_host.h int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc               748 include/linux/kvm_host.h int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
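
Taken together, the include/linux/kvm_host.h declarations above form the cached guest-access API: a struct gfn_to_hva_cache is initialized once for a guest-physical range, after which reads and writes reuse the cached host virtual address instead of doing a fresh gfn-to-hva lookup. A minimal caller-side sketch, assuming a kernel context with a valid struct kvm; the demo_* names and the shared-record layout are hypothetical:

#include <linux/kvm_host.h>

/* Hypothetical guest/host shared record, used only for illustration. */
struct demo_shared {
	u32 flags;
	u32 sequence;
};

/* One-time setup: point the cache at the guest physical address 'gpa'. */
static int demo_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   gpa_t gpa)
{
	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa,
					 sizeof(struct demo_shared));
}

/* Fast-path write: reuses the cached hva unless the memslots changed. */
static int demo_publish(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			struct demo_shared *val)
{
	return kvm_write_guest_cached(kvm, ghc, val, sizeof(*val));
}

/* Fast-path read back through the same cached range. */
static int demo_peek(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
		     struct demo_shared *val)
{
	return kvm_read_guest_cached(kvm, ghc, val, sizeof(*val));
}
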
ghc              2226 virt/kvm/kvm_main.c 				       struct gfn_to_hva_cache *ghc,
ghc              2236 virt/kvm/kvm_main.c 	ghc->gpa = gpa;
ghc              2237 virt/kvm/kvm_main.c 	ghc->generation = slots->generation;
ghc              2238 virt/kvm/kvm_main.c 	ghc->len = len;
ghc              2239 virt/kvm/kvm_main.c 	ghc->hva = KVM_HVA_ERR_BAD;
ghc              2246 virt/kvm/kvm_main.c 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
ghc              2247 virt/kvm/kvm_main.c 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
ghc              2249 virt/kvm/kvm_main.c 		if (kvm_is_error_hva(ghc->hva))
ghc              2256 virt/kvm/kvm_main.c 		ghc->hva += offset;
ghc              2258 virt/kvm/kvm_main.c 		ghc->memslot = NULL;
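
The virt/kvm/kvm_main.c hits up to this point belong to __kvm_gfn_to_hva_cache_init(): it records the target gpa, the current memslot generation and the length, starts from an invalid hva, then resolves the range via __gfn_to_memslot()/gfn_to_hva_many(); an unresolvable range fails, a resolvable one gets the in-page offset folded into the cached hva, and a range the cache cannot represent directly is left with memslot = NULL so later accesses take the uncached path. A condensed reconstruction of that control flow, not the verbatim function: the single-page fast-path test and the error value are assumptions filled in around the visible lines.

/*
 * Condensed reconstruction built around the lines visible above; the
 * single-page condition and the -EFAULT return are assumptions, and the
 * real routine also validates ranges that span multiple pages.
 */
static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
				       struct gfn_to_hva_cache *ghc,
				       gpa_t gpa, unsigned long len)
{
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t nr_pages_avail;

	/* Record what is cached and for which memslot generation. */
	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->hva = KVM_HVA_ERR_BAD;

	if (offset + len <= PAGE_SIZE) {	/* assumed fast-path condition */
		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
					   &nr_pages_avail);
		if (kvm_is_error_hva(ghc->hva))
			return -EFAULT;
		/* Cache the exact host address, including the in-page offset. */
		ghc->hva += offset;
	} else {
		/* No directly usable hva: cached accessors must fall back. */
		ghc->memslot = NULL;
	}

	return 0;
}
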
ghc              2263 virt/kvm/kvm_main.c int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc              2267 virt/kvm/kvm_main.c 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
ghc              2271 virt/kvm/kvm_main.c int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc              2277 virt/kvm/kvm_main.c 	gpa_t gpa = ghc->gpa + offset;
ghc              2279 virt/kvm/kvm_main.c 	BUG_ON(len + offset > ghc->len);
ghc              2281 virt/kvm/kvm_main.c 	if (slots->generation != ghc->generation)
ghc              2282 virt/kvm/kvm_main.c 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
ghc              2284 virt/kvm/kvm_main.c 	if (kvm_is_error_hva(ghc->hva))
ghc              2287 virt/kvm/kvm_main.c 	if (unlikely(!ghc->memslot))
ghc              2290 virt/kvm/kvm_main.c 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
ghc              2293 virt/kvm/kvm_main.c 	mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
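
The kvm_write_guest_offset_cached() lines above show the cached write path: the length is sanity-checked against the cached range, a memslot-generation mismatch triggers re-initialization, an unusable hva returns -EFAULT, a cache without a memslot falls back to the uncached path (mirroring the kvm_read_guest() fallback visible in the read variant below), and a successful __copy_to_user() is followed by marking the touched page dirty. A caller-side sketch of the offset form, updating one field of a hypothetical guest-shared record; it assumes the cache was initialized to cover at least the whole record, since the visible BUG_ON() rejects anything larger:

#include <linux/kvm_host.h>

/* Hypothetical guest-visible record; only 'ticks' is updated below. */
struct demo_time_page {
	u64 ticks;
	u64 flags;
};

/*
 * Update a single field through an already-initialized cache. The offset
 * form avoids rewriting the whole structure; KVM re-validates the cache
 * if the memslot generation changed and marks the page dirty on success.
 */
static int demo_update_ticks(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			     u64 now)
{
	return kvm_write_guest_offset_cached(kvm, ghc, &now,
					     offsetof(struct demo_time_page, ticks),
					     sizeof(now));
}
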
ghc              2299 virt/kvm/kvm_main.c int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc              2302 virt/kvm/kvm_main.c 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
ghc              2306 virt/kvm/kvm_main.c int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc              2312 virt/kvm/kvm_main.c 	BUG_ON(len > ghc->len);
ghc              2314 virt/kvm/kvm_main.c 	if (slots->generation != ghc->generation)
ghc              2315 virt/kvm/kvm_main.c 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
ghc              2317 virt/kvm/kvm_main.c 	if (kvm_is_error_hva(ghc->hva))
ghc              2320 virt/kvm/kvm_main.c 	if (unlikely(!ghc->memslot))
ghc              2321 virt/kvm/kvm_main.c 		return kvm_read_guest(kvm, ghc->gpa, data, len);
ghc              2323 virt/kvm/kvm_main.c 	r = __copy_from_user(data, (void __user *)ghc->hva, len);
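
kvm_read_guest_cached() mirrors the write path: the same length check, the same generation-driven re-initialization, and a fallback to kvm_read_guest() when no memslot is cached, but it copies with __copy_from_user() and does not mark the page dirty. A caller-side sketch polling a small guest-written value through the same kind of cache; the demo_* name is hypothetical:

#include <linux/kvm_host.h>

/*
 * Poll a guest-written value through a cache set up earlier with
 * kvm_gfn_to_hva_cache_init() for at least sizeof(u32) bytes; on a stale
 * memslot generation KVM re-resolves the hva before copying.
 */
static int demo_read_request(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			     u32 *req)
{
	return kvm_read_guest_cached(kvm, ghc, req, sizeof(*req));
}
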