Lines matching refs: gfn

592 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
595 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
596 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
597 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
598 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
599 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
605 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
606 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
607 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
609 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
610 pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
611 pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
619 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
626 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
634 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
636 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
637 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
638 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
639 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
642 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
643 pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
644 pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
645 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
646 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
647 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
648 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
654 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
658 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
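
The prototypes above are the gfn-based accessors KVM code uses to touch guest memory without caring where the backing pages live. As a rough illustration of how a few of them fit together, here is a minimal sketch, not taken from the source: the helper name is invented, error handling is abbreviated, and it assumes the caller already holds the SRCU read lock that keeps the memslots stable.

#include <linux/kvm_host.h>

/*
 * Hypothetical helper: write one byte at guest physical address @gpa,
 * using only the accessors listed above. Real callers also have to
 * care about read-only memslots and about where the SRCU lock is taken.
 */
static int demo_poke_guest_byte(struct kvm *kvm, gpa_t gpa, u8 val)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;          /* guest frame number */
        int offset = offset_in_page(gpa);       /* byte offset inside that page */
        int ret;

        if (!kvm_is_visible_gfn(kvm, gfn))      /* no memslot backs this gfn */
                return -EFAULT;

        ret = kvm_write_guest_page(kvm, gfn, &val, offset, sizeof(val));
        if (ret)
                return ret;

        mark_page_dirty(kvm, gfn);              /* keep dirty logging accurate */
        return 0;
}

The kvm_vcpu_* variants further down do the same jobs but resolve the gfn through the memslots visible to that vcpu rather than through the VM-wide memslots directly.
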
918 search_memslots(struct kvm_memslots *slots, gfn_t gfn) in search_memslots() argument
924 if (gfn >= memslots[slot].base_gfn && in search_memslots()
925 gfn < memslots[slot].base_gfn + memslots[slot].npages) in search_memslots()
931 if (gfn >= memslots[slot].base_gfn) in search_memslots()
937 if (gfn >= memslots[start].base_gfn && in search_memslots()
938 gfn < memslots[start].base_gfn + memslots[start].npages) { in search_memslots()
947 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) in __gfn_to_memslot() argument
949 return search_memslots(slots, gfn); in __gfn_to_memslot()
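
The search_memslots() fragments above come from the lookup that every gfn_to_* helper ultimately relies on: a binary search over the memslot array, kept sorted by descending base_gfn and fronted by a check of the slot that satisfied the previous lookup. A reconstruction of the whole inline, built around the matched lines, is sketched below; the surrounding details, such as the lru_slot and used_slots fields, are quoted from memory and can differ between kernel versions.

static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        int start = 0, end = slots->used_slots;
        int slot = atomic_read(&slots->lru_slot);
        struct kvm_memory_slot *memslots = slots->memslots;

        /* Fast path: the slot that served the previous lookup. */
        if (gfn >= memslots[slot].base_gfn &&
            gfn < memslots[slot].base_gfn + memslots[slot].npages)
                return &memslots[slot];

        /* Binary search; the array is sorted by descending base_gfn. */
        while (start < end) {
                slot = start + (end - start) / 2;

                if (gfn >= memslots[slot].base_gfn)
                        end = slot;
                else
                        start = slot + 1;
        }

        /* Confirm the candidate actually covers gfn, and remember it. */
        if (gfn >= memslots[start].base_gfn &&
            gfn < memslots[start].base_gfn + memslots[start].npages) {
                atomic_set(&slots->lru_slot, start);
                return &memslots[start];
        }

        return NULL;
}

__gfn_to_memslot() is then just a thin wrapper around this search; the non-underscore gfn_to_memslot() and kvm_vcpu_gfn_to_memslot() merely choose which kvm_memslots instance to pass in.
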
953 __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) in __gfn_to_hva_memslot() argument
955 return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; in __gfn_to_hva_memslot()
958 static inline int memslot_id(struct kvm *kvm, gfn_t gfn) in memslot_id() argument
960 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
971 static inline gpa_t gfn_to_gpa(gfn_t gfn) in gfn_to_gpa() argument
973 return (gpa_t)gfn << PAGE_SHIFT; in gfn_to_gpa()
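
Taken together, the last few inlines spell out the address arithmetic: gfn_to_gpa() is a PAGE_SHIFT left shift, and __gfn_to_hva_memslot() maps a slot-relative frame number onto the userspace mapping that backs the slot. A hedged sketch of the full gpa-to-host-virtual-address walk follows; the helper name is invented, and the KVM_HVA_ERR_BAD convention for "no mapping" is quoted from memory.

/*
 * Hypothetical helper: turn a guest physical address into the host
 * virtual address that backs it, using the inlines matched above.
 */
static unsigned long demo_gpa_to_hva(struct kvm_memslots *slots, gpa_t gpa)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;          /* inverse of gfn_to_gpa() */
        struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);

        if (!slot)
                return KVM_HVA_ERR_BAD;         /* no memslot covers this gfn */

        /* hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE */
        return __gfn_to_hva_memslot(slot, gfn) + offset_in_page(gpa);
}

The exported gfn_to_hva() and gfn_to_hva_prot() near the top of the listing are essentially this walk plus checks for invalid or read-only memslots.
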