Lines matching refs:gfn (illustrative C sketches of these helpers follow the listing)

107 				    struct kvm_memory_slot *memslot, gfn_t gfn);
1089 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
1091 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
1095 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
1097 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
1107 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size() argument
1114 addr = gfn_to_hva(kvm, gfn); in kvm_host_page_size()
1136 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in __gfn_to_hva_many() argument
1146 *nr_pages = slot->npages - (gfn - slot->base_gfn); in __gfn_to_hva_many()
1148 return __gfn_to_hva_memslot(slot, gfn); in __gfn_to_hva_many()
1151 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in gfn_to_hva_many() argument
1154 return __gfn_to_hva_many(slot, gfn, nr_pages, true); in gfn_to_hva_many()
1158 gfn_t gfn) in gfn_to_hva_memslot() argument
1160 return gfn_to_hva_many(slot, gfn, NULL); in gfn_to_hva_memslot()
1164 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
1166 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
1175 gfn_t gfn, bool *writable) in gfn_to_hva_memslot_prot() argument
1177 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); in gfn_to_hva_memslot_prot()
1185 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
1187 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
1189 return gfn_to_hva_memslot_prot(slot, gfn, writable); in gfn_to_hva_prot()
1360 __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, in __gfn_to_pfn_memslot() argument
1363 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); in __gfn_to_pfn_memslot()
1381 static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async, in __gfn_to_pfn() argument
1389 slot = gfn_to_memslot(kvm, gfn); in __gfn_to_pfn()
1391 return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault, in __gfn_to_pfn()
1395 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn_atomic() argument
1397 return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL); in gfn_to_pfn_atomic()
1401 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, in gfn_to_pfn_async() argument
1404 return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable); in gfn_to_pfn_async()
1408 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn() argument
1410 return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL); in gfn_to_pfn()
1414 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, in gfn_to_pfn_prot() argument
1417 return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable); in gfn_to_pfn_prot()
1421 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn) in gfn_to_pfn_memslot() argument
1423 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL); in gfn_to_pfn_memslot()
1426 pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) in gfn_to_pfn_memslot_atomic() argument
1428 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL); in gfn_to_pfn_memslot_atomic()
1432 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, in gfn_to_page_many_atomic() argument
1438 addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry); in gfn_to_page_many_atomic()
1462 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
1466 pfn = gfn_to_pfn(kvm, gfn); in gfn_to_page()
1534 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
1540 addr = gfn_to_hva_prot(kvm, gfn, NULL); in kvm_read_guest_page()
1552 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_read_guest() local
1558 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
1564 ++gfn; in kvm_read_guest()
1575 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_read_guest_atomic() local
1578 addr = gfn_to_hva_prot(kvm, gfn, NULL); in kvm_read_guest_atomic()
1590 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, in kvm_write_guest_page() argument
1596 addr = gfn_to_hva(kvm, gfn); in kvm_write_guest_page()
1602 mark_page_dirty(kvm, gfn); in kvm_write_guest_page()
1610 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_write_guest() local
1616 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
1622 ++gfn; in kvm_write_guest()
1716 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) in kvm_clear_guest_page() argument
1720 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); in kvm_clear_guest_page()
1726 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_clear_guest() local
1732 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); in kvm_clear_guest()
1737 ++gfn; in kvm_clear_guest()
1745 gfn_t gfn) in mark_page_dirty_in_slot() argument
1748 unsigned long rel_gfn = gfn - memslot->base_gfn; in mark_page_dirty_in_slot()
1754 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
1758 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
1759 mark_page_dirty_in_slot(kvm, memslot, gfn); in mark_page_dirty()
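
The hits around __gfn_to_hva_many() show the slot-relative arithmetic these helpers share: the remaining-page count is slot->npages - (gfn - slot->base_gfn), and the host virtual address comes from __gfn_to_hva_memslot(). The sketch below is a minimal standalone model of that translation, not the kernel source: struct memslot, gfn_to_hva_model(), and the assumption that a slot maps contiguously from userspace_addr are mine; PAGE_SHIFT is assumed to be 12.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t gfn_t;

/* Minimal stand-in for struct kvm_memory_slot: a contiguous guest frame
 * range [base_gfn, base_gfn + npages) assumed to be backed by host user
 * memory starting at userspace_addr. */
struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long userspace_addr;
};

/* Mirrors the shape of __gfn_to_hva_many() in the listing: report how many
 * frames remain in the slot starting at gfn, then translate gfn to a host
 * virtual address inside the slot's mapping. */
static unsigned long gfn_to_hva_model(struct memslot *slot, gfn_t gfn,
				      unsigned long *nr_pages)
{
	if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
		return 0;	/* out of slot; the real helpers return an error hva */

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

int main(void)
{
	struct memslot slot = {
		.base_gfn = 0x100, .npages = 16, .userspace_addr = 0x7f0000000000UL,
	};
	unsigned long nr_pages;
	unsigned long hva = gfn_to_hva_model(&slot, 0x104, &nr_pages);

	printf("hva=%#lx, %lu page(s) left in slot\n", hva, nr_pages);
	return 0;
}
```

gfn_to_hva() in the listing is just this lookup chained after gfn_to_memslot(), and the pfn helpers (__gfn_to_pfn_memslot() and its wrappers) start from the same hva before pinning the backing host page.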
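
kvm_read_guest(), kvm_write_guest() and kvm_clear_guest() all show the same loop in the hits above: derive the starting gfn from the gpa, hand one per-page segment at a time to the *_guest_page() helper, then ++gfn. The sketch below only models that segment splitting; split_guest_access(), next_segment()'s exact form, and PAGE_SHIFT = 12 are assumptions for illustration.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;

/* Bytes of the request that fall into the current page. */
static unsigned long next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	return len;
}

/* Model of the loop shape visible in kvm_read_guest()/kvm_write_guest():
 * walk forward one guest frame at a time, with only the first frame
 * accessed at a non-zero offset. */
static void split_guest_access(gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;		/* guest frame number */
	int offset = gpa & (PAGE_SIZE - 1);	/* offset within the first page */
	unsigned long seg;

	while ((seg = next_segment(len, offset)) != 0) {
		/* here the real code calls kvm_read/write_guest_page() */
		printf("gfn=%#llx offset=%d len=%lu\n",
		       (unsigned long long)gfn, offset, seg);
		offset = 0;	/* later pages start at offset 0 */
		len -= seg;
		++gfn;
	}
}

int main(void)
{
	/* A 5000-byte access starting 100 bytes into a page spans two frames. */
	split_guest_access((2 << PAGE_SHIFT) + 100, 5000);
	return 0;
}
```

The write path additionally calls mark_page_dirty() after each successful per-page copy, as line 1602 of the listing shows.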
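
Dirty logging works on the same slot-relative index: mark_page_dirty() resolves the memslot for the gfn and mark_page_dirty_in_slot() computes rel_gfn = gfn - memslot->base_gfn, as the last hits show. The sketch below is a standalone model of that bookkeeping; struct memslot and mark_page_dirty_model() are mine, and the plain OR only approximates the atomic little-endian set_bit the kernel uses on the real dirty bitmap.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Minimal model of the dirty-logging side of a memslot: one bit per guest
 * frame in the slot, indexed by the frame's offset from base_gfn. */
struct memslot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;	/* npages bits, or NULL if logging is off */
};

/* Mirrors the rel_gfn computation shown in mark_page_dirty_in_slot();
 * a non-atomic approximation of the kernel's bitmap update. */
static void mark_page_dirty_model(struct memslot *slot, gfn_t gfn)
{
	if (slot && slot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - slot->base_gfn;

		slot->dirty_bitmap[rel_gfn / BITS_PER_LONG] |=
			1UL << (rel_gfn % BITS_PER_LONG);
	}
}

int main(void)
{
	unsigned long bitmap[1] = { 0 };
	struct memslot slot = { .base_gfn = 0x100, .npages = 16,
				.dirty_bitmap = bitmap };

	mark_page_dirty_model(&slot, 0x103);	/* frame 3 of the slot */
	printf("dirty bitmap word 0: %#lx\n", bitmap[0]);	/* prints 0x8 */
	return 0;
}
```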