Lines matching refs:gfn (KVM's legacy IOMMU device-assignment code)
46 static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, in kvm_pin_pages() argument
52 pfn = gfn_to_pfn_memslot(slot, gfn); in kvm_pin_pages()
53 end_gfn = gfn + npages; in kvm_pin_pages()
54 gfn += 1; in kvm_pin_pages()
59 while (gfn < end_gfn) in kvm_pin_pages()
60 gfn_to_pfn_memslot(slot, gfn++); in kvm_pin_pages()
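
The matches above all fall in kvm_pin_pages() (virt/kvm/iommu.c in older kernels). Read together, they pin npages consecutive gfns and return the pfn of the first one. A minimal reconstruction around the matched lines; the error check on the first pfn is assumed from context, not shown in the listing:

	static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
				   unsigned long npages)
	{
		gfn_t end_gfn;
		pfn_t pfn;

		/* Pin the first page; its pfn is the return value. */
		pfn     = gfn_to_pfn_memslot(slot, gfn);
		end_gfn = gfn + npages;
		gfn    += 1;

		if (is_error_noslot_pfn(pfn))	/* assumed error path */
			return pfn;

		/* Pin the rest of the range, dropping the returned pfns. */
		while (gfn < end_gfn)
			gfn_to_pfn_memslot(slot, gfn++);

		return pfn;
	}

Returning only the first pfn works because the caller pins ranges it has already aligned to a single host page, so the backing pfns are contiguous.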
75 gfn_t gfn, end_gfn; in kvm_iommu_map_pages() local
85 gfn = slot->base_gfn; in kvm_iommu_map_pages()
86 end_gfn = gfn + slot->npages; in kvm_iommu_map_pages()
95 while (gfn < end_gfn) { in kvm_iommu_map_pages()
99 if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { in kvm_iommu_map_pages()
100 gfn += 1; in kvm_iommu_map_pages()
105 page_size = kvm_host_page_size(kvm, gfn); in kvm_iommu_map_pages()
108 while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) in kvm_iommu_map_pages()
112 while ((gfn << PAGE_SHIFT) & (page_size - 1)) in kvm_iommu_map_pages()
116 while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1)) in kvm_iommu_map_pages()
123 pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); in kvm_iommu_map_pages()
125 gfn += 1; in kvm_iommu_map_pages()
130 r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), in kvm_iommu_map_pages()
139 gfn += page_size >> PAGE_SHIFT; in kvm_iommu_map_pages()
147 kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); in kvm_iommu_map_pages()
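
The kvm_iommu_map_pages() matches trace the per-slot mapping loop: skip gfns the IOMMU already translates, start from the host page size, shrink it until it fits the slot and both ends of the translation are aligned, then pin and map. A sketch assembled around the matched lines; the iommu 'flags' computation and error reporting are assumptions, only the matched calls and loop structure come from the listing:

	int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
	{
		gfn_t gfn, end_gfn;
		pfn_t pfn;
		int r = 0;
		struct iommu_domain *domain = kvm->arch.iommu_domain;
		int flags = IOMMU_READ | IOMMU_WRITE;	/* assumed default */

		if (!domain)	/* no IOMMU in use for this VM */
			return 0;

		gfn     = slot->base_gfn;
		end_gfn = gfn + slot->npages;

		while (gfn < end_gfn) {
			unsigned long page_size;

			/* Skip gfns the IOMMU already translates. */
			if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
				gfn += 1;
				continue;
			}

			/* Start from the host page size backing this gfn... */
			page_size = kvm_host_page_size(kvm, gfn);

			/* ...but never map past the end of the memslot... */
			while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
				page_size >>= 1;

			/* ...keep the guest physical address size-aligned... */
			while ((gfn << PAGE_SHIFT) & (page_size - 1))
				page_size >>= 1;

			/* ...as well as the host virtual address. */
			while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
				page_size >>= 1;

			/* Pin the range so no page is freed while mapped. */
			pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
			if (is_error_noslot_pfn(pfn)) {
				gfn += 1;
				continue;
			}

			/* One IOMMU entry covers page_size bytes: gpa -> hpa. */
			r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
				      page_size, flags);
			if (r)
				goto unmap_pages;

			gfn += page_size >> PAGE_SHIFT;
		}

		return 0;

	unmap_pages:
		/* Unwind everything this call mapped (line 147 above). */
		kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
		return r;
	}

The three shrink loops (lines 108-116 in the listing) guarantee that a single iommu_map() call never crosses the memslot boundary and that the chosen mapping size is aligned at both the guest-physical and host-virtual ends.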
275 gfn_t end_gfn, gfn; in kvm_iommu_put_pages() local
281 gfn = base_gfn; in kvm_iommu_put_pages()
287 while (gfn < end_gfn) { in kvm_iommu_put_pages()
292 phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); in kvm_iommu_put_pages()
295 gfn++; in kvm_iommu_put_pages()
302 size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); in kvm_iommu_put_pages()
308 gfn += unmap_pages; in kvm_iommu_put_pages()
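
The final group of matches is the teardown side, kvm_iommu_put_pages(). A sketch reconstructed around the matched lines; kvm_unpin_pages() as the counterpart of kvm_pin_pages() is inferred from the function's purpose rather than shown in the listing:

	static void kvm_iommu_put_pages(struct kvm *kvm,
					gfn_t base_gfn, unsigned long npages)
	{
		struct iommu_domain *domain = kvm->arch.iommu_domain;
		gfn_t end_gfn, gfn;
		pfn_t pfn;
		u64 phys;

		if (!domain)
			return;

		gfn     = base_gfn;
		end_gfn = base_gfn + npages;

		while (gfn < end_gfn) {
			unsigned long unmap_pages;
			size_t size;

			/* Ask the IOMMU what this gpa maps to; skip holes. */
			phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
			if (!phys) {
				gfn++;
				continue;
			}
			pfn = phys >> PAGE_SHIFT;

			/*
			 * iommu_unmap() may tear down a mapping larger than
			 * the PAGE_SIZE requested and returns the size it
			 * actually unmapped, so advance by that many pages.
			 */
			size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
			unmap_pages = 1ULL << get_order(size);

			/* Drop the references kvm_pin_pages() took. */
			kvm_unpin_pages(kvm, pfn, unmap_pages);

			gfn += unmap_pages;
		}
	}

Because iommu_unmap() reports the real size it tore down, the loop stays correct even when the map side installed a huge-page entry covering many gfns at once.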