Lines matching refs: kvm

Cross-reference listing for KVM's legacy IOMMU device-assignment glue (by the function names, virt/kvm/iommu.c in an older kernel tree). The leading number on each entry is that match's line number in the source file, the trailing "in fn()" tag names the enclosing function, and "argument" marks definition lines where kvm appears as a parameter.

42 static int kvm_iommu_unmap_memslots(struct kvm *kvm);
43 static void kvm_iommu_put_pages(struct kvm *kvm,
65 static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) in kvm_unpin_pages() argument
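The match above is the definition line of kvm_unpin_pages(). A minimal sketch of the presumed body, assuming it simply drops one reference per previously pinned frame via the standard kvm_release_pfn_clean() helper:

    static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn,
                                unsigned long npages)
    {
            unsigned long i;

            /* Drop one reference for each page pinned at map time. */
            for (i = 0; i < npages; ++i)
                    kvm_release_pfn_clean(pfn + i);
    }
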
73 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_iommu_map_pages() argument
78 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_iommu_map_pages()
91 if (!kvm->arch.iommu_noncoherent) in kvm_iommu_map_pages()
105 page_size = kvm_host_page_size(kvm, gfn); in kvm_iommu_map_pages()
135 kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); in kvm_iommu_map_pages()
147 kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); in kvm_iommu_map_pages()
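The matches from lines 73-147 are the slot-mapping loop. A hedged reconstruction of how they fit together: pin each run of guest frames, map it into the IOMMU domain with flags derived from slot writability and coherency, and unwind on failure. The control flow between the matched lines is inferred, kvm_pin_pages() stands in for the file's local pinning helper (name assumed), and the original's alignment handling is omitted:

    int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
    {
            gfn_t gfn = slot->base_gfn;
            gfn_t end_gfn = gfn + slot->npages;
            struct iommu_domain *domain = kvm->arch.iommu_domain; /* 78 */
            int r, flags = IOMMU_READ;
            pfn_t pfn;

            if (!domain)            /* IOMMU not in use for this VM */
                    return 0;

            if (!(slot->flags & KVM_MEM_READONLY))
                    flags |= IOMMU_WRITE;
            if (!kvm->arch.iommu_noncoherent)               /* line 91 */
                    flags |= IOMMU_CACHE;

            while (gfn < end_gfn) {
                    unsigned long page_size;

                    /* Skip frames the IOMMU already maps. */
                    if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
                            gfn += 1;
                            continue;
                    }

                    /* Largest host page backing this gfn (line 105). */
                    page_size = kvm_host_page_size(kvm, gfn);

                    /* Assumed local helper: pin the whole run of pages. */
                    pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);

                    r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
                                  page_size, flags);
                    if (r) {
                            /* Unpin this chunk (line 135)... */
                            kvm_unpin_pages(kvm, pfn,
                                            page_size >> PAGE_SHIFT);
                            goto unmap_pages;
                    }
                    gfn += page_size >> PAGE_SHIFT;
            }
            return 0;

    unmap_pages:
            /* ...and tear down everything mapped so far (line 147). */
            kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
            return r;
    }
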
151 static int kvm_iommu_map_memslots(struct kvm *kvm) in kvm_iommu_map_memslots() argument
157 if (kvm->arch.iommu_noncoherent) in kvm_iommu_map_memslots()
158 kvm_arch_register_noncoherent_dma(kvm); in kvm_iommu_map_memslots()
160 idx = srcu_read_lock(&kvm->srcu); in kvm_iommu_map_memslots()
161 slots = kvm_memslots(kvm); in kvm_iommu_map_memslots()
164 r = kvm_iommu_map_pages(kvm, memslot); in kvm_iommu_map_memslots()
168 srcu_read_unlock(&kvm->srcu, idx); in kvm_iommu_map_memslots()
173 int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev) in kvm_assign_device() argument
175 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_assign_device()
195 if (noncoherent != kvm->arch.iommu_noncoherent) { in kvm_assign_device()
196 kvm_iommu_unmap_memslots(kvm); in kvm_assign_device()
197 kvm->arch.iommu_noncoherent = noncoherent; in kvm_assign_device()
198 r = kvm_iommu_map_memslots(kvm); in kvm_assign_device()
209 kvm_iommu_unmap_memslots(kvm); in kvm_assign_device()
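kvm_assign_device() attaches a PCI device to the VM's IOMMU domain and, per lines 195-198, remaps all guest memory whenever the new device flips the domain's cache-coherency status (so IOMMU_CACHE can be added to or dropped from every mapping). A sketch with two caveats: iommu_capable() is the later-kernel spelling of the coherency probe (older trees used iommu_domain_has_cap()), and the original's device-assignment bookkeeping is omitted:

    int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
    {
            struct iommu_domain *domain = kvm->arch.iommu_domain; /* 175 */
            bool noncoherent;
            int r;

            if (!domain)            /* IOMMU not in use for this VM */
                    return 0;
            if (!pdev)
                    return -ENODEV;

            r = iommu_attach_device(domain, &pdev->dev);
            if (r)
                    return r;

            /* Can the IOMMU enforce cache coherency for DMA? */
            noncoherent = !iommu_capable(&pci_bus_type,
                                         IOMMU_CAP_CACHE_COHERENCY);

            if (noncoherent != kvm->arch.iommu_noncoherent) {   /* 195 */
                    kvm_iommu_unmap_memslots(kvm);              /* 196 */
                    kvm->arch.iommu_noncoherent = noncoherent;  /* 197 */
                    r = kvm_iommu_map_memslots(kvm);            /* 198 */
                    if (r)
                            goto out_unmap;
            }

            return 0;

    out_unmap:
            kvm_iommu_unmap_memslots(kvm);                      /* 209 */
            return r;
    }
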
213 int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev) in kvm_deassign_device() argument
215 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_deassign_device()
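For kvm_deassign_device() only the definition and the domain lookup are matched. A minimal sketch of the presumed remainder, assuming a plain iommu_detach_device() with the assignment bookkeeping again omitted:

    int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
    {
            struct iommu_domain *domain = kvm->arch.iommu_domain; /* 215 */

            if (!domain)
                    return 0;
            if (!pdev)
                    return -ENODEV;

            iommu_detach_device(domain, &pdev->dev);
            return 0;
    }
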
233 int kvm_iommu_map_guest(struct kvm *kvm) in kvm_iommu_map_guest() argument
242 mutex_lock(&kvm->slots_lock); in kvm_iommu_map_guest()
244 kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); in kvm_iommu_map_guest()
245 if (!kvm->arch.iommu_domain) { in kvm_iommu_map_guest()
256 iommu_domain_free(kvm->arch.iommu_domain); in kvm_iommu_map_guest()
257 kvm->arch.iommu_domain = NULL; in kvm_iommu_map_guest()
262 r = kvm_iommu_map_memslots(kvm); in kvm_iommu_map_guest()
264 kvm_iommu_unmap_memslots(kvm); in kvm_iommu_map_guest()
267 mutex_unlock(&kvm->slots_lock); in kvm_iommu_map_guest()
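kvm_iommu_map_guest() sets up the per-VM domain under slots_lock. Lines 256-257 free the freshly allocated domain again, which implies a check between allocation (lines 244-245) and mapping (line 262) that can fail; contemporaneous trees gated assignment on IOMMU interrupt remapping, so that guard and the allow_unsafe_assigned_interrupts parameter name are assumptions here:

    int kvm_iommu_map_guest(struct kvm *kvm)
    {
            int r;

            if (!iommu_present(&pci_bus_type))
                    return -ENODEV;

            mutex_lock(&kvm->slots_lock);                       /* 242 */

            kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
            if (!kvm->arch.iommu_domain) {                      /* 245 */
                    r = -ENOMEM;
                    goto out_unlock;
            }

            /* Assumed guard: refuse unsafe setups unless opted in. */
            if (!allow_unsafe_assigned_interrupts &&
                !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
                    iommu_domain_free(kvm->arch.iommu_domain);  /* 256 */
                    kvm->arch.iommu_domain = NULL;              /* 257 */
                    r = -EPERM;
                    goto out_unlock;
            }

            r = kvm_iommu_map_memslots(kvm);                    /* 262 */
            if (r)
                    kvm_iommu_unmap_memslots(kvm);              /* 264 */

    out_unlock:
            mutex_unlock(&kvm->slots_lock);                     /* 267 */
            return r;
    }
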
271 static void kvm_iommu_put_pages(struct kvm *kvm, in kvm_iommu_put_pages() argument
279 domain = kvm->arch.iommu_domain; in kvm_iommu_put_pages()
306 kvm_unpin_pages(kvm, pfn, unmap_pages); in kvm_iommu_put_pages()
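kvm_iommu_put_pages() is the inverse of the mapping loop: look up what the IOMMU holds for each guest frame, unmap it, and unpin however many pages that mapping covered (the matched line 306). The lookup and unmap steps between the matched lines are inferred:

    static void kvm_iommu_put_pages(struct kvm *kvm,
                                    gfn_t base_gfn, unsigned long npages)
    {
            struct iommu_domain *domain = kvm->arch.iommu_domain; /* 279 */
            gfn_t gfn = base_gfn;
            gfn_t end_gfn = base_gfn + npages;

            if (!domain)
                    return;

            while (gfn < end_gfn) {
                    unsigned long unmap_pages;
                    phys_addr_t phys;
                    size_t size;
                    pfn_t pfn;

                    /* Find the host page backing this guest frame. */
                    phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
                    if (!phys) {
                            gfn++;          /* nothing mapped here */
                            continue;
                    }
                    pfn = phys >> PAGE_SHIFT;

                    /* Unmap whatever size mapping sits at this address. */
                    size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
                    unmap_pages = 1ULL << get_order(size);

                    /* Unpin every page just unmapped (line 306). */
                    kvm_unpin_pages(kvm, pfn, unmap_pages);

                    gfn += unmap_pages;
            }
    }
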
314 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_iommu_unmap_pages() argument
316 kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); in kvm_iommu_unmap_pages()
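The two matches above are the entire exported wrapper, assembled:

    void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
    {
            /* Public entry point: drop everything the slot had mapped. */
            kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
    }
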
319 static int kvm_iommu_unmap_memslots(struct kvm *kvm) in kvm_iommu_unmap_memslots() argument
325 idx = srcu_read_lock(&kvm->srcu); in kvm_iommu_unmap_memslots()
326 slots = kvm_memslots(kvm); in kvm_iommu_unmap_memslots()
329 kvm_iommu_unmap_pages(kvm, memslot); in kvm_iommu_unmap_memslots()
331 srcu_read_unlock(&kvm->srcu, idx); in kvm_iommu_unmap_memslots()
333 if (kvm->arch.iommu_noncoherent) in kvm_iommu_unmap_memslots()
334 kvm_arch_unregister_noncoherent_dma(kvm); in kvm_iommu_unmap_memslots()
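kvm_iommu_unmap_memslots() mirrors the mapping walker: tear down every memslot under SRCU, then drop the noncoherent-DMA registration taken at map time. Again only the kvm_for_each_memslot() iteration is assumed:

    static int kvm_iommu_unmap_memslots(struct kvm *kvm)
    {
            struct kvm_memslots *slots;
            struct kvm_memory_slot *memslot;
            int idx;

            idx = srcu_read_lock(&kvm->srcu);               /* line 325 */
            slots = kvm_memslots(kvm);                      /* line 326 */

            kvm_for_each_memslot(memslot, slots)
                    kvm_iommu_unmap_pages(kvm, memslot);    /* line 329 */

            srcu_read_unlock(&kvm->srcu, idx);              /* line 331 */

            if (kvm->arch.iommu_noncoherent)                /* line 333 */
                    kvm_arch_unregister_noncoherent_dma(kvm);

            return 0;
    }
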
339 int kvm_iommu_unmap_guest(struct kvm *kvm) in kvm_iommu_unmap_guest() argument
341 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_iommu_unmap_guest()
347 mutex_lock(&kvm->slots_lock); in kvm_iommu_unmap_guest()
348 kvm_iommu_unmap_memslots(kvm); in kvm_iommu_unmap_guest()
349 kvm->arch.iommu_domain = NULL; in kvm_iommu_unmap_guest()
350 kvm->arch.iommu_noncoherent = false; in kvm_iommu_unmap_guest()
351 mutex_unlock(&kvm->slots_lock); in kvm_iommu_unmap_guest()
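Finally, kvm_iommu_unmap_guest() tears the whole domain down. The domain pointer is read before taking slots_lock (line 341), which suggests it is freed after the arch field has been cleared; that trailing iommu_domain_free() call is inferred:

    int kvm_iommu_unmap_guest(struct kvm *kvm)
    {
            struct iommu_domain *domain = kvm->arch.iommu_domain; /* 341 */

            if (!domain)            /* IOMMU was never set up */
                    return 0;

            mutex_lock(&kvm->slots_lock);                   /* line 347 */
            kvm_iommu_unmap_memslots(kvm);                  /* line 348 */
            kvm->arch.iommu_domain = NULL;                  /* line 349 */
            kvm->arch.iommu_noncoherent = false;            /* line 350 */
            mutex_unlock(&kvm->slots_lock);                 /* line 351 */

            iommu_domain_free(domain);  /* inferred final teardown */
            return 0;
    }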