Lines Matching refs:kvm

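The matches below are grouped by enclosing function and read like the legacy KVM device-assignment IOMMU glue, i.e. virt/kvm/iommu.c (later arch/x86/kvm/iommu.c) as it stood before legacy device assignment was removed around Linux 4.12. Each entry gives the file line number, the matching source line, and the enclosing function; a trailing "argument" marks lines where kvm appears as a function parameter. To make the call structure easier to follow, condensed sketches of the surrounding functions are interspersed below; they are best-effort reconstructions of that era of the code, not verbatim copies.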
42 static int kvm_iommu_unmap_memslots(struct kvm *kvm);
43 static void kvm_iommu_put_pages(struct kvm *kvm,
65 static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) in kvm_unpin_pages() argument
73 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_iommu_map_pages() argument
78 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_iommu_map_pages()
91 if (!kvm->arch.iommu_noncoherent) in kvm_iommu_map_pages()
105 page_size = kvm_host_page_size(kvm, gfn); in kvm_iommu_map_pages()
135 kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); in kvm_iommu_map_pages()
147 kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); in kvm_iommu_map_pages()
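Lines 73 through 147 all fall inside kvm_iommu_map_pages(), which pins a memslot's pages and mirrors them into the IOMMU domain. A condensed sketch of its main loop, assuming the pre-4.12 layout of this file (kvm_pin_pages() is a static helper defined alongside it):

	gfn = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	/* Mapping flags: read always, write unless the slot is read-only,
	 * and IOMMU_CACHE only for coherent DMA (the check on line 91). */
	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Skip gfns already mapped in the domain. */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Start from the host page size (line 105), then shrink
		 * until the chunk fits the memslot and both the gfn and
		 * the hva are aligned to it. */
		page_size = kvm_host_page_size(kvm, gfn);
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/* Pin before mapping; unmap/unpin happens in 4k steps. */
		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			/* Drop this chunk's pins (line 135), then unwind
			 * everything mapped so far (line 147). */
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;
	}
	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;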
151 static int kvm_iommu_map_memslots(struct kvm *kvm) in kvm_iommu_map_memslots() argument
157 if (kvm->arch.iommu_noncoherent) in kvm_iommu_map_memslots()
158 kvm_arch_register_noncoherent_dma(kvm); in kvm_iommu_map_memslots()
160 idx = srcu_read_lock(&kvm->srcu); in kvm_iommu_map_memslots()
161 slots = kvm_memslots(kvm); in kvm_iommu_map_memslots()
164 r = kvm_iommu_map_pages(kvm, memslot); in kvm_iommu_map_memslots()
168 srcu_read_unlock(&kvm->srcu, idx); in kvm_iommu_map_memslots()
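kvm_iommu_map_memslots() (lines 151 through 168) is a thin iterator around the function above: register non-coherent DMA with the arch code if needed, then walk every memslot under an SRCU read-side critical section and map each one, bailing out on the first failure. Roughly:

static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_register_noncoherent_dma(kvm);

	/* Memslots are SRCU-protected; hold the read lock for the walk. */
	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}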
173 int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev) in kvm_assign_device() argument
175 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_assign_device()
195 if (noncoherent != kvm->arch.iommu_noncoherent) { in kvm_assign_device()
196 kvm_iommu_unmap_memslots(kvm); in kvm_assign_device()
197 kvm->arch.iommu_noncoherent = noncoherent; in kvm_assign_device()
198 r = kvm_iommu_map_memslots(kvm); in kvm_assign_device()
203 kvm_arch_start_assignment(kvm); in kvm_assign_device()
210 kvm_iommu_unmap_memslots(kvm); in kvm_assign_device()
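The wrinkle in kvm_assign_device() (lines 173 through 210) is DMA coherency: after attaching the device to the domain, the code probes whether the IOMMU enforces cache coherency, and if that disagrees with the domain's current iommu_noncoherent state it unmaps and remaps every memslot so the IOMMU_CACHE flag is applied consistently. A sketch, using the iommu_capable() form of the probe (older trees used iommu_domain_has_cap() instead):

	r = iommu_attach_device(domain, &pdev->dev);
	if (r)
		return r;

	noncoherent = !iommu_capable(&pci_bus_type,
				     IOMMU_CAP_CACHE_COHERENCY);

	/* Coherency changed: rebuild all mappings with the right flags. */
	if (noncoherent != kvm->arch.iommu_noncoherent) {
		kvm_iommu_unmap_memslots(kvm);
		kvm->arch.iommu_noncoherent = noncoherent;
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	kvm_arch_start_assignment(kvm);
	pci_set_dev_assigned(pdev);
	return 0;

out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;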
214 int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev) in kvm_deassign_device() argument
216 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_deassign_device()
228 kvm_arch_end_assignment(kvm); in kvm_deassign_device()
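kvm_deassign_device() (lines 214 through 228) is the mirror image and has no remapping to do: detach the device from the domain, clear the PCI assigned flag, and drop the arch assignment count. Sketch:

	if (!domain)
		return 0;
	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);
	pci_clear_dev_assigned(pdev);
	kvm_arch_end_assignment(kvm);
	return 0;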
235 int kvm_iommu_map_guest(struct kvm *kvm) in kvm_iommu_map_guest() argument
244 mutex_lock(&kvm->slots_lock); in kvm_iommu_map_guest()
246 kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type); in kvm_iommu_map_guest()
247 if (!kvm->arch.iommu_domain) { in kvm_iommu_map_guest()
258 iommu_domain_free(kvm->arch.iommu_domain); in kvm_iommu_map_guest()
259 kvm->arch.iommu_domain = NULL; in kvm_iommu_map_guest()
264 r = kvm_iommu_map_memslots(kvm); in kvm_iommu_map_guest()
266 kvm_iommu_unmap_memslots(kvm); in kvm_iommu_map_guest()
269 mutex_unlock(&kvm->slots_lock); in kvm_iommu_map_guest()
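kvm_iommu_map_guest() (lines 235 through 269) creates the per-VM domain under kvm->slots_lock and then maps every memslot into it. The free/NULL pair at lines 258 and 259 is the interrupt-remapping bail-out: if the IOMMU lacks IOMMU_CAP_INTR_REMAP and the allow_unsafe_assigned_interrupts module parameter is not set, the freshly allocated domain is released and assignment is refused. Sketch:

	if (!iommu_present(&pci_bus_type))
		return -ENODEV;

	mutex_lock(&kvm->slots_lock);

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain) {
		r = -ENOMEM;
		goto out_unlock;
	}

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
		/* No interrupt remapping: refuse device assignment. */
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		r = -EPERM;
		goto out_unlock;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		kvm_iommu_unmap_memslots(kvm);

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	return r;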
273 static void kvm_iommu_put_pages(struct kvm *kvm, in kvm_iommu_put_pages() argument
281 domain = kvm->arch.iommu_domain; in kvm_iommu_put_pages()
308 kvm_unpin_pages(kvm, pfn, unmap_pages); in kvm_iommu_put_pages()
316 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_iommu_unmap_pages() argument
318 kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); in kvm_iommu_unmap_pages()
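kvm_iommu_put_pages() (lines 273 through 308) walks a gfn range, looks up the physical address the IOMMU currently has for each IOVA, unmaps it, and unpins however many 4k pages the unmap actually covered, since a single iommu_unmap() may tear down a large page; kvm_iommu_unmap_pages() (lines 316 through 318) is the whole-slot wrapper around it. Sketch of the loop:

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		size_t size;
		u64 phys;

		/* What is mapped at this guest-physical address? */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		if (!phys) {
			gfn++;
			continue;
		}
		pfn = phys >> PAGE_SHIFT;

		/* iommu_unmap() reports how much was actually unmapped. */
		size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
		unmap_pages = 1ULL << get_order(size);

		/* Unpin every page just unmapped (line 308). */
		kvm_unpin_pages(kvm, pfn, unmap_pages);
		gfn += unmap_pages;
	}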
321 static int kvm_iommu_unmap_memslots(struct kvm *kvm) in kvm_iommu_unmap_memslots() argument
327 idx = srcu_read_lock(&kvm->srcu); in kvm_iommu_unmap_memslots()
328 slots = kvm_memslots(kvm); in kvm_iommu_unmap_memslots()
331 kvm_iommu_unmap_pages(kvm, memslot); in kvm_iommu_unmap_memslots()
333 srcu_read_unlock(&kvm->srcu, idx); in kvm_iommu_unmap_memslots()
335 if (kvm->arch.iommu_noncoherent) in kvm_iommu_unmap_memslots()
336 kvm_arch_unregister_noncoherent_dma(kvm); in kvm_iommu_unmap_memslots()
341 int kvm_iommu_unmap_guest(struct kvm *kvm) in kvm_iommu_unmap_guest() argument
343 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_iommu_unmap_guest()
349 mutex_lock(&kvm->slots_lock); in kvm_iommu_unmap_guest()
350 kvm_iommu_unmap_memslots(kvm); in kvm_iommu_unmap_guest()
351 kvm->arch.iommu_domain = NULL; in kvm_iommu_unmap_guest()
352 kvm->arch.iommu_noncoherent = false; in kvm_iommu_unmap_guest()
353 mutex_unlock(&kvm->slots_lock); in kvm_iommu_unmap_guest()
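kvm_iommu_unmap_memslots() (lines 321 through 336) mirrors the map path: the same SRCU-protected memslot walk, calling kvm_iommu_unmap_pages() on each slot, followed by unregistering non-coherent DMA. kvm_iommu_unmap_guest() (lines 341 through 353) then finishes the teardown: under slots_lock it unmaps all memslots and clears the arch state, and frees the domain once the lock is dropped. Sketch:

int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* Nothing to do if no domain was ever set up. */
	if (!domain)
		return 0;

	mutex_lock(&kvm->slots_lock);
	kvm_iommu_unmap_memslots(kvm);
	kvm->arch.iommu_domain = NULL;
	kvm->arch.iommu_noncoherent = false;
	mutex_unlock(&kvm->slots_lock);

	iommu_domain_free(domain);
	return 0;
}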