Lines Matching refs:svm

149 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,  in intel_flush_svm_range_dev()  argument
159 desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) | in intel_flush_svm_range_dev()
162 desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) | in intel_flush_svm_range_dev()
168 desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) | in intel_flush_svm_range_dev()
173 qi_submit_sync(&desc, svm->iommu); in intel_flush_svm_range_dev()
176 desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) | in intel_flush_svm_range_dev()
191 qi_submit_sync(&desc, svm->iommu); in intel_flush_svm_range_dev()
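
These hits cover intel_flush_svm_range_dev(), which packs a two-word queued-invalidation descriptor -- a PASID-granular extended-IOTLB invalidation (QI_EIOTLB_*) or, for a device with an ATS device-TLB, the QI_DEV_EIOTLB_* variant -- and hands it to qi_submit_sync(). A minimal standalone sketch of the same pack-fields-into-a-u64 pattern; the EX_* shifts here are made up for illustration, the real encodings live in include/linux/intel-iommu.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative shift values only -- NOT the VT-d descriptor layout */
    #define EX_PASID(p)   (((uint64_t)(p)) << 32)
    #define EX_DID(d)     (((uint64_t)(d)) << 16)
    #define EX_GRAN(g)    (((uint64_t)(g)) << 4)
    #define EX_TYPE       0x23ULL

    struct qi_desc_sketch { uint64_t low, high; };

    int main(void)
    {
            struct qi_desc_sketch desc;

            /* PASID-granular flush: everything fits in two 64-bit words */
            desc.low  = EX_PASID(5) | EX_DID(1) | EX_GRAN(1) | EX_TYPE;
            desc.high = 0;  /* address/mask fields would go here for a ranged flush */
            printf("desc.low = %#llx\n", (unsigned long long)desc.low);
            return 0;
    }
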
195 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, in intel_flush_svm_range() argument
201 if (svm->iommu->pasid_state_table && in intel_flush_svm_range()
202 !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63)) in intel_flush_svm_range()
206 list_for_each_entry_rcu(sdev, &svm->devs, list) in intel_flush_svm_range()
207 intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); in intel_flush_svm_range()
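
intel_flush_svm_range() tries a deferred path first: when the IOMMU exposes a PASID state table, the cmpxchg64() at line 202 atomically flips the entry from 0 to a flush-pending bit (1ULL << 63) and, if the swap succeeds, returns without touching any device; only when the entry was already flagged does it walk svm->devs under RCU and flush each device. The same compare-and-swap gate, sketched with C11 atomics (try_defer_flush() is a hypothetical name):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NEEDS_FLUSH (1ULL << 63)

    static _Atomic uint64_t pasid_state;  /* stand-in for one pasid_state_table entry */

    /* Returns true when the flush could be deferred: the entry was idle (0)
     * and we marked it, so no descriptor needs to be submitted right now. */
    static bool try_defer_flush(void)
    {
            uint64_t expected = 0;
            return atomic_compare_exchange_strong(&pasid_state, &expected, NEEDS_FLUSH);
    }

    int main(void)
    {
            printf("first call deferred:  %d\n", try_defer_flush());  /* 1: 0 -> flagged */
            printf("second call deferred: %d\n", try_defer_flush());  /* 0: already flagged */
            return 0;
    }
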
214 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); in intel_change_pte() local
216 intel_flush_svm_range(svm, address, 1, 1, 0); in intel_change_pte()
222 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); in intel_invalidate_page() local
224 intel_flush_svm_range(svm, address, 1, 1, 0); in intel_invalidate_page()
232 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); in intel_invalidate_range() local
234 intel_flush_svm_range(svm, start, in intel_invalidate_range()
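
Each of the three mmu_notifier callbacks (change_pte, invalidate_page, invalidate_range) recovers its intel_svm from the embedded notifier member via container_of() before flushing. That idiom -- member pointer back to the enclosing struct -- in a self-contained form (svm_like and its fields are stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct notifier { int dummy; };
    struct svm_like {
            int pasid;
            struct notifier notifier;  /* embedded, like intel_svm::notifier */
    };

    static void callback(struct notifier *mn)
    {
            /* same recovery as container_of(mn, struct intel_svm, notifier) */
            struct svm_like *svm = container_of(mn, struct svm_like, notifier);
            printf("pasid = %d\n", svm->pasid);
    }

    int main(void)
    {
            struct svm_like s = { .pasid = 42 };
            callback(&s.notifier);
            return 0;
    }
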
239 static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid) in intel_flush_pasid_dev() argument
246 qi_submit_sync(&desc, svm->iommu); in intel_flush_pasid_dev()
251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); in intel_mm_release() local
266 svm->iommu->pasid_table[svm->pasid].val = 0; in intel_mm_release()
270 list_for_each_entry_rcu(sdev, &svm->devs, list) { in intel_mm_release()
271 intel_flush_pasid_dev(svm, sdev, svm->pasid); in intel_mm_release()
272 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); in intel_mm_release()
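
intel_mm_release() runs when the process exits: it clears the PASID-table entry first, so the hardware stops walking the dying mm's page tables, and only then flushes the PASID cache and the full TLB range for every bound device. A kernel-style sketch of that ordering, reusing the file's own helpers (not standalone; the RCU locking shown is an assumption about the surrounding code):

    static void release_sketch(struct intel_svm *svm)
    {
            struct intel_svm_dev *sdev;

            svm->iommu->pasid_table[svm->pasid].val = 0;  /* stop new walks first */

            rcu_read_lock();
            list_for_each_entry_rcu(sdev, &svm->devs, list) {
                    intel_flush_pasid_dev(svm, sdev, svm->pasid);             /* PASID cache */
                    intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); /* whole TLB */
            }
            rcu_read_unlock();
    }
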
291 struct intel_svm *svm = NULL; in intel_svm_bind_mm() local
318 idr_for_each_entry(&iommu->pasid_idr, svm, i) { in intel_svm_bind_mm()
319 if (svm->mm != mm || in intel_svm_bind_mm()
320 (svm->flags & SVM_FLAG_PRIVATE_PASID)) in intel_svm_bind_mm()
323 if (svm->pasid >= pasid_max) { in intel_svm_bind_mm()
326 svm->pasid); in intel_svm_bind_mm()
331 list_for_each_entry(sdev, &svm->devs, list) { in intel_svm_bind_mm()
365 if (!svm) { in intel_svm_bind_mm()
366 svm = kzalloc(sizeof(*svm), GFP_KERNEL); in intel_svm_bind_mm()
367 if (!svm) { in intel_svm_bind_mm()
372 svm->iommu = iommu; in intel_svm_bind_mm()
378 ret = idr_alloc(&iommu->pasid_idr, svm, in intel_svm_bind_mm()
382 kfree(svm); in intel_svm_bind_mm()
385 svm->pasid = ret; in intel_svm_bind_mm()
386 svm->notifier.ops = &intel_mmuops; in intel_svm_bind_mm()
387 svm->mm = mm; in intel_svm_bind_mm()
388 svm->flags = flags; in intel_svm_bind_mm()
389 INIT_LIST_HEAD_RCU(&svm->devs); in intel_svm_bind_mm()
392 ret = mmu_notifier_register(&svm->notifier, mm); in intel_svm_bind_mm()
394 idr_remove(&svm->iommu->pasid_idr, svm->pasid); in intel_svm_bind_mm()
395 kfree(svm); in intel_svm_bind_mm()
399 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1; in intel_svm_bind_mm()
401 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11); in intel_svm_bind_mm()
413 intel_flush_pasid_dev(svm, sdev, 0); in intel_svm_bind_mm()
415 list_add_rcu(&sdev->list, &svm->devs); in intel_svm_bind_mm()
418 *pasid = svm->pasid; in intel_svm_bind_mm()
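
The bind path either reuses an intel_svm already in the per-IOMMU IDR (unless SVM_FLAG_PRIVATE_PASID forces a fresh one) or builds a new one: kzalloc, idr_alloc() to get the PASID, mmu_notifier_register(), and only then the PASID-table write that makes the entry live -- (u64)__pa(mm->pgd) | 1 for a user mm, init_mm.pgd with bit 11 set for supervisor use. A condensed kernel-style sketch of that setup-then-publish order (simplified, with hypothetical error labels):

    svm = kzalloc(sizeof(*svm), GFP_KERNEL);
    if (!svm)
            return -ENOMEM;

    ret = idr_alloc(&iommu->pasid_idr, svm, 0, pasid_max - 1, GFP_KERNEL);
    if (ret < 0)
            goto free_svm;          /* nothing published yet, plain kfree suffices */
    svm->pasid = ret;

    svm->notifier.ops = &intel_mmuops;
    ret = mmu_notifier_register(&svm->notifier, mm);
    if (ret)
            goto free_pasid;        /* unwind in reverse order of setup */

    /* bit 0 is the present bit: setting it is the publish step, so it goes last */
    iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
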
432 struct intel_svm *svm; in intel_svm_unbind_mm() local
440 svm = idr_find(&iommu->pasid_idr, pasid); in intel_svm_unbind_mm()
441 if (!svm) in intel_svm_unbind_mm()
444 list_for_each_entry(sdev, &svm->devs, list) { in intel_svm_unbind_mm()
457 intel_flush_pasid_dev(svm, sdev, svm->pasid); in intel_svm_unbind_mm()
458 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); in intel_svm_unbind_mm()
461 if (list_empty(&svm->devs)) { in intel_svm_unbind_mm()
463 idr_remove(&svm->iommu->pasid_idr, svm->pasid); in intel_svm_unbind_mm()
464 if (svm->mm) in intel_svm_unbind_mm()
465 mmu_notifier_unregister(&svm->notifier, svm->mm); in intel_svm_unbind_mm()
471 memset(svm, 0x6b, sizeof(*svm)); in intel_svm_unbind_mm()
472 kfree(svm); in intel_svm_unbind_mm()
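
Unbind flushes the departing device's PASID cache and TLB, and once svm->devs is empty tears the whole binding down: the PASID goes back to the IDR, the notifier is unregistered, and the struct is filled with 0x6b -- the kernel's POISON_FREE byte -- before kfree(), so a stale pointer dereferences to a recognizable pattern instead of silently reading freed memory. The poison-before-free habit in standalone form:

    #include <stdlib.h>
    #include <string.h>

    struct obj { void *ptr; long val; };

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (o) {
                    /* 0x6b matches POISON_FREE in include/linux/poison.h: a
                     * use-after-free now sees 0x6b6b6b6b... and fails loudly */
                    memset(o, 0x6b, sizeof(*o));
                    free(o);
            }
            return 0;
    }
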
524 struct intel_svm *svm = NULL; in prq_event_thread() local
554 if (!svm || svm->pasid != req->pasid) { in prq_event_thread()
556 svm = idr_find(&iommu->pasid_idr, req->pasid); in prq_event_thread()
562 if (!svm) { in prq_event_thread()
573 if (!svm->mm) in prq_event_thread()
576 if (!atomic_inc_not_zero(&svm->mm->mm_users)) in prq_event_thread()
578 down_read(&svm->mm->mmap_sem); in prq_event_thread()
579 vma = find_extend_vma(svm->mm, address); in prq_event_thread()
586 ret = handle_mm_fault(svm->mm, vma, address, in prq_event_thread()
593 up_read(&svm->mm->mmap_sem); in prq_event_thread()
594 mmput(svm->mm); in prq_event_thread()
598 list_for_each_entry_rcu(sdev, &svm->devs, list) { in prq_event_thread()
607 if (WARN_ON(&sdev->list == &svm->devs)) in prq_event_thread()
618 svm = NULL; in prq_event_thread()
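
prq_event_thread() services hardware page requests: it caches the last intel_svm across loop iterations (the !svm || svm->pasid != req->pasid test), pins the target mm with atomic_inc_not_zero() on mm_users so it cannot be torn down mid-fault, then resolves the address under mmap_sem with find_extend_vma() and the pre-4.7 handle_mm_fault() signature that still takes the mm. The core of that sequence as a kernel-style sketch (labels and the req->wr_req write-flag test follow the same era of the file, but treat this as a sketch, not the exact code):

    if (!atomic_inc_not_zero(&svm->mm->mm_users))
            goto bad_req;                   /* mm already exiting: fail the request */

    down_read(&svm->mm->mmap_sem);
    vma = find_extend_vma(svm->mm, address);
    if (!vma || address < vma->vm_start)
            goto invalid;

    ret = handle_mm_fault(svm->mm, vma, address,
                          req->wr_req ? FAULT_FLAG_WRITE : 0);
    invalid:
            up_read(&svm->mm->mmap_sem);
            mmput(svm->mm);                 /* drop the mm_users pin taken above */
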