Lines Matching refs:iommu (drivers/iommu/intel-svm.c)

37 int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) in intel_svm_alloc_pasid_tables() argument
42 order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; in intel_svm_alloc_pasid_tables()
49 iommu->name); in intel_svm_alloc_pasid_tables()
52 iommu->pasid_table = page_address(pages); in intel_svm_alloc_pasid_tables()
53 pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); in intel_svm_alloc_pasid_tables()
55 if (ecap_dis(iommu->ecap)) { in intel_svm_alloc_pasid_tables()
58 iommu->pasid_state_table = page_address(pages); in intel_svm_alloc_pasid_tables()
61 iommu->name); in intel_svm_alloc_pasid_tables()
64 idr_init(&iommu->pasid_idr); in intel_svm_alloc_pasid_tables()
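
Read together, the matches in intel_svm_alloc_pasid_tables() show the setup sequence: the table order is derived from the PASID Size Supported (PSS) field of the extended capability register, a PASID state table is additionally allocated when deferred invalidate is supported (ecap_dis), and an IDR is initialized for handing out PASIDs. A minimal sketch of the whole function follows; the alloc_pages() calls and error handling do not reference iommu and so do not appear above, and are filled in here as assumptions:

int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
{
        struct page *pages;
        int order;

        /* Size the table from the PASID Size Supported (PSS) field of
         * the extended capability register. */
        order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
        if (order < 0)
                order = 0;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!pages) {
                pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
                        iommu->name);
                return -ENOMEM;
        }
        iommu->pasid_table = page_address(pages);
        pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

        if (ecap_dis(iommu->ecap)) {
                /* Deferred invalidate supported: also allocate a PASID
                 * state table of the same order; failure is non-fatal. */
                pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
                if (pages)
                        iommu->pasid_state_table = page_address(pages);
                else
                        pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
                                iommu->name);
        }

        idr_init(&iommu->pasid_idr);

        return 0;
}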
69 int intel_svm_free_pasid_tables(struct intel_iommu *iommu) in intel_svm_free_pasid_tables() argument
73 order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; in intel_svm_free_pasid_tables()
77 if (iommu->pasid_table) { in intel_svm_free_pasid_tables()
78 free_pages((unsigned long)iommu->pasid_table, order); in intel_svm_free_pasid_tables()
79 iommu->pasid_table = NULL; in intel_svm_free_pasid_tables()
81 if (iommu->pasid_state_table) { in intel_svm_free_pasid_tables()
82 free_pages((unsigned long)iommu->pasid_state_table, order); in intel_svm_free_pasid_tables()
83 iommu->pasid_state_table = NULL; in intel_svm_free_pasid_tables()
85 idr_destroy(&iommu->pasid_idr); in intel_svm_free_pasid_tables()
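
The teardown path recomputes the same order rather than storing it, which is why ecap_pss() appears again at line 73, and frees whichever tables were actually allocated. A sketch under the same assumptions:

int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
        int order;

        /* Must match the order computed at allocation time. */
        order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
        if (order < 0)
                order = 0;

        if (iommu->pasid_table) {
                free_pages((unsigned long)iommu->pasid_table, order);
                iommu->pasid_table = NULL;
        }
        if (iommu->pasid_state_table) {
                free_pages((unsigned long)iommu->pasid_state_table, order);
                iommu->pasid_state_table = NULL;
        }
        idr_destroy(&iommu->pasid_idr);

        return 0;
}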
91 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
99 iommu->name); in intel_svm_enable_prq()
102 iommu->prq = page_address(pages); in intel_svm_enable_prq()
104 irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
107 iommu->name); in intel_svm_enable_prq()
110 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
111 iommu->prq = NULL; in intel_svm_enable_prq()
114 iommu->pr_irq = irq; in intel_svm_enable_prq()
116 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
119 iommu->prq_name, iommu); in intel_svm_enable_prq()
122 iommu->name); in intel_svm_enable_prq()
126 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
127 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
128 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
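
intel_svm_enable_prq() allocates the page request queue, wires up a dedicated threaded interrupt, and only then programs the queue registers: head and tail are zeroed and the queue's physical base is written to DMAR_PQA_REG with the queue size (PRQ_ORDER) encoded in the low bits. A sketch of the function; the request_threaded_irq() flags and the error paths fall outside the matched lines and are assumptions:

int intel_svm_enable_prq(struct intel_iommu *iommu)
{
        struct page *pages;
        int irq, ret;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages) {
                pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
                        iommu->name);
                return -ENOMEM;
        }
        iommu->prq = page_address(pages);

        /* Offset by DMAR_UNITS_SUPPORTED so PRQ vectors don't collide
         * with the DMAR fault vectors, which use seq_id alone. */
        irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
        if (irq <= 0) {
                pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
                       iommu->name);
                ret = -EINVAL;
                goto free_prq;
        }
        iommu->pr_irq = irq;

        snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret) {
                pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
                       iommu->name);
                dmar_free_hwirq(irq);
                iommu->pr_irq = 0;
                goto free_prq;
        }

        /* Reset head and tail, then point the hardware at the queue;
         * the low bits of DMAR_PQA_REG encode the queue size. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

        return 0;

free_prq:
        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;
        return ret;
}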
133 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
135 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
136 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
137 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
139 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
140 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
141 iommu->pr_irq = 0; in intel_svm_finish_prq()
143 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
144 iommu->prq = NULL; in intel_svm_finish_prq()
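
Teardown is the mirror image: the queue registers are cleared first so the hardware stops posting requests, then the interrupt and the queue pages are released. A sketch:

int intel_svm_finish_prq(struct intel_iommu *iommu)
{
        /* Quiesce the hardware before freeing anything. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        free_irq(iommu->pr_irq, iommu);
        dmar_free_hwirq(iommu->pr_irq);
        iommu->pr_irq = 0;

        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return 0;
}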
173 qi_submit_sync(&desc, svm->iommu); in intel_flush_svm_range_dev()
191 qi_submit_sync(&desc, svm->iommu); in intel_flush_svm_range_dev()
201 if (svm->iommu->pasid_state_table && in intel_flush_svm_range()
202 !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63)) in intel_flush_svm_range()
246 qi_submit_sync(&desc, svm->iommu); in intel_flush_pasid_dev()
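
The flush path at lines 201-202 shows the deferred-invalidate optimization: when a PASID state table exists, intel_flush_svm_range() first tries to atomically flip the state entry from 0 to a value with bit 63 set; if that succeeds, the pending invalidation is left for the hardware to pick up and no queued-invalidation descriptor is submitted. Only on failure does it fall through to per-device flushes, which end in the qi_submit_sync() calls matched above. A sketch; the device-list walk does not reference iommu and is assumed here:

static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
                                  unsigned long pages, int ih, int gl)
{
        struct intel_svm_dev *sdev;

        /* Deferred invalidate: mark the PASID state entry (bit 63) and
         * return; fall through only if the entry was already non-zero. */
        if (svm->iommu->pasid_state_table &&
            !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63))
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
                intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
        rcu_read_unlock();
}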
266 svm->iommu->pasid_table[svm->pasid].val = 0; in intel_mm_release()
289 struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); in intel_svm_bind_mm() local
296 if (WARN_ON(!iommu)) in intel_svm_bind_mm()
307 if (!ecap_srs(iommu->ecap)) in intel_svm_bind_mm()
318 idr_for_each_entry(&iommu->pasid_idr, svm, i) { in intel_svm_bind_mm()
353 ret = intel_iommu_enable_pasid(iommu, sdev); in intel_svm_bind_mm()
372 svm->iommu = iommu; in intel_svm_bind_mm()
374 if (pasid_max > 2 << ecap_pss(iommu->ecap)) in intel_svm_bind_mm()
375 pasid_max = 2 << ecap_pss(iommu->ecap); in intel_svm_bind_mm()
378 ret = idr_alloc(&iommu->pasid_idr, svm, in intel_svm_bind_mm()
379 !!cap_caching_mode(iommu->cap), in intel_svm_bind_mm()
394 idr_remove(&svm->iommu->pasid_idr, svm->pasid); in intel_svm_bind_mm()
399 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1; in intel_svm_bind_mm()
401 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11); in intel_svm_bind_mm()
412 if (cap_caching_mode(iommu->cap)) in intel_svm_bind_mm()
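
The heart of intel_svm_bind_mm() is the PASID table entry itself: bit 0 marks the entry present, the high bits carry the physical address of the process page table, and bit 11 enables supervisor requests (used with init_mm when no user mm is supplied, which is why line 307 refuses to proceed without ecap_srs). The PASID comes from idr_alloc(), with PASID 0 skipped under caching mode because it is reserved there. A hedged sketch of just this allocate-and-install step, excerpted mid-function; svm, sdev, mm, and pasid_max are set up earlier, and the intel_flush_pasid_dev() arguments are an assumption:

        if (pasid_max > 2 << ecap_pss(iommu->ecap))
                pasid_max = 2 << ecap_pss(iommu->ecap);

        /* PASID 0 is reserved when the IOMMU reports caching mode. */
        ret = idr_alloc(&iommu->pasid_idr, svm,
                        !!cap_caching_mode(iommu->cap),
                        pasid_max - 1, GFP_KERNEL);
        if (ret < 0)
                goto out;
        svm->pasid = ret;

        if (mm)
                /* Present (bit 0) + physical address of the mm's PGD. */
                iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
        else
                /* Kernel PASID: init_mm's PGD, supervisor mode (bit 11). */
                iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);

        /* With caching mode, even not-present to present transitions
         * must be flushed explicitly. */
        if (cap_caching_mode(iommu->cap))
                intel_flush_pasid_dev(svm, sdev, svm->pasid);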
431 struct intel_iommu *iommu; in intel_svm_unbind_mm() local
436 iommu = intel_svm_device_to_iommu(dev); in intel_svm_unbind_mm()
437 if (!iommu || !iommu->pasid_table) in intel_svm_unbind_mm()
440 svm = idr_find(&iommu->pasid_idr, pasid); in intel_svm_unbind_mm()
463 idr_remove(&svm->iommu->pasid_idr, svm->pasid); in intel_svm_unbind_mm()
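
Unbinding reverses this: the PASID is resolved through the same IDR, the device is dropped from the binding's list, and once no devices remain the PASID is returned. A skeleton with the reference counting, flushing, and locking elided (the svm->devs list is an assumption, as it does not match refs:iommu):

int intel_svm_unbind_mm(struct device *dev, int pasid)
{
        struct intel_iommu *iommu;
        struct intel_svm *svm;
        int ret = -EINVAL;

        iommu = intel_svm_device_to_iommu(dev);
        if (!iommu || !iommu->pasid_table)
                goto out;

        svm = idr_find(&iommu->pasid_idr, pasid);
        if (!svm)
                goto out;

        /* ... drop this device from svm->devs and flush its TLB
         * entries; once no devices remain, return the PASID: */
        if (list_empty(&svm->devs)) {
                idr_remove(&svm->iommu->pasid_idr, svm->pasid);
                /* ... unregister the mmu_notifier and free svm ... */
        }

        ret = 0;
out:
        return ret;
}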
523 struct intel_iommu *iommu = d; in prq_event_thread() local
529 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
531 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
532 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
543 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
549 iommu->name, ((unsigned long long *)req)[0], in prq_event_thread()
556 svm = idr_find(&iommu->pasid_idr, req->pasid); in prq_event_thread()
564 iommu->name, req->pasid, ((unsigned long long *)req)[0], in prq_event_thread()
629 qi_submit_sync(&resp, iommu); in prq_event_thread()
638 qi_submit_sync(&resp, iommu); in prq_event_thread()
644 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
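
prq_event_thread() is the consumer side of the queue set up in intel_svm_enable_prq(). It acknowledges the interrupt by writing DMA_PRS_PPR to the page request status register, snapshots the hardware tail, walks descriptors from head to tail while resolving each request's PASID through the same IDR, and finally publishes the new head. A skeleton with the fault handling and response descriptors elided:

static irqreturn_t prq_event_thread(int irq, void *d)
{
        struct intel_iommu *iommu = d;
        u64 head, tail;

        /* Ack first, so requests arriving while we drain the queue
         * raise a fresh interrupt. */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
                struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];
                struct intel_svm *svm;

                svm = idr_find(&iommu->pasid_idr, req->pasid);
                if (!svm)
                        pr_err("%s: Page request for invalid PASID %d\n",
                               iommu->name, req->pasid);
                /* ... otherwise fault the page in through svm->mm and
                 * post a response descriptor via qi_submit_sync() ... */

                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /* Tell the hardware how far we got. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        return IRQ_HANDLED;
}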