Lines matching refs:iommu in drivers/iommu/intel_irq_remapping.c (Intel VT-d interrupt remapping). Each entry below gives the file line number, the matching source line, and the enclosing function or context.
31 struct intel_iommu *iommu; member
38 struct intel_iommu *iommu; member
45 struct intel_iommu *iommu; member
81 static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
84 static bool ir_pre_enabled(struct intel_iommu *iommu) in ir_pre_enabled() argument
86 return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED); in ir_pre_enabled()
89 static void clear_ir_pre_enabled(struct intel_iommu *iommu) in clear_ir_pre_enabled() argument
91 iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in clear_ir_pre_enabled()
94 static void init_ir_status(struct intel_iommu *iommu) in init_ir_status() argument
98 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_ir_status()
100 iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED; in init_ir_status()
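The three helpers above implement the pre-enabled bookkeeping: after a kexec/kdump handover, interrupt remapping may already be live, so init_ir_status() latches VTD_FLAG_IRQ_REMAP_PRE_ENABLED from the global status register, and later stages test or clear it. Below is a minimal userspace model of that latch; the DMA_GSTS_IRES bit and flag value follow the kernel headers, while struct model_iommu is a simplified stand-in, not the kernel type.

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_GSTS_IRES                  (1u << 25)  /* IR enable status */
    #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1u << 1)

    struct model_iommu {
        uint32_t gsts;   /* stands in for readl(iommu->reg + DMAR_GSTS_REG) */
        uint32_t flags;
    };

    static void init_ir_status_model(struct model_iommu *iommu)
    {
        /* Latch "already enabled by the previous kernel" into a flag. */
        if (iommu->gsts & DMA_GSTS_IRES)
            iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
    }

    int main(void)
    {
        struct model_iommu iommu = { .gsts = DMA_GSTS_IRES };

        init_ir_status_model(&iommu);
        printf("pre-enabled: %d\n",
               !!(iommu.flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED));
        return 0;
    }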
103 static int alloc_irte(struct intel_iommu *iommu, int irq, in alloc_irte() argument
106 struct ir_table *table = iommu->ir_table; in alloc_irte()
119 if (mask > ecap_max_handle_mask(iommu->ecap)) { in alloc_irte()
122 ecap_max_handle_mask(iommu->ecap)); in alloc_irte()
130 pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); in alloc_irte()
132 irq_iommu->iommu = iommu; in alloc_irte()
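alloc_irte() hands out 2^n contiguous, naturally aligned entries from ir_table->bitmap (multi-vector MSI needs adjacent subhandles), after checking the request against ecap_max_handle_mask(). The sketch below reimplements the aligned-region search of bitmap_find_free_region() in plain C; the table size and function names are invented for illustration.

    #include <stdio.h>

    #define TABLE_ENTRIES 64        /* real table: 65536 IRTEs */

    static unsigned long long bitmap;   /* bit i set => IRTE i in use */

    /* Allocate 2^order contiguous, naturally aligned entries; return
     * the first index, or -1 on failure. */
    static int alloc_irte_region(int order)
    {
        int count = 1 << order;

        if (order < 0 || count > 63 || count > TABLE_ENTRIES)
            return -1;              /* mask too big: the kernel compares
                                       against ecap_max_handle_mask() */
        for (int idx = 0; idx + count <= TABLE_ENTRIES; idx += count) {
            unsigned long long region = ((1ULL << count) - 1) << idx;
            if (!(bitmap & region)) {
                bitmap |= region;   /* reserve the whole region */
                return idx;
            }
        }
        return -1;                  /* "IR%d: can't allocate an IRTE" */
    }

    int main(void)
    {
        printf("4 entries at index %d\n", alloc_irte_region(2)); /* 0 */
        printf("4 entries at index %d\n", alloc_irte_region(2)); /* 4 */
        return 0;
    }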
143 static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) in qi_flush_iec() argument
151 return qi_submit_sync(&desc, iommu); in qi_flush_iec()
157 struct intel_iommu *iommu; in modify_irte() local
167 iommu = irq_iommu->iommu; in modify_irte()
170 irte = &iommu->ir_table->base[index]; in modify_irte()
192 __iommu_flush_cache(iommu, irte, sizeof(*irte)); in modify_irte()
194 rc = qi_flush_iec(iommu, index, 0); in modify_irte()
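modify_irte() shows the update discipline for a live entry: rewrite the IRTE in memory, flush it from the CPU cache when the IOMMU is not coherent (__iommu_flush_cache()), then submit a synchronous invalidate-interrupt-entry-cache descriptor (qi_flush_iec() via qi_submit_sync()) so the hardware drops any stale copy. A runnable model with both hardware steps stubbed out as prints; types and names here are simplified stand-ins.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct irte { uint64_t low, high; };    /* real IRTE: 128 bits */

    static struct irte ir_table[16];

    /* Stand-ins for __iommu_flush_cache() and qi_flush_iec(). */
    static void flush_cache(void *addr, size_t size)
    {
        printf("clflush %zu bytes at %p\n", size, addr);
    }

    static int qi_flush_iec_stub(int index, int mask)
    {
        printf("QI: invalidate IEC index=%d mask=%d\n", index, mask);
        return 0;                           /* qi_submit_sync() result */
    }

    static int modify_irte_model(int index, const struct irte *src)
    {
        struct irte *irte = &ir_table[index];

        memcpy(irte, src, sizeof(*irte));   /* publish the new entry */
        flush_cache(irte, sizeof(*irte));   /* only if IOMMU not coherent */
        return qi_flush_iec_stub(index, 0); /* drop hardware's cached copy */
    }

    int main(void)
    {
        struct irte e = { .low = 1, .high = 0 };
        return modify_irte_model(3, &e);
    }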
208 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu) in map_hpet_to_ir()
209 return ir_hpet[i].iommu; in map_hpet_to_ir()
218 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu) in map_ioapic_to_ir()
219 return ir_ioapic[i].iommu; in map_ioapic_to_ir()
231 return drhd->iommu; in map_dev_to_ir()
237 struct intel_iommu *iommu; in clear_entries() local
243 iommu = irq_iommu->iommu; in clear_entries()
246 start = iommu->ir_table->base + index; in clear_entries()
253 bitmap_release_region(iommu->ir_table->bitmap, index, in clear_entries()
256 return qi_flush_iec(iommu, index, irq_iommu->irte_mask); in clear_entries()
304 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) { in set_ioapic_sid()
331 if (ir_hpet[i].iommu && ir_hpet[i].id == id) { in set_hpet_sid()
403 static int iommu_load_old_irte(struct intel_iommu *iommu) in iommu_load_old_irte() argument
413 iommu->name); in iommu_load_old_irte()
414 clear_ir_pre_enabled(iommu); in iommu_load_old_irte()
415 iommu_disable_irq_remapping(iommu); in iommu_load_old_irte()
420 irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); in iommu_load_old_irte()
434 memcpy(iommu->ir_table->base, old_ir_table, size); in iommu_load_old_irte()
436 __iommu_flush_cache(iommu, iommu->ir_table->base, size); in iommu_load_old_irte()
443 if (iommu->ir_table->base[i].present) in iommu_load_old_irte()
444 bitmap_set(iommu->ir_table->bitmap, i, 1); in iommu_load_old_irte()
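iommu_load_old_irte() is the kdump path behind ir_pre_enabled(): the old kernel's table address is read back from DMAR_IRTA_REG, the table is mapped and copied wholesale into the newly allocated one, and the allocation bitmap is rebuilt from each entry's present bit so in-flight vectors keep resolving. A simplified model of the copy-and-rescan step, with the IRTE reduced to its present flag:

    #include <stdio.h>
    #include <string.h>

    #define ENTRIES 8

    struct irte { unsigned present : 1; };  /* real IRTE is 128 bits */

    static struct irte old_table[ENTRIES] = { [1].present = 1, [5].present = 1 };
    static struct irte new_table[ENTRIES];
    static unsigned long bitmap;            /* allocation bitmap to rebuild */

    static void load_old_irte(void)
    {
        /* Copy the previous kernel's table wholesale... */
        memcpy(new_table, old_table, sizeof(new_table));
        /* ...then mark every present entry as allocated, as the
         * bitmap_set() loop in iommu_load_old_irte() does. */
        for (int i = 0; i < ENTRIES; i++)
            if (new_table[i].present)
                bitmap |= 1UL << i;
    }

    int main(void)
    {
        load_old_irte();
        printf("bitmap = 0x%lx\n", bitmap); /* 0x22: entries 1 and 5 */
        return 0;
    }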
453 static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode) in iommu_set_irq_remapping() argument
459 addr = virt_to_phys((void *)iommu->ir_table->base); in iommu_set_irq_remapping()
461 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_set_irq_remapping()
463 dmar_writeq(iommu->reg + DMAR_IRTA_REG, in iommu_set_irq_remapping()
467 writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_irq_remapping()
469 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_irq_remapping()
471 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_set_irq_remapping()
477 qi_global_iec(iommu); in iommu_set_irq_remapping()
480 static void iommu_enable_irq_remapping(struct intel_iommu *iommu) in iommu_enable_irq_remapping() argument
485 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
488 iommu->gcmd |= DMA_GCMD_IRE; in iommu_enable_irq_remapping()
489 iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ in iommu_enable_irq_remapping()
490 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_irq_remapping()
492 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_irq_remapping()
505 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_irq_remapping()
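iommu_set_irq_remapping() and iommu_enable_irq_remapping() are two halves of one GCMD handshake: first DMAR_IRTA_REG receives the table's physical address plus the size field (and the EIM mode bit when x2APIC is in use), SIRTP is pulsed and DMAR_GSTS_REG polled until the pointer is latched; only then is IRE set, with CFI cleared in the same write so compatibility-format (unremapped) MSIs are blocked. Below, a model with a fake device that latches status instantly; the bit positions follow the VT-d spec, but the "hardware" is just two variables.

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_GCMD_CFI   (1u << 23)
    #define DMA_GCMD_SIRTP (1u << 24)
    #define DMA_GCMD_IRE   (1u << 25)
    #define DMA_GSTS_IRTPS (1u << 24)
    #define DMA_GSTS_IRES  (1u << 25)

    /* Fake device: status mirrors command bits instantly. Real hardware
     * may take time, hence IOMMU_WAIT_OP()'s polling loop. */
    static uint64_t irta_reg;
    static uint32_t gsts_reg;

    static void write_gcmd(uint32_t cmd)
    {
        gsts_reg = 0;
        if (cmd & DMA_GCMD_SIRTP) gsts_reg |= DMA_GSTS_IRTPS;
        if (cmd & DMA_GCMD_IRE)   gsts_reg |= DMA_GSTS_IRES;
    }

    int main(void)
    {
        uint32_t gcmd = 0;
        uint64_t table_phys = 0x12340000;   /* made-up table address */

        /* Step 1: point hardware at the table, then pulse SIRTP. */
        irta_reg = table_phys | 0xf;        /* size field 0xf => 2^16 entries */
        write_gcmd(gcmd | DMA_GCMD_SIRTP);
        while (!(gsts_reg & DMA_GSTS_IRTPS))
            ;                               /* IOMMU_WAIT_OP() */

        /* Step 2: enable remapping; clearing CFI blocks
         * compatibility-format MSIs in the same write. */
        gcmd |= DMA_GCMD_IRE;
        gcmd &= ~DMA_GCMD_CFI;
        write_gcmd(gcmd);
        while (!(gsts_reg & DMA_GSTS_IRES))
            ;

        printf("IR enabled, IRTA=%#llx\n", (unsigned long long)irta_reg);
        return 0;
    }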
508 static int intel_setup_irq_remapping(struct intel_iommu *iommu) in intel_setup_irq_remapping() argument
514 if (iommu->ir_table) in intel_setup_irq_remapping()
521 pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, in intel_setup_irq_remapping()
525 iommu->seq_id, INTR_REMAP_PAGE_ORDER); in intel_setup_irq_remapping()
532 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); in intel_setup_irq_remapping()
536 iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(), in intel_setup_irq_remapping()
539 iommu); in intel_setup_irq_remapping()
540 if (!iommu->ir_domain) { in intel_setup_irq_remapping()
541 pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); in intel_setup_irq_remapping()
544 iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain); in intel_setup_irq_remapping()
548 iommu->ir_table = ir_table; in intel_setup_irq_remapping()
554 if (!iommu->qi) { in intel_setup_irq_remapping()
558 dmar_fault(-1, iommu); in intel_setup_irq_remapping()
559 dmar_disable_qi(iommu); in intel_setup_irq_remapping()
561 if (dmar_enable_qi(iommu)) { in intel_setup_irq_remapping()
567 init_ir_status(iommu); in intel_setup_irq_remapping()
569 if (ir_pre_enabled(iommu)) { in intel_setup_irq_remapping()
570 if (iommu_load_old_irte(iommu)) in intel_setup_irq_remapping()
572 iommu->name); in intel_setup_irq_remapping()
575 iommu->name); in intel_setup_irq_remapping()
578 iommu_set_irq_remapping(iommu, eim_mode); in intel_setup_irq_remapping()
589 iommu->ir_table = NULL; in intel_setup_irq_remapping()
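intel_setup_irq_remapping() follows a strict order with unwinding on failure: allocate the IRT pages on the IOMMU's NUMA node, allocate the tracking bitmap, build the hierarchical irq domain plus its MSI child, bring up queued invalidation (required for IEC flushes), then either reload a pre-enabled table or program a clean one. A skeleton of that ordering with goto-style cleanup, every step stubbed; the helper names are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stubs standing in for the real allocation/initialisation steps. */
    static void *alloc_table(void)    { return malloc(4096); }
    static void *alloc_bitmap(void)   { return calloc(1, 64); }
    static int   create_domains(void) { return 0; }   /* irq_domain_add_hierarchy() */
    static int   enable_qi(void)      { return 0; }   /* dmar_enable_qi() */

    static int setup_irq_remapping(void)
    {
        void *table, *bitmap = NULL;

        table = alloc_table();
        if (!table)
            goto out;
        bitmap = alloc_bitmap();
        if (!bitmap)
            goto out_free_table;        /* "failed to allocate bitmap" */
        if (create_domains())           /* plus the MSI child domain */
            goto out_free_bitmap;
        if (enable_qi())                /* QI is needed for qi_flush_iec() */
            goto out_free_bitmap;
        /* Here the real code checks ir_pre_enabled(): either reload the
         * old kernel's IRTEs or program a clean table via
         * iommu_set_irq_remapping(). */
        printf("IR table ready\n");
        return 0;

    out_free_bitmap:
        free(bitmap);
    out_free_table:
        free(table);
    out:
        return -1;
    }

    int main(void) { return setup_irq_remapping(); }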
594 static void intel_teardown_irq_remapping(struct intel_iommu *iommu) in intel_teardown_irq_remapping() argument
596 if (iommu && iommu->ir_table) { in intel_teardown_irq_remapping()
597 if (iommu->ir_msi_domain) { in intel_teardown_irq_remapping()
598 irq_domain_remove(iommu->ir_msi_domain); in intel_teardown_irq_remapping()
599 iommu->ir_msi_domain = NULL; in intel_teardown_irq_remapping()
601 if (iommu->ir_domain) { in intel_teardown_irq_remapping()
602 irq_domain_remove(iommu->ir_domain); in intel_teardown_irq_remapping()
603 iommu->ir_domain = NULL; in intel_teardown_irq_remapping()
605 free_pages((unsigned long)iommu->ir_table->base, in intel_teardown_irq_remapping()
607 kfree(iommu->ir_table->bitmap); in intel_teardown_irq_remapping()
608 kfree(iommu->ir_table); in intel_teardown_irq_remapping()
609 iommu->ir_table = NULL; in intel_teardown_irq_remapping()
616 static void iommu_disable_irq_remapping(struct intel_iommu *iommu) in iommu_disable_irq_remapping() argument
621 if (!ecap_ir_support(iommu->ecap)) in iommu_disable_irq_remapping()
628 qi_global_iec(iommu); in iommu_disable_irq_remapping()
630 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
632 sts = readl(iommu->reg + DMAR_GSTS_REG); in iommu_disable_irq_remapping()
636 iommu->gcmd &= ~DMA_GCMD_IRE; in iommu_disable_irq_remapping()
637 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_irq_remapping()
639 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_irq_remapping()
643 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_irq_remapping()
658 struct intel_iommu *iommu; in intel_cleanup_irq_remapping() local
660 for_each_iommu(iommu, drhd) { in intel_cleanup_irq_remapping()
661 if (ecap_ir_support(iommu->ecap)) { in intel_cleanup_irq_remapping()
662 iommu_disable_irq_remapping(iommu); in intel_cleanup_irq_remapping()
663 intel_teardown_irq_remapping(iommu); in intel_cleanup_irq_remapping()
674 struct intel_iommu *iommu; in intel_prepare_irq_remapping() local
699 for_each_iommu(iommu, drhd) in intel_prepare_irq_remapping()
700 if (!ecap_ir_support(iommu->ecap)) in intel_prepare_irq_remapping()
712 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
713 if (eim && !ecap_eim_support(iommu->ecap)) { in intel_prepare_irq_remapping()
714 pr_info("%s does not support EIM\n", iommu->name); in intel_prepare_irq_remapping()
724 for_each_iommu(iommu, drhd) { in intel_prepare_irq_remapping()
725 if (intel_setup_irq_remapping(iommu)) { in intel_prepare_irq_remapping()
727 iommu->name); in intel_prepare_irq_remapping()
745 struct intel_iommu *iommu; in set_irq_posting_cap() local
759 for_each_iommu(iommu, drhd) in set_irq_posting_cap()
760 if (!cap_pi_support(iommu->cap)) { in set_irq_posting_cap()
771 struct intel_iommu *iommu; in intel_enable_irq_remapping() local
777 for_each_iommu(iommu, drhd) { in intel_enable_irq_remapping()
778 if (!ir_pre_enabled(iommu)) in intel_enable_irq_remapping()
779 iommu_enable_irq_remapping(iommu); in intel_enable_irq_remapping()
800 struct intel_iommu *iommu, in ir_parse_one_hpet_scope() argument
823 if (ir_hpet[count].iommu == iommu && in ir_parse_one_hpet_scope()
826 else if (ir_hpet[count].iommu == NULL && free == -1) in ir_parse_one_hpet_scope()
834 ir_hpet[free].iommu = iommu; in ir_parse_one_hpet_scope()
845 struct intel_iommu *iommu, in ir_parse_one_ioapic_scope() argument
868 if (ir_ioapic[count].iommu == iommu && in ir_parse_one_ioapic_scope()
871 else if (ir_ioapic[count].iommu == NULL && free == -1) in ir_parse_one_ioapic_scope()
881 ir_ioapic[free].iommu = iommu; in ir_parse_one_ioapic_scope()
884 scope->enumeration_id, drhd->address, iommu->seq_id); in ir_parse_one_ioapic_scope()
890 struct intel_iommu *iommu) in ir_parse_ioapic_hpet_scope() argument
904 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
906 ret = ir_parse_one_hpet_scope(scope, iommu, drhd); in ir_parse_ioapic_hpet_scope()
913 static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu) in ir_remove_ioapic_hpet_scope() argument
918 if (ir_hpet[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
919 ir_hpet[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
922 if (ir_ioapic[i].iommu == iommu) in ir_remove_ioapic_hpet_scope()
923 ir_ioapic[i].iommu = NULL; in ir_remove_ioapic_hpet_scope()
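The ir_ioapic[] and ir_hpet[] arrays record which IOMMU remaps each IO-APIC and HPET. The scope parsers above fill them with a find-or-claim scan: reuse a slot that already matches this iommu and device, otherwise take the first free slot; map_ioapic_to_ir()/map_hpet_to_ir() are the corresponding lookups, and ir_remove_ioapic_hpet_scope() the eviction. A compact model of that scan follows (the real code also matches bus/devfn; fields here are trimmed).

    #include <stdio.h>

    #define MAX_IO_APICS 8

    struct scope { void *iommu; unsigned int id; };

    static struct scope ir_ioapic[MAX_IO_APICS];

    /* Claim a slot for (iommu, id): reuse a matching entry, else take
     * the first free one, as ir_parse_one_ioapic_scope() does. */
    static int claim_scope(void *iommu, unsigned int id)
    {
        int free_slot = -1;

        for (int i = 0; i < MAX_IO_APICS; i++) {
            if (ir_ioapic[i].iommu == iommu && ir_ioapic[i].id == id)
                return i;               /* already registered */
            if (ir_ioapic[i].iommu == NULL && free_slot == -1)
                free_slot = i;
        }
        if (free_slot >= 0) {
            ir_ioapic[free_slot].iommu = iommu;
            ir_ioapic[free_slot].id = id;
        }
        return free_slot;               /* -1: table exhausted */
    }

    int main(void)
    {
        int dummy;
        printf("slot %d\n", claim_scope(&dummy, 2));  /* 0 */
        printf("slot %d\n", claim_scope(&dummy, 2));  /* 0 again */
        return 0;
    }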
933 struct intel_iommu *iommu; in parse_ioapics_under_ir() local
937 for_each_iommu(iommu, drhd) { in parse_ioapics_under_ir()
940 if (!ecap_ir_support(iommu->ecap)) in parse_ioapics_under_ir()
943 ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu); in parse_ioapics_under_ir()
984 struct intel_iommu *iommu = NULL; in disable_irq_remapping() local
989 for_each_iommu(iommu, drhd) { in disable_irq_remapping()
990 if (!ecap_ir_support(iommu->ecap)) in disable_irq_remapping()
993 iommu_disable_irq_remapping(iommu); in disable_irq_remapping()
1007 struct intel_iommu *iommu = NULL; in reenable_irq_remapping() local
1009 for_each_iommu(iommu, drhd) in reenable_irq_remapping()
1010 if (iommu->qi) in reenable_irq_remapping()
1011 dmar_reenable_qi(iommu); in reenable_irq_remapping()
1016 for_each_iommu(iommu, drhd) { in reenable_irq_remapping()
1017 if (!ecap_ir_support(iommu->ecap)) in reenable_irq_remapping()
1021 iommu_set_irq_remapping(iommu, eim); in reenable_irq_remapping()
1022 iommu_enable_irq_remapping(iommu); in reenable_irq_remapping()
1062 struct intel_iommu *iommu = NULL; in intel_get_ir_irq_domain() local
1069 iommu = map_ioapic_to_ir(info->ioapic_id); in intel_get_ir_irq_domain()
1072 iommu = map_hpet_to_ir(info->hpet_id); in intel_get_ir_irq_domain()
1076 iommu = map_dev_to_ir(info->msi_dev); in intel_get_ir_irq_domain()
1083 return iommu ? iommu->ir_domain : NULL; in intel_get_ir_irq_domain()
1088 struct intel_iommu *iommu; in intel_get_irq_domain() local
1096 iommu = map_dev_to_ir(info->msi_dev); in intel_get_irq_domain()
1097 if (iommu) in intel_get_irq_domain()
1098 return iommu->ir_msi_domain; in intel_get_irq_domain()
1305 struct intel_iommu *iommu = domain->host_data; in intel_irq_remapping_alloc() local
1312 if (!info || !iommu) in intel_irq_remapping_alloc()
1335 index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs); in intel_irq_remapping_alloc()
1412 static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu) in dmar_ir_add() argument
1417 if (eim && !ecap_eim_support(iommu->ecap)) { in dmar_ir_add()
1419 iommu->reg_phys, iommu->ecap); in dmar_ir_add()
1423 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) { in dmar_ir_add()
1425 iommu->reg_phys); in dmar_ir_add()
1432 ret = intel_setup_irq_remapping(iommu); in dmar_ir_add()
1435 iommu->name); in dmar_ir_add()
1436 intel_teardown_irq_remapping(iommu); in dmar_ir_add()
1437 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_add()
1439 iommu_enable_irq_remapping(iommu); in dmar_ir_add()
1448 struct intel_iommu *iommu = dmaru->iommu; in dmar_ir_hotplug() local
1452 if (iommu == NULL) in dmar_ir_hotplug()
1454 if (!ecap_ir_support(iommu->ecap)) in dmar_ir_hotplug()
1457 !cap_pi_support(iommu->cap)) in dmar_ir_hotplug()
1461 if (!iommu->ir_table) in dmar_ir_hotplug()
1462 ret = dmar_ir_add(dmaru, iommu); in dmar_ir_hotplug()
1464 if (iommu->ir_table) { in dmar_ir_hotplug()
1465 if (!bitmap_empty(iommu->ir_table->bitmap, in dmar_ir_hotplug()
1469 iommu_disable_irq_remapping(iommu); in dmar_ir_hotplug()
1470 intel_teardown_irq_remapping(iommu); in dmar_ir_hotplug()
1471 ir_remove_ioapic_hpet_scope(iommu); in dmar_ir_hotplug()
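Finally, dmar_ir_hotplug() gates DRHD hot-add and hot-remove: on insert, a unit without an ir_table goes through dmar_ir_add() (EIM capability check, scope parse, setup, enable); on remove, the unit is refused while any IRTE is still allocated (the bitmap_empty() test above), otherwise it is disabled, torn down, and its IO-APIC/HPET scope entries dropped. A decision-flow sketch of that gate under those assumptions:

    #include <stdbool.h>
    #include <stdio.h>

    struct unit {
        bool has_ir_table;
        unsigned long bitmap;   /* non-zero => IRTEs still allocated */
    };

    /* Returns 0 on success, -1 (EBUSY in the kernel) if the unit still
     * has live interrupt entries and cannot be removed. */
    static int ir_hotplug(struct unit *u, bool insert)
    {
        if (insert) {
            if (!u->has_ir_table) {
                /* dmar_ir_add(): parse scope, set up table, enable IR */
                u->has_ir_table = true;
            }
            return 0;
        }
        if (u->has_ir_table) {
            if (u->bitmap != 0)
                return -1;      /* !bitmap_empty(): refuse removal */
            /* disable IR, tear down table, drop IO-APIC/HPET scopes */
            u->has_ir_table = false;
        }
        return 0;
    }

    int main(void)
    {
        struct unit u = { .has_ir_table = true, .bitmap = 0x4 };

        printf("remove busy unit: %d\n", ir_hotplug(&u, false)); /* -1 */
        u.bitmap = 0;
        printf("remove idle unit: %d\n", ir_hotplug(&u, false)); /*  0 */
        return 0;
    }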