Lines matching refs:iommu

Cross-reference listing for the identifier iommu in the Linux DMAR (Intel VT-d) support code, drivers/iommu/dmar.c. Each entry shows the source line number, the matching line, and the enclosing function.

76 static void free_iommu(struct intel_iommu *iommu);
427 if (dmaru->iommu) in dmar_free_drhd()
428 free_iommu(dmaru->iommu); in dmar_free_drhd()
466 drhd->iommu->node = node; in dmar_parse_one_rhsa()
892 x86_init.iommu.iommu_init = intel_iommu_init; in detect_intel_iommu()
903 static void unmap_iommu(struct intel_iommu *iommu) in unmap_iommu() argument
905 iounmap(iommu->reg); in unmap_iommu()
906 release_mem_region(iommu->reg_phys, iommu->reg_size); in unmap_iommu()
917 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr) in map_iommu() argument
921 iommu->reg_phys = phys_addr; in map_iommu()
922 iommu->reg_size = VTD_PAGE_SIZE; in map_iommu()
924 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) { in map_iommu()
930 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
931 if (!iommu->reg) { in map_iommu()
937 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); in map_iommu()
938 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); in map_iommu()
940 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { in map_iommu()
947 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), in map_iommu()
948 cap_max_fault_reg_offset(iommu->cap)); in map_iommu()
950 if (map_size > iommu->reg_size) { in map_iommu()
951 iounmap(iommu->reg); in map_iommu()
952 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
953 iommu->reg_size = map_size; in map_iommu()
954 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, in map_iommu()
955 iommu->name)) { in map_iommu()
960 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size); in map_iommu()
961 if (!iommu->reg) { in map_iommu()
971 iounmap(iommu->reg); in map_iommu()
973 release_mem_region(iommu->reg_phys, iommu->reg_size); in map_iommu()
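
map_iommu() (lines 917-973 above) maps the unit's registers in two stages: it first maps a single VTD_PAGE_SIZE page, reads CAP and ECAP through it, and only if those registers place the IOTLB or fault-recording registers past that page does it release and remap a larger region. A minimal userspace model of the sizing decision; the field decodes follow the VT-d spec, but the kernel's ecap_max_iotlb_offset()/cap_max_fault_reg_offset() macros are authoritative:

    #include <stddef.h>
    #include <stdint.h>

    #define VTD_PAGE_SIZE 4096

    static int max_iotlb_offset(uint64_t ecap)
    {
        /* IRO field (ECAP bits 17:8) counts 16-byte units; the IOTLB
         * register pair itself is another 16 bytes. */
        return (int)(((ecap >> 8) & 0x3ff) * 16 + 16);
    }

    static int max_fault_reg_offset(uint64_t cap)
    {
        /* FRO (CAP bits 33:24) and NFR (CAP bits 47:40, count minus 1),
         * 16 bytes per fault-recording register. */
        int fro = (int)(((cap >> 24) & 0x3ff) * 16);
        int nfr = (int)(((cap >> 40) & 0xff) + 1);
        return fro + nfr * 16;
    }

    /* The decision made at lines 947-953: start with one page; grow the
     * mapping only when the capability registers demand it. */
    size_t needed_map_size(uint64_t cap, uint64_t ecap)
    {
        int iotlb = max_iotlb_offset(ecap);
        int fault = max_fault_reg_offset(cap);
        int need  = iotlb > fault ? iotlb : fault;   /* max_t(int, ...) */

        return need > VTD_PAGE_SIZE ? (size_t)need : VTD_PAGE_SIZE;
    }
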
978 static int dmar_alloc_seq_id(struct intel_iommu *iommu) in dmar_alloc_seq_id() argument
980 iommu->seq_id = find_first_zero_bit(dmar_seq_ids, in dmar_alloc_seq_id()
982 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) { in dmar_alloc_seq_id()
983 iommu->seq_id = -1; in dmar_alloc_seq_id()
985 set_bit(iommu->seq_id, dmar_seq_ids); in dmar_alloc_seq_id()
986 sprintf(iommu->name, "dmar%d", iommu->seq_id); in dmar_alloc_seq_id()
989 return iommu->seq_id; in dmar_alloc_seq_id()
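
dmar_alloc_seq_id() hands out unit numbers from a static bitmap (dmar_seq_ids), so the "dmar0", "dmar1", ... names stay unique and get recycled by dmar_free_seq_id()'s clear_bit(). A userspace sketch of the same find-first-zero-bit allocation, with a byte array standing in for the kernel bitmap:

    #include <stdio.h>

    #define DMAR_UNITS_SUPPORTED 64            /* illustrative value */

    static unsigned char seq_ids[DMAR_UNITS_SUPPORTED];   /* 0 = free */

    /* find_first_zero_bit() + set_bit(), modeled with the array. */
    int alloc_seq_id(char name[16])
    {
        for (int id = 0; id < DMAR_UNITS_SUPPORTED; id++) {
            if (!seq_ids[id]) {
                seq_ids[id] = 1;
                snprintf(name, 16, "dmar%d", id);   /* iommu->name   */
                return id;                          /* iommu->seq_id */
            }
        }
        return -1;   /* dmar_alloc_seq_id() also leaves seq_id = -1 */
    }

    void free_seq_id(int id)
    {
        if (id >= 0)
            seq_ids[id] = 0;    /* clear_bit() in dmar_free_seq_id() */
    }
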
992 static void dmar_free_seq_id(struct intel_iommu *iommu) in dmar_free_seq_id() argument
994 if (iommu->seq_id >= 0) { in dmar_free_seq_id()
995 clear_bit(iommu->seq_id, dmar_seq_ids); in dmar_free_seq_id()
996 iommu->seq_id = -1; in dmar_free_seq_id()
1002 struct intel_iommu *iommu; in alloc_iommu() local
1013 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); in alloc_iommu()
1014 if (!iommu) in alloc_iommu()
1017 if (dmar_alloc_seq_id(iommu) < 0) { in alloc_iommu()
1023 err = map_iommu(iommu, drhd->reg_base_addr); in alloc_iommu()
1025 pr_err("Failed to map %s\n", iommu->name); in alloc_iommu()
1030 agaw = iommu_calculate_agaw(iommu); in alloc_iommu()
1033 iommu->seq_id); in alloc_iommu()
1036 msagaw = iommu_calculate_max_sagaw(iommu); in alloc_iommu()
1039 iommu->seq_id); in alloc_iommu()
1042 iommu->agaw = agaw; in alloc_iommu()
1043 iommu->msagaw = msagaw; in alloc_iommu()
1044 iommu->segment = drhd->segment; in alloc_iommu()
1046 iommu->node = -1; in alloc_iommu()
1048 ver = readl(iommu->reg + DMAR_VER_REG); in alloc_iommu()
1050 iommu->name, in alloc_iommu()
1053 (unsigned long long)iommu->cap, in alloc_iommu()
1054 (unsigned long long)iommu->ecap); in alloc_iommu()
1057 sts = readl(iommu->reg + DMAR_GSTS_REG); in alloc_iommu()
1059 iommu->gcmd |= DMA_GCMD_IRE; in alloc_iommu()
1061 iommu->gcmd |= DMA_GCMD_TE; in alloc_iommu()
1063 iommu->gcmd |= DMA_GCMD_QIE; in alloc_iommu()
1065 raw_spin_lock_init(&iommu->register_lock); in alloc_iommu()
1067 drhd->iommu = iommu; in alloc_iommu()
1070 iommu->iommu_dev = iommu_device_create(NULL, iommu, in alloc_iommu()
1072 "%s", iommu->name); in alloc_iommu()
1077 unmap_iommu(iommu); in alloc_iommu()
1079 dmar_free_seq_id(iommu); in alloc_iommu()
1081 kfree(iommu); in alloc_iommu()
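
The tail of alloc_iommu() (lines 1077-1081) is the usual kernel unwind ladder: each failure label releases exactly what the earlier steps acquired, in reverse order (unmap_iommu(), then dmar_free_seq_id(), then kfree()). A compilable skeleton of the pattern, with hypothetical acquire/undo helpers standing in for the real calls:

    #include <errno.h>

    /* Hypothetical stand-ins for dmar_alloc_seq_id() / map_iommu()
     * and their undo operations. */
    static int  acquire_id(void) { return 0; }
    static void release_id(void) { }
    static int  map_regs(void)   { return -ENOMEM; }

    int setup(void)
    {
        int err;

        err = acquire_id();
        if (err)
            return err;          /* nothing to undo yet */

        err = map_regs();
        if (err)
            goto free_id;        /* undo only what succeeded */

        return 0;

    free_id:
        release_id();
        return err;
    }
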
1085 static void free_iommu(struct intel_iommu *iommu) in free_iommu() argument
1087 iommu_device_destroy(iommu->iommu_dev); in free_iommu()
1089 if (iommu->irq) { in free_iommu()
1090 if (iommu->pr_irq) { in free_iommu()
1091 free_irq(iommu->pr_irq, iommu); in free_iommu()
1092 dmar_free_hwirq(iommu->pr_irq); in free_iommu()
1093 iommu->pr_irq = 0; in free_iommu()
1095 free_irq(iommu->irq, iommu); in free_iommu()
1096 dmar_free_hwirq(iommu->irq); in free_iommu()
1097 iommu->irq = 0; in free_iommu()
1100 if (iommu->qi) { in free_iommu()
1101 free_page((unsigned long)iommu->qi->desc); in free_iommu()
1102 kfree(iommu->qi->desc_status); in free_iommu()
1103 kfree(iommu->qi); in free_iommu()
1106 if (iommu->reg) in free_iommu()
1107 unmap_iommu(iommu); in free_iommu()
1109 dmar_free_seq_id(iommu); in free_iommu()
1110 kfree(iommu); in free_iommu()
1126 static int qi_check_fault(struct intel_iommu *iommu, int index) in qi_check_fault() argument
1130 struct q_inval *qi = iommu->qi; in qi_check_fault()
1136 fault = readl(iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1144 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1152 __iommu_flush_cache(iommu, &qi->desc[index], in qi_check_fault()
1154 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1164 head = readl(iommu->reg + DMAR_IQH_REG); in qi_check_fault()
1167 tail = readl(iommu->reg + DMAR_IQT_REG); in qi_check_fault()
1170 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
1183 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); in qi_check_fault()
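
qi_check_fault() distinguishes three error bits in the fault status register: IQE (a malformed descriptor, which is fixed up in place and retried), ITE (a device failed to answer an invalidation wait in time), and ICE (an invalidation completion error). Writing the bit back to DMAR_FSTS_REG acknowledges it, as lines 1154/1170/1183 show. A decode sketch; the bit positions follow the VT-d spec's FSTS_REG layout (the kernel's DMA_FSTS_* constants):

    #include <stdint.h>
    #include <stdio.h>

    #define FSTS_IQE (1u << 4)   /* Invalidation Queue Error      */
    #define FSTS_ICE (1u << 5)   /* Invalidation Completion Error */
    #define FSTS_ITE (1u << 6)   /* Invalidation Time-out Error   */

    void decode_qi_fault(uint32_t fsts)
    {
        if (fsts & FSTS_IQE)
            puts("IQE: malformed descriptor; rewrite it, ack, restart");
        if (fsts & FSTS_ITE)
            puts("ITE: device invalidation timed out; ack and retry");
        if (fsts & FSTS_ICE)
            puts("ICE: completion error; ack and continue");
    }
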
1192 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) in qi_submit_sync() argument
1195 struct q_inval *qi = iommu->qi; in qi_submit_sync()
1228 __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); in qi_submit_sync()
1229 __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); in qi_submit_sync()
1238 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
1248 rc = qi_check_fault(iommu, index); in qi_submit_sync()
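
qi_submit_sync() (lines 1192-1248) treats the one-page descriptor queue as a ring: each request takes two consecutive slots, the descriptor itself plus a wait descriptor the CPU spins on until hardware completes it, and the new tail is published through DMAR_IQT_REG (shifted by DMAR_IQ_SHIFT). The index arithmetic, modeled in userspace; QI_LENGTH matches the kernel's 256 descriptors per 4 KiB page:

    #define QI_LENGTH 256   /* 4096-byte queue / 16-byte descriptors */

    struct qi_ring {
        int free_head;      /* next slot software fills */
        int free_cnt;
    };

    /* Two-slot reservation as in qi_submit_sync(): descriptor at
     * *index, wait descriptor right after it; one slot is always kept
     * spare so head == tail means "empty", never "full". */
    int qi_reserve(struct qi_ring *qi, int *index, int *wait_index)
    {
        if (qi->free_cnt < 3)
            return -1;                   /* queue full: caller retries */

        *index        = qi->free_head;
        *wait_index   = (*index + 1) % QI_LENGTH;
        qi->free_head = (*index + 2) % QI_LENGTH;
        qi->free_cnt -= 2;
        return 0;  /* caller then writes free_head << 4 to DMAR_IQT_REG */
    }
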
1271 void qi_global_iec(struct intel_iommu *iommu) in qi_global_iec() argument
1279 qi_submit_sync(&desc, iommu); in qi_global_iec()
1282 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, in qi_flush_context() argument
1291 qi_submit_sync(&desc, iommu); in qi_flush_context()
1294 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, in qi_flush_iotlb() argument
1302 if (cap_write_drain(iommu->cap)) in qi_flush_iotlb()
1305 if (cap_read_drain(iommu->cap)) in qi_flush_iotlb()
1313 qi_submit_sync(&desc, iommu); in qi_flush_iotlb()
1316 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, in qi_flush_dev_iotlb() argument
1334 qi_submit_sync(&desc, iommu); in qi_flush_dev_iotlb()
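
The qi_flush_*() wrappers above just build one 128-bit descriptor and hand it to qi_submit_sync(). In qi_flush_iotlb(), the read/write drain bits are set only when the unit advertises draining in its capability register (cap_write_drain()/cap_read_drain(), lines 1302-1305). An illustrative encoder; the field positions are a sketch of the spec layout, not the kernel's QI_IOTLB_* macros:

    #include <stdint.h>

    struct qi_desc128 { uint64_t low, high; };

    /* Illustrative IOTLB-invalidate encoding (type 0x2, DID at bits
     * 31:16, drain-write/read at bits 6/7); consult the VT-d spec or
     * intel-iommu.h for the authoritative layout. */
    struct qi_desc128 iotlb_inv_desc(uint16_t did, uint64_t cap)
    {
        struct qi_desc128 d = { 0x2ULL | ((uint64_t)did << 16), 0 };

        if ((cap >> 54) & 1)        /* cap_write_drain() */
            d.low |= 1ULL << 6;
        if ((cap >> 55) & 1)        /* cap_read_drain()  */
            d.low |= 1ULL << 7;
        return d;
    }
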
1340 void dmar_disable_qi(struct intel_iommu *iommu) in dmar_disable_qi() argument
1346 if (!ecap_qis(iommu->ecap)) in dmar_disable_qi()
1349 raw_spin_lock_irqsave(&iommu->register_lock, flags); in dmar_disable_qi()
1351 sts = readl(iommu->reg + DMAR_GSTS_REG); in dmar_disable_qi()
1358 while ((readl(iommu->reg + DMAR_IQT_REG) != in dmar_disable_qi()
1359 readl(iommu->reg + DMAR_IQH_REG)) && in dmar_disable_qi()
1363 iommu->gcmd &= ~DMA_GCMD_QIE; in dmar_disable_qi()
1364 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in dmar_disable_qi()
1366 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, in dmar_disable_qi()
1369 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in dmar_disable_qi()
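
dmar_disable_qi() drains before it disables: it busy-waits, with a bounded poll count, for the hardware head (DMAR_IQH_REG) to catch up with the software tail (DMAR_IQT_REG), and only then clears DMA_GCMD_QIE and waits for the QIES status bit to drop. The drain loop in miniature; read32() is a hypothetical MMIO accessor:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t read32(volatile void *reg)
    {
        return *(volatile uint32_t *)reg;
    }

    /* Bounded wait for IQH == IQT, as at lines 1358-1359; returns
     * false if the queue never drained within the poll budget. */
    bool drain_queue(volatile void *iqh, volatile void *iqt, int max_polls)
    {
        while (read32(iqt) != read32(iqh) && max_polls-- > 0)
            ;   /* the kernel also relaxes the CPU between polls */
        return read32(iqt) == read32(iqh);
    }
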
1375 static void __dmar_enable_qi(struct intel_iommu *iommu) in __dmar_enable_qi() argument
1379 struct q_inval *qi = iommu->qi; in __dmar_enable_qi()
1384 raw_spin_lock_irqsave(&iommu->register_lock, flags); in __dmar_enable_qi()
1387 writel(0, iommu->reg + DMAR_IQT_REG); in __dmar_enable_qi()
1389 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); in __dmar_enable_qi()
1391 iommu->gcmd |= DMA_GCMD_QIE; in __dmar_enable_qi()
1392 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in __dmar_enable_qi()
1395 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); in __dmar_enable_qi()
1397 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in __dmar_enable_qi()
1405 int dmar_enable_qi(struct intel_iommu *iommu) in dmar_enable_qi() argument
1410 if (!ecap_qis(iommu->ecap)) in dmar_enable_qi()
1416 if (iommu->qi) in dmar_enable_qi()
1419 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1420 if (!iommu->qi) in dmar_enable_qi()
1423 qi = iommu->qi; in dmar_enable_qi()
1426 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); in dmar_enable_qi()
1429 iommu->qi = NULL; in dmar_enable_qi()
1439 iommu->qi = NULL; in dmar_enable_qi()
1445 __dmar_enable_qi(iommu); in dmar_enable_qi()
1503 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq) in dmar_msi_reg() argument
1505 if (iommu->irq == irq) in dmar_msi_reg()
1507 else if (iommu->pr_irq == irq) in dmar_msi_reg()
1515 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_unmask() local
1516 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_unmask()
1520 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_unmask()
1521 writel(0, iommu->reg + reg); in dmar_msi_unmask()
1523 readl(iommu->reg + reg); in dmar_msi_unmask()
1524 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_unmask()
1529 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); in dmar_msi_mask() local
1530 int reg = dmar_msi_reg(iommu, data->irq); in dmar_msi_mask()
1534 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_mask()
1535 writel(DMA_FECTL_IM, iommu->reg + reg); in dmar_msi_mask()
1537 readl(iommu->reg + reg); in dmar_msi_mask()
1538 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_mask()
1543 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_write() local
1544 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_write()
1547 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_write()
1548 writel(msg->data, iommu->reg + reg + 4); in dmar_msi_write()
1549 writel(msg->address_lo, iommu->reg + reg + 8); in dmar_msi_write()
1550 writel(msg->address_hi, iommu->reg + reg + 12); in dmar_msi_write()
1551 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_write()
1556 struct intel_iommu *iommu = irq_get_handler_data(irq); in dmar_msi_read() local
1557 int reg = dmar_msi_reg(iommu, irq); in dmar_msi_read()
1560 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_msi_read()
1561 msg->data = readl(iommu->reg + reg + 4); in dmar_msi_read()
1562 msg->address_lo = readl(iommu->reg + reg + 8); in dmar_msi_read()
1563 msg->address_hi = readl(iommu->reg + reg + 12); in dmar_msi_read()
1564 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_msi_read()
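
dmar_msi_reg() selects the control register for whichever interrupt matched (the fault irq or pr_irq), and the +4/+8/+12 offsets in dmar_msi_write()/dmar_msi_read() reveal the per-interrupt block layout: control word, then MSI data, address low, address high, 32 bits apiece. As a sketch:

    #include <stdint.h>

    /* Per-interrupt register block implied by the +4/+8/+12 offsets. */
    enum {
        MSI_CTL     = 0x0,   /* mask bit (DMA_FECTL_IM) lives here */
        MSI_DATA    = 0x4,
        MSI_ADDR_LO = 0x8,
        MSI_ADDR_HI = 0xc,
    };

    /* Program one block the way dmar_msi_write() does; 'block' is a
     * hypothetical pointer to iommu->reg + dmar_msi_reg(...). */
    void msi_program(volatile uint32_t *block, uint32_t data,
                     uint32_t addr_lo, uint32_t addr_hi)
    {
        block[MSI_DATA / 4]    = data;
        block[MSI_ADDR_LO / 4] = addr_lo;
        block[MSI_ADDR_HI / 4] = addr_hi;
    }
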
1567 static int dmar_fault_do_one(struct intel_iommu *iommu, int type, in dmar_fault_do_one() argument
1595 struct intel_iommu *iommu = dev_id; in dmar_fault() local
1600 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1601 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in dmar_fault()
1610 reg = cap_fault_reg_offset(iommu->cap); in dmar_fault()
1619 data = readl(iommu->reg + reg + in dmar_fault()
1627 data = readl(iommu->reg + reg + in dmar_fault()
1631 guest_addr = dmar_readq(iommu->reg + reg + in dmar_fault()
1635 writel(DMA_FRCD_F, iommu->reg + reg + in dmar_fault()
1638 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
1640 dmar_fault_do_one(iommu, type, fault_reason, in dmar_fault()
1644 if (fault_index >= cap_num_fault_regs(iommu->cap)) in dmar_fault()
1646 raw_spin_lock_irqsave(&iommu->register_lock, flag); in dmar_fault()
1649 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG); in dmar_fault()
1652 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in dmar_fault()
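
dmar_fault() walks the fault-recording registers as a ring: the start index comes from the fault status register, each 16-byte record is consumed by reading its fields and writing DMA_FRCD_F back to clear it, and the index wraps at cap_num_fault_regs() (line 1644). The walk in outline; pending()/consume() are hypothetical per-record helpers, stubbed here so the sketch compiles:

    #include <stdbool.h>

    /* 'pending' would test the record's F bit; 'consume' would read
     * the record and write DMA_FRCD_F to clear it. Stubs only. */
    static bool pending(int index) { (void)index; return false; }
    static void consume(int index) { (void)index; }

    void walk_fault_ring(int start, int num_fault_regs)
    {
        int idx = start;

        while (pending(idx)) {
            consume(idx);
            if (++idx >= num_fault_regs)
                idx = 0;            /* same wrap as dmar_fault() */
            if (idx == start)
                break;              /* illustrative one-lap guard */
        }
    }
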
1656 int dmar_set_interrupt(struct intel_iommu *iommu) in dmar_set_interrupt() argument
1663 if (iommu->irq) in dmar_set_interrupt()
1666 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu); in dmar_set_interrupt()
1668 iommu->irq = irq; in dmar_set_interrupt()
1674 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); in dmar_set_interrupt()
1683 struct intel_iommu *iommu; in enable_drhd_fault_handling() local
1688 for_each_iommu(iommu, drhd) { in enable_drhd_fault_handling()
1690 int ret = dmar_set_interrupt(iommu); in enable_drhd_fault_handling()
1701 dmar_fault(iommu->irq, iommu); in enable_drhd_fault_handling()
1702 fault_status = readl(iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
1703 writel(fault_status, iommu->reg + DMAR_FSTS_REG); in enable_drhd_fault_handling()
1712 int dmar_reenable_qi(struct intel_iommu *iommu) in dmar_reenable_qi() argument
1714 if (!ecap_qis(iommu->ecap)) in dmar_reenable_qi()
1717 if (!iommu->qi) in dmar_reenable_qi()
1723 dmar_disable_qi(iommu); in dmar_reenable_qi()
1729 __dmar_enable_qi(iommu); in dmar_reenable_qi()