Lines matching refs: iommu
Cross-reference hits for the identifier "iommu" in the Intel VT-d driver (intel-iommu.c). Each entry gives the source line number, the matching line, and either the enclosing function or the kind of reference (member, argument, local).

430 	struct intel_iommu *iommu; /* IOMMU used by this device */  member
485 static void domain_context_clear(struct intel_iommu *iommu,
488 struct intel_iommu *iommu);
529 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ argument
530 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
533 #define pasid_enabled(iommu) (ecs_enabled(iommu) && \ argument
534 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
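
The two feature gates at 529-534 are split across continuation lines in the listing; assembled, they read as follows (whitespace is illustrative):

#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))

#define pasid_enabled(iommu)	(ecs_enabled(iommu) && \
				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
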
545 static bool translation_pre_enabled(struct intel_iommu *iommu) in translation_pre_enabled() argument
547 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
550 static void clear_translation_pre_enabled(struct intel_iommu *iommu) in clear_translation_pre_enabled() argument
552 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
555 static void init_translation_status(struct intel_iommu *iommu) in init_translation_status() argument
559 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
561 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
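
For orientation, the three pre-enabled-translation helpers referenced at 545-561 assemble into roughly the following; the gsts local and the DMA_GSTS_TES status-bit test are filled in here as assumptions, since the listing only shows the lines that mention iommu:

static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	/* Read the global status register and record whether firmware
	 * (or a previous kernel) left DMA translation enabled. */
	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}
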
615 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) in get_iommu_domain() argument
620 domains = iommu->domains[idx]; in get_iommu_domain()
627 static void set_iommu_domain(struct intel_iommu *iommu, u16 did, in set_iommu_domain() argument
633 if (!iommu->domains[idx]) { in set_iommu_domain()
635 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); in set_iommu_domain()
638 domains = iommu->domains[idx]; in set_iommu_domain()
705 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) in __iommu_calculate_agaw() argument
710 sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_agaw()
723 int iommu_calculate_max_sagaw(struct intel_iommu *iommu) in iommu_calculate_max_sagaw() argument
725 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); in iommu_calculate_max_sagaw()
733 int iommu_calculate_agaw(struct intel_iommu *iommu) in iommu_calculate_agaw() argument
735 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); in iommu_calculate_agaw()
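
A sketch of how the AGAW helpers at 705-735 fit together; only the cap_sagaw() read and the two wrappers appear in the listing, so the width_to_agaw()/test_bit() selection loop is filled in as an assumption:

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	/* Pick the widest supported adjusted guest address width
	 * that does not exceed max_gaw. */
	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/* Widest AGAW the hardware supports at all. */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/* AGAW for the default domain address width. */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
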
757 struct intel_iommu *iommu; in domain_update_iommu_coherency() local
775 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_coherency()
776 if (!ecap_coherent(iommu->ecap)) { in domain_update_iommu_coherency()
787 struct intel_iommu *iommu; in domain_update_iommu_snooping() local
791 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_snooping()
792 if (iommu != skip) { in domain_update_iommu_snooping()
793 if (!ecap_sc_support(iommu->ecap)) { in domain_update_iommu_snooping()
807 struct intel_iommu *iommu; in domain_update_iommu_superpage() local
816 for_each_active_iommu(iommu, drhd) { in domain_update_iommu_superpage()
817 if (iommu != skip) { in domain_update_iommu_superpage()
818 mask &= cap_super_page_val(iommu->cap); in domain_update_iommu_superpage()
836 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu, in iommu_context_addr() argument
839 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
844 if (ecs_enabled(iommu)) { in iommu_context_addr()
858 context = alloc_pgtable_page(iommu->node); in iommu_context_addr()
862 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); in iommu_context_addr()
865 __iommu_flush_cache(iommu, entry, sizeof(*entry)); in iommu_context_addr()
872 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; in iommu_dummy()
878 struct intel_iommu *iommu; in device_to_iommu() local
894 for_each_active_iommu(iommu, drhd) { in device_to_iommu()
923 iommu = NULL; in device_to_iommu()
927 return iommu; in device_to_iommu()
937 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) in device_context_mapped() argument
943 spin_lock_irqsave(&iommu->lock, flags); in device_context_mapped()
944 context = iommu_context_addr(iommu, bus, devfn, 0); in device_context_mapped()
947 spin_unlock_irqrestore(&iommu->lock, flags); in device_context_mapped()
951 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) in clear_context_table() argument
956 spin_lock_irqsave(&iommu->lock, flags); in clear_context_table()
957 context = iommu_context_addr(iommu, bus, devfn, 0); in clear_context_table()
960 __iommu_flush_cache(iommu, context, sizeof(*context)); in clear_context_table()
962 spin_unlock_irqrestore(&iommu->lock, flags); in clear_context_table()
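
The two context-table helpers at 937-962 reduce to a lookup and a clear under the per-IOMMU lock; the context_present() test and the context_clear_entry() call are assumptions filled in from context, not shown in the listing:

static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	ret = context ? context_present(context) : 0;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	struct context_entry *context;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (context) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
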
965 static void free_context_table(struct intel_iommu *iommu) in free_context_table() argument
971 spin_lock_irqsave(&iommu->lock, flags); in free_context_table()
972 if (!iommu->root_entry) { in free_context_table()
976 context = iommu_context_addr(iommu, i, 0, 0); in free_context_table()
980 if (!ecs_enabled(iommu)) in free_context_table()
983 context = iommu_context_addr(iommu, i, 0x80, 0); in free_context_table()
988 free_pgtable_page(iommu->root_entry); in free_context_table()
989 iommu->root_entry = NULL; in free_context_table()
991 spin_unlock_irqrestore(&iommu->lock, flags); in free_context_table()
1287 static int iommu_alloc_root_entry(struct intel_iommu *iommu) in iommu_alloc_root_entry() argument
1292 root = (struct root_entry *)alloc_pgtable_page(iommu->node); in iommu_alloc_root_entry()
1295 iommu->name); in iommu_alloc_root_entry()
1299 __iommu_flush_cache(iommu, root, ROOT_SIZE); in iommu_alloc_root_entry()
1301 spin_lock_irqsave(&iommu->lock, flags); in iommu_alloc_root_entry()
1302 iommu->root_entry = root; in iommu_alloc_root_entry()
1303 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_alloc_root_entry()
1308 static void iommu_set_root_entry(struct intel_iommu *iommu) in iommu_set_root_entry() argument
1314 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1315 if (ecs_enabled(iommu)) in iommu_set_root_entry()
1318 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1319 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1321 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1324 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_set_root_entry()
1327 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
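
The root-table programming at 1308-1327 follows the usual VT-d register protocol: write the root table address, set the command bit, and poll the status register under register_lock. The DMA_RTADDR_RTT flag and the DMA_GSTS_RTPS poll condition are filled in as assumptions:

static void iommu_set_root_entry(struct intel_iommu *iommu)
{
	u64 addr;
	u32 sts;
	unsigned long flag;

	addr = virt_to_phys(iommu->root_entry);
	if (ecs_enabled(iommu))
		addr |= DMA_RTADDR_RTT;	/* extended root table */

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Wait for hardware to latch the new root table pointer. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
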
1330 static void iommu_flush_write_buffer(struct intel_iommu *iommu) in iommu_flush_write_buffer() argument
1335 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1338 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1339 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1342 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_flush_write_buffer()
1345 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
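
iommu_flush_write_buffer() (1330-1345) is a no-op unless the hardware needs a write-buffer flush; the DMA_GSTS_WBFS completion test is an assumption not shown in the listing:

static void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
	u32 val;
	unsigned long flag;

	/* Only needed when the RWBF capability (or the rwbf quirk) says
	 * stale writes may sit in the chipset write buffer. */
	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Wait for the flush to complete. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
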
1349 static void __iommu_flush_context(struct intel_iommu *iommu, in __iommu_flush_context() argument
1372 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1373 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1376 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, in __iommu_flush_context()
1379 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1383 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, in __iommu_flush_iotlb() argument
1386 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1412 if (cap_read_drain(iommu->cap)) in __iommu_flush_iotlb()
1415 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1418 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1421 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1422 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1425 IOMMU_WAIT_OP(iommu, tlb_offset + 8, in __iommu_flush_iotlb()
1428 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1440 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, in iommu_support_dev_iotlb() argument
1447 if (!iommu->qi) in iommu_support_dev_iotlb()
1451 if (info->iommu == iommu && info->bus == bus && in iommu_support_dev_iotlb()
1527 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); in iommu_flush_dev_iotlb()
1532 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, in iommu_flush_iotlb_psi() argument
1539 u16 did = domain->iommu_did[iommu->seq_id]; in iommu_flush_iotlb_psi()
1551 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) in iommu_flush_iotlb_psi()
1552 iommu->flush.flush_iotlb(iommu, did, 0, 0, in iommu_flush_iotlb_psi()
1555 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, in iommu_flush_iotlb_psi()
1562 if (!cap_caching_mode(iommu->cap) || !map) in iommu_flush_iotlb_psi()
1563 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), in iommu_flush_iotlb_psi()
1567 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) in iommu_disable_protect_mem_regions() argument
1572 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1573 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1575 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1578 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, in iommu_disable_protect_mem_regions()
1581 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
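
The protected-memory disable at 1567-1581 clears the enable bit in DMAR_PMEN_REG and polls until the status bit drops; DMA_PMEN_EPM and DMA_PMEN_PRS are filled in as assumptions:

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
	u32 pmen;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;		/* drop Enable Protected Memory */
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* Wait for the Protected Region Status bit to clear. */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
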
1584 static void iommu_enable_translation(struct intel_iommu *iommu) in iommu_enable_translation() argument
1589 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1590 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1591 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1594 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_enable_translation()
1597 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1600 static void iommu_disable_translation(struct intel_iommu *iommu) in iommu_disable_translation() argument
1605 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1606 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1607 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1610 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, in iommu_disable_translation()
1613 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
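
Enable and disable translation (1584-1613) are mirror images: toggle DMA_GCMD_TE in the cached gcmd value, write DMAR_GCMD_REG, and poll the status register. The DMA_GSTS_TES poll conditions are assumptions:

static void iommu_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Wait for the Translation Enable Status bit to come up. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static void iommu_disable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Wait for the status bit to clear before returning. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
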
1617 static int iommu_init_domains(struct intel_iommu *iommu) in iommu_init_domains() argument
1622 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1624 iommu->name, ndomains); in iommu_init_domains()
1627 spin_lock_init(&iommu->lock); in iommu_init_domains()
1629 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); in iommu_init_domains()
1630 if (!iommu->domain_ids) { in iommu_init_domains()
1632 iommu->name); in iommu_init_domains()
1637 iommu->domains = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1639 if (iommu->domains) { in iommu_init_domains()
1641 iommu->domains[0] = kzalloc(size, GFP_KERNEL); in iommu_init_domains()
1644 if (!iommu->domains || !iommu->domains[0]) { in iommu_init_domains()
1646 iommu->name); in iommu_init_domains()
1647 kfree(iommu->domain_ids); in iommu_init_domains()
1648 kfree(iommu->domains); in iommu_init_domains()
1649 iommu->domain_ids = NULL; in iommu_init_domains()
1650 iommu->domains = NULL; in iommu_init_domains()
1662 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1667 static void disable_dmar_iommu(struct intel_iommu *iommu) in disable_dmar_iommu() argument
1672 if (!iommu->domains || !iommu->domain_ids) in disable_dmar_iommu()
1679 if (info->iommu != iommu) in disable_dmar_iommu()
1694 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1695 iommu_disable_translation(iommu); in disable_dmar_iommu()
1698 static void free_dmar_iommu(struct intel_iommu *iommu) in free_dmar_iommu() argument
1700 if ((iommu->domains) && (iommu->domain_ids)) { in free_dmar_iommu()
1701 int elems = (cap_ndoms(iommu->cap) >> 8) + 1; in free_dmar_iommu()
1705 kfree(iommu->domains[i]); in free_dmar_iommu()
1706 kfree(iommu->domains); in free_dmar_iommu()
1707 kfree(iommu->domain_ids); in free_dmar_iommu()
1708 iommu->domains = NULL; in free_dmar_iommu()
1709 iommu->domain_ids = NULL; in free_dmar_iommu()
1712 g_iommus[iommu->seq_id] = NULL; in free_dmar_iommu()
1715 free_context_table(iommu); in free_dmar_iommu()
1718 if (pasid_enabled(iommu)) { in free_dmar_iommu()
1719 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1720 intel_svm_finish_prq(iommu); in free_dmar_iommu()
1721 intel_svm_free_pasid_tables(iommu); in free_dmar_iommu()
1744 struct intel_iommu *iommu) in domain_attach_iommu() argument
1750 assert_spin_locked(&iommu->lock); in domain_attach_iommu()
1752 domain->iommu_refcnt[iommu->seq_id] += 1; in domain_attach_iommu()
1754 if (domain->iommu_refcnt[iommu->seq_id] == 1) { in domain_attach_iommu()
1755 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1756 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1759 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1760 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_attach_iommu()
1765 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1766 set_iommu_domain(iommu, num, domain); in domain_attach_iommu()
1768 domain->iommu_did[iommu->seq_id] = num; in domain_attach_iommu()
1769 domain->nid = iommu->node; in domain_attach_iommu()
1778 struct intel_iommu *iommu) in domain_detach_iommu() argument
1783 assert_spin_locked(&iommu->lock); in domain_detach_iommu()
1785 domain->iommu_refcnt[iommu->seq_id] -= 1; in domain_detach_iommu()
1787 if (domain->iommu_refcnt[iommu->seq_id] == 0) { in domain_detach_iommu()
1788 num = domain->iommu_did[iommu->seq_id]; in domain_detach_iommu()
1789 clear_bit(num, iommu->domain_ids); in domain_detach_iommu()
1790 set_iommu_domain(iommu, num, NULL); in domain_detach_iommu()
1793 domain->iommu_did[iommu->seq_id] = 0; in domain_detach_iommu()
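
The attach/detach pair at 1744-1793 reference-counts a domain per IOMMU and allocates or frees its hardware domain ID on the first and last attach. A simplified sketch assembled from the listed fragments; the -ENOSPC error path and the additional global bookkeeping the real functions do are reduced to what the fragments show:

static int domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long ndomains;
	int num;

	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] += 1;
	if (domain->iommu_refcnt[iommu->seq_id] == 1) {
		/* First device behind this IOMMU joins the domain:
		 * allocate a hardware domain ID from the bitmap. */
		ndomains = cap_ndoms(iommu->cap);
		num = find_first_zero_bit(iommu->domain_ids, ndomains);
		if (num >= ndomains) {
			pr_err("%s: No free domain ids\n", iommu->name);
			domain->iommu_refcnt[iommu->seq_id] -= 1;
			return -ENOSPC;
		}

		set_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, domain);

		domain->iommu_did[iommu->seq_id] = num;
		domain->nid = iommu->node;
	}

	return 0;
}

static void domain_detach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
{
	int num;

	assert_spin_locked(&iommu->lock);

	domain->iommu_refcnt[iommu->seq_id] -= 1;
	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
		/* Last device gone: return the domain ID to the pool. */
		num = domain->iommu_did[iommu->seq_id];
		clear_bit(num, iommu->domain_ids);
		set_iommu_domain(iommu, num, NULL);

		domain->iommu_did[iommu->seq_id] = 0;
	}
}
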
1861 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, in domain_init() argument
1872 if (guest_width > cap_mgaw(iommu->cap)) in domain_init()
1873 guest_width = cap_mgaw(iommu->cap); in domain_init()
1877 sagaw = cap_sagaw(iommu->cap); in domain_init()
1887 if (ecap_coherent(iommu->ecap)) in domain_init()
1892 if (ecap_sc_support(iommu->ecap)) in domain_init()
1898 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); in domain_init()
1902 domain->nid = iommu->node; in domain_init()
1908 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); in domain_init()
1940 struct intel_iommu *iommu, in domain_context_mapping_one() argument
1943 u16 did = domain->iommu_did[iommu->seq_id]; in domain_context_mapping_one()
1962 spin_lock(&iommu->lock); in domain_context_mapping_one()
1965 context = iommu_context_addr(iommu, bus, devfn, 1); in domain_context_mapping_one()
1983 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { in domain_context_mapping_one()
1990 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); in domain_context_mapping_one()
1997 context_set_address_width(context, iommu->agaw); in domain_context_mapping_one()
2004 context_set_address_width(context, iommu->msagaw); in domain_context_mapping_one()
2018 if (cap_caching_mode(iommu->cap)) { in domain_context_mapping_one()
2019 iommu->flush.flush_context(iommu, 0, in domain_context_mapping_one()
2023 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in domain_context_mapping_one()
2025 iommu_flush_write_buffer(iommu); in domain_context_mapping_one()
2032 spin_unlock(&iommu->lock); in domain_context_mapping_one()
2040 struct intel_iommu *iommu; member
2048 return domain_context_mapping_one(data->domain, data->iommu, in domain_context_mapping_cb()
2055 struct intel_iommu *iommu; in domain_context_mapping() local
2059 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapping()
2060 if (!iommu) in domain_context_mapping()
2064 return domain_context_mapping_one(domain, iommu, bus, devfn); in domain_context_mapping()
2067 data.iommu = iommu; in domain_context_mapping()
2076 struct intel_iommu *iommu = opaque; in domain_context_mapped_cb() local
2078 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_mapped_cb()
2083 struct intel_iommu *iommu; in domain_context_mapped() local
2086 iommu = device_to_iommu(dev, &bus, &devfn); in domain_context_mapped()
2087 if (!iommu) in domain_context_mapped()
2091 return device_context_mapped(iommu, bus, devfn); in domain_context_mapped()
2094 domain_context_mapped_cb, iommu); in domain_context_mapped()
2257 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) in domain_context_clear_one() argument
2259 if (!iommu) in domain_context_clear_one()
2262 clear_context_table(iommu, bus, devfn); in domain_context_clear_one()
2263 iommu->flush.flush_context(iommu, 0, 0, 0, in domain_context_clear_one()
2265 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in domain_context_clear_one()
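
Tearing down a single context entry (2257-2265) clears the table slot and then issues global context and IOTLB flushes, since mappings cached under the old entry can no longer be trusted. The DMA_CCMD_GLOBAL_INVL argument continues on a line not shown in the listing and is an assumption:

static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
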
2274 info->dev->archdata.iommu = NULL; in unlink_domain_info()
2297 info = dev->archdata.iommu; in find_domain()
2309 if (info->iommu->segment == segment && info->bus == bus && in dmar_search_domain_by_dev_info()
2316 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, in dmar_insert_one_dev_info() argument
2337 info->iommu = iommu; in dmar_insert_one_dev_info()
2342 if (ecap_dev_iotlb_support(iommu->ecap) && in dmar_insert_one_dev_info()
2347 if (ecs_enabled(iommu)) { in dmar_insert_one_dev_info()
2348 if (pasid_enabled(iommu)) { in dmar_insert_one_dev_info()
2354 if (info->ats_supported && ecap_prs(iommu->ecap) && in dmar_insert_one_dev_info()
2366 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); in dmar_insert_one_dev_info()
2380 spin_lock(&iommu->lock); in dmar_insert_one_dev_info()
2381 ret = domain_attach_iommu(domain, iommu); in dmar_insert_one_dev_info()
2382 spin_unlock(&iommu->lock); in dmar_insert_one_dev_info()
2393 dev->archdata.iommu = info; in dmar_insert_one_dev_info()
2416 struct intel_iommu *iommu; in get_domain_for_dev() local
2425 iommu = device_to_iommu(dev, &bus, &devfn); in get_domain_for_dev()
2426 if (!iommu) in get_domain_for_dev()
2441 iommu = info->iommu; in get_domain_for_dev()
2455 if (domain_init(domain, iommu, gaw)) { in get_domain_for_dev()
2462 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), in get_domain_for_dev()
2475 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in get_domain_for_dev()
2571 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) in iommu_prepare_rmrr_dev()
2644 info = dev->archdata.iommu; in identity_mapping()
2654 struct intel_iommu *iommu; in domain_add_dev_info() local
2657 iommu = device_to_iommu(dev, &bus, &devfn); in domain_add_dev_info()
2658 if (!iommu) in domain_add_dev_info()
2661 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); in domain_add_dev_info()
2820 struct intel_iommu *iommu; in iommu_prepare_static_identity_mapping() local
2831 for_each_active_iommu(iommu, drhd) in iommu_prepare_static_identity_mapping()
2854 static void intel_iommu_init_qi(struct intel_iommu *iommu) in intel_iommu_init_qi() argument
2862 if (!iommu->qi) { in intel_iommu_init_qi()
2866 dmar_fault(-1, iommu); in intel_iommu_init_qi()
2871 dmar_disable_qi(iommu); in intel_iommu_init_qi()
2874 if (dmar_enable_qi(iommu)) { in intel_iommu_init_qi()
2878 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
2879 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
2881 iommu->name); in intel_iommu_init_qi()
2883 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
2884 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
2885 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
2889 static int copy_context_table(struct intel_iommu *iommu, in copy_context_table() argument
2911 __iommu_flush_cache(iommu, new_ce, in copy_context_table()
2941 new_ce = alloc_pgtable_page(iommu->node); in copy_context_table()
2955 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2956 set_bit(did, iommu->domain_ids); in copy_context_table()
2982 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); in copy_context_table()
2991 static int copy_translation_tables(struct intel_iommu *iommu) in copy_translation_tables() argument
3002 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
3004 new_ext = !!ecap_ecs(iommu->ecap); in copy_translation_tables()
3031 ret = copy_context_table(iommu, &old_rt[bus], in copy_translation_tables()
3035 iommu->name, bus); in copy_translation_tables()
3040 spin_lock_irqsave(&iommu->lock, flags); in copy_translation_tables()
3049 iommu->root_entry[bus].lo = val; in copy_translation_tables()
3056 iommu->root_entry[bus].hi = val; in copy_translation_tables()
3059 spin_unlock_irqrestore(&iommu->lock, flags); in copy_translation_tables()
3063 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
3079 struct intel_iommu *iommu; in init_dmars() local
3120 for_each_active_iommu(iommu, drhd) { in init_dmars()
3121 g_iommus[iommu->seq_id] = iommu; in init_dmars()
3123 intel_iommu_init_qi(iommu); in init_dmars()
3125 ret = iommu_init_domains(iommu); in init_dmars()
3129 init_translation_status(iommu); in init_dmars()
3131 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { in init_dmars()
3132 iommu_disable_translation(iommu); in init_dmars()
3133 clear_translation_pre_enabled(iommu); in init_dmars()
3135 iommu->name); in init_dmars()
3143 ret = iommu_alloc_root_entry(iommu); in init_dmars()
3147 if (translation_pre_enabled(iommu)) { in init_dmars()
3150 ret = copy_translation_tables(iommu); in init_dmars()
3162 iommu->name); in init_dmars()
3163 iommu_disable_translation(iommu); in init_dmars()
3164 clear_translation_pre_enabled(iommu); in init_dmars()
3167 iommu->name); in init_dmars()
3172 iommu_flush_write_buffer(iommu); in init_dmars()
3173 iommu_set_root_entry(iommu); in init_dmars()
3174 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in init_dmars()
3175 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_dmars()
3177 if (!ecap_pass_through(iommu->ecap)) in init_dmars()
3180 if (pasid_enabled(iommu)) in init_dmars()
3181 intel_svm_alloc_pasid_tables(iommu); in init_dmars()
3257 for_each_iommu(iommu, drhd) { in init_dmars()
3264 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3268 iommu_flush_write_buffer(iommu); in init_dmars()
3271 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) { in init_dmars()
3272 ret = intel_svm_enable_prq(iommu); in init_dmars()
3277 ret = dmar_set_interrupt(iommu); in init_dmars()
3281 if (!translation_pre_enabled(iommu)) in init_dmars()
3282 iommu_enable_translation(iommu); in init_dmars()
3284 iommu_disable_protect_mem_regions(iommu); in init_dmars()
3290 for_each_active_iommu(iommu, drhd) { in init_dmars()
3291 disable_dmar_iommu(iommu); in init_dmars()
3292 free_dmar_iommu(iommu); in init_dmars()
3373 info = dev->archdata.iommu; in get_valid_domain_for_dev()
3432 struct intel_iommu *iommu; in __intel_map_single() local
3444 iommu = domain_get_iommu(domain); in __intel_map_single()
3456 !cap_zlr(iommu->cap)) in __intel_map_single()
3472 if (cap_caching_mode(iommu->cap)) in __intel_map_single()
3473 iommu_flush_iotlb_psi(iommu, domain, in __intel_map_single()
3477 iommu_flush_write_buffer(iommu); in __intel_map_single()
3508 struct intel_iommu *iommu = g_iommus[i]; in flush_unmaps() local
3509 if (!iommu) in flush_unmaps()
3516 if (!cap_caching_mode(iommu->cap)) in flush_unmaps()
3517 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in flush_unmaps()
3525 if (cap_caching_mode(iommu->cap)) in flush_unmaps()
3526 iommu_flush_iotlb_psi(iommu, domain, in flush_unmaps()
3557 struct intel_iommu *iommu; in add_unmap() local
3563 iommu = domain_get_iommu(dom); in add_unmap()
3564 iommu_id = iommu->seq_id; in add_unmap()
3585 struct intel_iommu *iommu; in intel_unmap() local
3594 iommu = domain_get_iommu(domain); in intel_unmap()
3610 iommu_flush_iotlb_psi(iommu, domain, start_pfn, in intel_unmap()
3724 struct intel_iommu *iommu; in intel_map_sg() local
3734 iommu = domain_get_iommu(domain); in intel_map_sg()
3751 !cap_zlr(iommu->cap)) in intel_map_sg()
3767 if (cap_caching_mode(iommu->cap)) in intel_map_sg()
3768 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1); in intel_map_sg()
3770 iommu_flush_write_buffer(iommu); in intel_map_sg()
3878 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in quirk_ioat_snb_local_iommu()
3918 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; in init_no_remapping_devices()
3927 struct intel_iommu *iommu = NULL; in init_iommu_hw() local
3929 for_each_active_iommu(iommu, drhd) in init_iommu_hw()
3930 if (iommu->qi) in init_iommu_hw()
3931 dmar_reenable_qi(iommu); in init_iommu_hw()
3933 for_each_iommu(iommu, drhd) { in init_iommu_hw()
3940 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3944 iommu_flush_write_buffer(iommu); in init_iommu_hw()
3946 iommu_set_root_entry(iommu); in init_iommu_hw()
3948 iommu->flush.flush_context(iommu, 0, 0, 0, in init_iommu_hw()
3950 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in init_iommu_hw()
3951 iommu_enable_translation(iommu); in init_iommu_hw()
3952 iommu_disable_protect_mem_regions(iommu); in init_iommu_hw()
3961 struct intel_iommu *iommu; in iommu_flush_all() local
3963 for_each_active_iommu(iommu, drhd) { in iommu_flush_all()
3964 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
3966 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
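
iommu_flush_all() (3961-3966) is used around suspend/resume and simply issues global context and IOTLB invalidations on every active IOMMU; the flush-type constants on the continuation lines are assumptions:

static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
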
3974 struct intel_iommu *iommu = NULL; in iommu_suspend() local
3977 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3978 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, in iommu_suspend()
3980 if (!iommu->iommu_state) in iommu_suspend()
3986 for_each_active_iommu(iommu, drhd) { in iommu_suspend()
3987 iommu_disable_translation(iommu); in iommu_suspend()
3989 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
3991 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
3992 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
3993 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
3994 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
3995 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
3996 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
3997 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
3998 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
4000 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
4005 for_each_active_iommu(iommu, drhd) in iommu_suspend()
4006 kfree(iommu->iommu_state); in iommu_suspend()
4014 struct intel_iommu *iommu = NULL; in iommu_resume() local
4025 for_each_active_iommu(iommu, drhd) { in iommu_resume()
4027 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
4029 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
4030 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
4031 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
4032 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
4033 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
4034 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
4035 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
4036 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
4038 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
4041 for_each_active_iommu(iommu, drhd) in iommu_resume()
4042 kfree(iommu->iommu_state); in iommu_resume()
4189 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add() local
4191 if (g_iommus[iommu->seq_id]) in intel_iommu_add()
4194 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { in intel_iommu_add()
4196 iommu->name); in intel_iommu_add()
4199 if (!ecap_sc_support(iommu->ecap) && in intel_iommu_add()
4200 domain_update_iommu_snooping(iommu)) { in intel_iommu_add()
4202 iommu->name); in intel_iommu_add()
4205 sp = domain_update_iommu_superpage(iommu) - 1; in intel_iommu_add()
4206 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { in intel_iommu_add()
4208 iommu->name); in intel_iommu_add()
4215 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
4216 iommu_disable_translation(iommu); in intel_iommu_add()
4218 g_iommus[iommu->seq_id] = iommu; in intel_iommu_add()
4219 ret = iommu_init_domains(iommu); in intel_iommu_add()
4221 ret = iommu_alloc_root_entry(iommu); in intel_iommu_add()
4226 if (pasid_enabled(iommu)) in intel_iommu_add()
4227 intel_svm_alloc_pasid_tables(iommu); in intel_iommu_add()
4235 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4239 intel_iommu_init_qi(iommu); in intel_iommu_add()
4240 iommu_flush_write_buffer(iommu); in intel_iommu_add()
4243 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) { in intel_iommu_add()
4244 ret = intel_svm_enable_prq(iommu); in intel_iommu_add()
4249 ret = dmar_set_interrupt(iommu); in intel_iommu_add()
4253 iommu_set_root_entry(iommu); in intel_iommu_add()
4254 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in intel_iommu_add()
4255 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in intel_iommu_add()
4256 iommu_enable_translation(iommu); in intel_iommu_add()
4258 iommu_disable_protect_mem_regions(iommu); in intel_iommu_add()
4262 disable_dmar_iommu(iommu); in intel_iommu_add()
4264 free_dmar_iommu(iommu); in intel_iommu_add()
4271 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug() local
4275 if (iommu == NULL) in dmar_iommu_hotplug()
4281 disable_dmar_iommu(iommu); in dmar_iommu_hotplug()
4282 free_dmar_iommu(iommu); in dmar_iommu_hotplug()
4458 struct intel_iommu *iommu; in intel_iommu_memory_notifier() local
4480 for_each_active_iommu(iommu, drhd) in intel_iommu_memory_notifier()
4481 iommu_flush_iotlb_psi(iommu, si_domain, in intel_iommu_memory_notifier()
4506 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_version() local
4507 u32 ver = readl(iommu->reg + DMAR_VER_REG); in intel_iommu_show_version()
4517 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_address() local
4518 return sprintf(buf, "%llx\n", iommu->reg_phys); in intel_iommu_show_address()
4526 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_cap() local
4527 return sprintf(buf, "%llx\n", iommu->cap); in intel_iommu_show_cap()
4535 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_ecap() local
4536 return sprintf(buf, "%llx\n", iommu->ecap); in intel_iommu_show_ecap()
4544 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_ndoms() local
4545 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); in intel_iommu_show_ndoms()
4553 struct intel_iommu *iommu = dev_get_drvdata(dev); in intel_iommu_show_ndoms_used() local
4554 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, in intel_iommu_show_ndoms_used()
4555 cap_ndoms(iommu->cap))); in intel_iommu_show_ndoms_used()
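
The sysfs attributes at 4506-4555 all follow the same pattern: fetch the struct intel_iommu from drvdata and format one field. As an example, the version attribute, with the DMAR_VER_MAJOR/DMAR_VER_MINOR decode and the DEVICE_ATTR wiring filled in as assumptions:

static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);

	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
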
4583 struct intel_iommu *iommu; in intel_iommu_init() local
4642 for_each_active_iommu(iommu, drhd) in intel_iommu_init()
4643 iommu->iommu_dev = iommu_device_create(NULL, iommu, in intel_iommu_init()
4645 "%s", iommu->name); in intel_iommu_init()
4667 struct intel_iommu *iommu = opaque; in domain_context_clear_one_cb() local
4669 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); in domain_context_clear_one_cb()
4679 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) in domain_context_clear() argument
4681 if (!iommu || !dev || !dev_is_pci(dev)) in domain_context_clear()
4684 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); in domain_context_clear()
4689 struct intel_iommu *iommu; in __dmar_remove_one_dev_info() local
4697 iommu = info->iommu; in __dmar_remove_one_dev_info()
4701 domain_context_clear(iommu, info->dev); in __dmar_remove_one_dev_info()
4706 spin_lock_irqsave(&iommu->lock, flags); in __dmar_remove_one_dev_info()
4707 domain_detach_iommu(info->domain, iommu); in __dmar_remove_one_dev_info()
4708 spin_unlock_irqrestore(&iommu->lock, flags); in __dmar_remove_one_dev_info()
4720 info = dev->archdata.iommu; in dmar_remove_one_dev_info()
4788 struct intel_iommu *iommu; in intel_iommu_attach_device() local
4813 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_attach_device()
4814 if (!iommu) in intel_iommu_attach_device()
4818 addr_width = agaw_to_width(iommu->agaw); in intel_iommu_attach_device()
4819 if (addr_width > cap_mgaw(iommu->cap)) in intel_iommu_attach_device()
4820 addr_width = cap_mgaw(iommu->cap); in intel_iommu_attach_device()
4833 while (iommu->agaw < dmar_domain->agaw) { in intel_iommu_attach_device()
4897 struct intel_iommu *iommu; in intel_iommu_unmap() local
4917 iommu = g_iommus[iommu_id]; in intel_iommu_unmap()
4958 struct intel_iommu *iommu; in intel_iommu_add_device() local
4962 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_add_device()
4963 if (!iommu) in intel_iommu_add_device()
4966 iommu_device_link(iommu->iommu_dev, dev); in intel_iommu_add_device()
4979 struct intel_iommu *iommu; in intel_iommu_remove_device() local
4982 iommu = device_to_iommu(dev, &bus, &devfn); in intel_iommu_remove_device()
4983 if (!iommu) in intel_iommu_remove_device()
4988 iommu_device_unlink(iommu->iommu_dev, dev); in intel_iommu_remove_device()
4992 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) in intel_iommu_enable_pasid() argument
5006 spin_lock(&iommu->lock); in intel_iommu_enable_pasid()
5009 info = sdev->dev->archdata.iommu; in intel_iommu_enable_pasid()
5013 context = iommu_context_addr(iommu, info->bus, info->devfn, 0); in intel_iommu_enable_pasid()
5019 sdev->did = domain->iommu_did[iommu->seq_id]; in intel_iommu_enable_pasid()
5023 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); in intel_iommu_enable_pasid()
5024 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap); in intel_iommu_enable_pasid()
5043 if (iommu->pasid_state_table) in intel_iommu_enable_pasid()
5049 iommu->flush.flush_context(iommu, sdev->did, sdev->sid, in intel_iommu_enable_pasid()
5067 spin_unlock(&iommu->lock); in intel_iommu_enable_pasid()
5075 struct intel_iommu *iommu; in intel_svm_device_to_iommu() local
5084 iommu = device_to_iommu(dev, &bus, &devfn); in intel_svm_device_to_iommu()
5085 if ((!iommu)) { in intel_svm_device_to_iommu()
5090 if (!iommu->pasid_table) { in intel_svm_device_to_iommu()
5095 return iommu; in intel_svm_device_to_iommu()