Lines matching refs: devid

100 	u16 devid;			  /* PCI Device ID */  member
135 static struct iommu_dev_data *alloc_dev_data(u16 devid) in alloc_dev_data() argument
146 dev_data->devid = devid; in alloc_dev_data()
166 static struct iommu_dev_data *search_dev_data(u16 devid) in search_dev_data() argument
173 if (dev_data->devid == devid) in search_dev_data()
185 static struct iommu_dev_data *find_dev_data(u16 devid) in find_dev_data() argument
189 dev_data = search_dev_data(devid); in find_dev_data()
192 dev_data = alloc_dev_data(devid); in find_dev_data()
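
The first group of hits is the per-device bookkeeping: dev_data is allocated with its devid, searched by devid, and find_dev_data() falls back to allocation on a miss. A minimal userspace sketch of that search-then-allocate pattern (struct layout and list handling are illustrative, and the driver's locking is omitted):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for the driver's per-device bookkeeping. */
struct dev_data {
        uint16_t devid;            /* PCI device ID (bus/devfn packed into 16 bits) */
        struct dev_data *next;
};

static struct dev_data *dev_data_list;    /* hypothetical global list head */

static struct dev_data *search_dev_data(uint16_t devid)
{
        for (struct dev_data *d = dev_data_list; d; d = d->next)
                if (d->devid == devid)
                        return d;
        return NULL;
}

/* find = search, then allocate on a miss (mirrors find_dev_data() above). */
static struct dev_data *find_dev_data(uint16_t devid)
{
        struct dev_data *d = search_dev_data(devid);

        if (!d) {
                d = calloc(1, sizeof(*d));
                if (!d)
                        return NULL;
                d->devid = devid;
                d->next = dev_data_list;
                dev_data_list = d;
        }
        return d;
}
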
240 static struct dma_ops_domain *find_protection_domain(u16 devid) in find_protection_domain() argument
244 u16 alias = amd_iommu_alias_table[devid]; in find_protection_domain()
252 if (entry->target_dev == devid || in find_protection_domain()
270 u16 devid; in check_device() local
279 devid = get_device_id(dev); in check_device()
282 if (devid > amd_iommu_last_bdf) in check_device()
285 if (amd_iommu_rlookup_table[devid] == NULL) in check_device()
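
check_device() gates everything on the devid: it must not exceed amd_iommu_last_bdf and must have an owning IOMMU in the rlookup table. A hedged sketch of that validation (table names and sizes here are stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define LAST_BDF 0xffff
static void *rlookup_table[LAST_BDF + 1];   /* devid -> owning IOMMU, or NULL */

/* Mirrors the shape of check_device(): a devid is usable only if it lies
 * within the range enumerated by the ACPI tables and an IOMMU claims it. */
static bool devid_is_managed(uint16_t devid, uint16_t last_bdf)
{
        if (devid > last_bdf)
                return false;
        return rlookup_table[devid] != NULL;
}
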
309 u16 devid, ivrs_alias, pci_alias; in get_alias() local
311 devid = get_device_id(dev); in get_alias()
312 ivrs_alias = amd_iommu_alias_table[devid]; in get_alias()
327 if (ivrs_alias == devid) { in get_alias()
330 amd_iommu_rlookup_table[devid]; in get_alias()
332 amd_iommu_dev_table[devid].data, in get_alias()
350 if (pci_alias == devid && in get_alias()
377 if (alias != dev_data->devid) { in iommu_init_device()
396 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
402 iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, in iommu_init_device()
410 u16 devid, alias; in iommu_ignore_device() local
412 devid = get_device_id(dev); in iommu_ignore_device()
413 alias = amd_iommu_alias_table[devid]; in iommu_ignore_device()
415 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); in iommu_ignore_device()
418 amd_iommu_rlookup_table[devid] = NULL; in iommu_ignore_device()
429 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, in iommu_uninit_device()
565 static void dump_dte_entry(u16 devid) in dump_dte_entry() argument
571 amd_iommu_dev_table[devid].data[i]); in dump_dte_entry()
585 int type, devid, domid, flags; in iommu_print_event() local
592 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in iommu_print_event()
613 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
615 dump_dte_entry(devid); in iommu_print_event()
620 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
626 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
632 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
646 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
652 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
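
The event-log messages print the devid as a PCI address via PCI_BUS_NUM/PCI_SLOT/PCI_FUNC: the high byte is the bus number and the low byte is devfn. A small example of the decoding (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

/* A 16-bit devid packs the PCI bus number in the high byte and devfn
 * (slot + function) in the low byte, so the event messages above can
 * print it as bus:slot.func. */
int main(void)
{
        uint16_t devid = 0x0a10;          /* example value, not from a real log */
        unsigned bus  = devid >> 8;
        unsigned slot = (devid >> 3) & 0x1f;
        unsigned func = devid & 0x07;

        printf("%02x:%02x.%x\n", bus, slot, func);   /* -> 0a:02.0 */
        return 0;
}
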
841 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) in build_inv_dte() argument
844 cmd->data[0] = devid; in build_inv_dte()
879 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, in build_inv_iotlb_pages() argument
900 cmd->data[0] = devid; in build_inv_iotlb_pages()
902 cmd->data[1] = devid; in build_inv_iotlb_pages()
928 static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid, in build_inv_iotlb_pasid() argument
935 cmd->data[0] = devid; in build_inv_iotlb_pasid()
938 cmd->data[1] = devid; in build_inv_iotlb_pasid()
948 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid, in build_complete_ppr() argument
953 cmd->data[0] = devid; in build_complete_ppr()
970 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid) in build_inv_irt() argument
973 cmd->data[0] = devid; in build_inv_irt()
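
Every command builder above starts from a cleared command and writes devid into data[0]; the ATS IOTLB invalidation additionally repeats it in data[1] and folds in the device's invalidation queue depth. A rough model of that devid placement (the qdep field position is an assumption, and opcodes/flags are left out entirely):

#include <stdint.h>
#include <string.h>

/* Simplified model of a queued IOMMU command: a handful of 32-bit words
 * with the target devid in data[0]. Only the devid placement shown in the
 * listing is modeled. */
struct iommu_cmd {
        uint32_t data[4];
};

static void build_inv_dte_like(struct iommu_cmd *cmd, uint16_t devid)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = devid;             /* device-table entry to invalidate */
}

static void build_inv_iotlb_like(struct iommu_cmd *cmd, uint16_t devid, int qdep)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = devid;                              /* requester ID */
        cmd->data[0] |= ((uint32_t)qdep & 0xff) << 24;     /* queue depth (assumed position) */
        cmd->data[1] = devid;                              /* devid repeated in the second word */
}
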
1051 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
1055 build_inv_dte(&cmd, devid); in iommu_flush_dte()
1062 u32 devid; in iommu_flush_dte_all() local
1064 for (devid = 0; devid <= 0xffff; ++devid) in iommu_flush_dte_all()
1065 iommu_flush_dte(iommu, devid); in iommu_flush_dte_all()
1098 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1102 build_inv_irt(&cmd, devid); in iommu_flush_irt()
1109 u32 devid; in iommu_flush_irt_all() local
1111 for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++) in iommu_flush_irt_all()
1112 iommu_flush_irt(iommu, devid); in iommu_flush_irt_all()
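
The flush-all paths simply walk every possible devid and issue one invalidation each; note that both locals are declared u32 so the `<= 0xffff` / `<= MAX_DEV_TABLE_ENTRIES` comparisons can terminate. A minimal sketch of the same loop shape (the helper prototype is a stand-in):

#include <stdint.h>

struct iommu;                                              /* opaque for the sketch */
int iommu_flush_dte(struct iommu *iommu, uint16_t devid);  /* assumed helper */

/* Mirrors iommu_flush_dte_all(): one DTE invalidation per possible devid.
 * The loop counter must be wider than 16 bits, or `devid <= 0xffff`
 * would never become false. */
static void flush_all_dtes(struct iommu *iommu)
{
        for (uint32_t devid = 0; devid <= 0xffff; ++devid)
                iommu_flush_dte(iommu, (uint16_t)devid);
}
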
1139 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1141 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1154 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1156 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1538 u16 devid) in init_unity_mappings_for_device() argument
1544 if (!(devid >= e->devid_start && devid <= e->devid_end)) in init_unity_mappings_for_device()
2049 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) in set_dte_entry() argument
2061 flags = amd_iommu_dev_table[devid].data[1]; in set_dte_entry()
2095 amd_iommu_dev_table[devid].data[1] = flags; in set_dte_entry()
2096 amd_iommu_dev_table[devid].data[0] = pte_root; in set_dte_entry()
2099 static void clear_dte_entry(u16 devid) in clear_dte_entry() argument
2102 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; in clear_dte_entry()
2103 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK; in clear_dte_entry()
2105 amd_iommu_apply_erratum_63(devid); in clear_dte_entry()
2114 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
2120 set_dte_entry(dev_data->devid, domain, ats); in do_attach()
2134 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2143 clear_dte_entry(dev_data->devid); in do_detach()
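
set_dte_entry() and clear_dte_entry() treat devid as a direct index into the device table: attach writes the translation flags and page-table root, detach resets the entry to a bare present/translation-valid state and masks the flags word. A simplified model follows; the bit names and mask are placeholders, not the hardware-defined layout:

#include <stdint.h>

#define DTE_PRESENT      (1ULL << 0)                /* placeholder bit */
#define DTE_TRANS_VALID  (1ULL << 1)                /* placeholder bit */
#define DTE_FLAGS_KEEP   0x00ffffffffffffffULL      /* placeholder mask */

struct dte { uint64_t data[4]; };
static struct dte dev_table[0x10000];               /* one entry per devid */

static void set_dte(uint16_t devid, uint64_t pt_root, uint64_t flags)
{
        dev_table[devid].data[1] = flags;
        dev_table[devid].data[0] = pt_root | DTE_PRESENT | DTE_TRANS_VALID;
}

static void clear_dte(uint16_t devid)
{
        dev_table[devid].data[0] = DTE_PRESENT | DTE_TRANS_VALID;
        dev_table[devid].data[1] &= DTE_FLAGS_KEEP;
}
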
2420 u16 devid; in device_change_notifier() local
2425 devid = get_device_id(dev); in device_change_notifier()
2426 iommu = amd_iommu_rlookup_table[devid]; in device_change_notifier()
2450 dma_domain = find_protection_domain(devid); in device_change_notifier()
2455 dma_domain->target_dev = devid; in device_change_notifier()
2505 u16 devid = get_device_id(dev); in get_domain() local
2518 dma_dom = find_protection_domain(devid); in get_domain()
2520 dma_dom = amd_iommu_rlookup_table[devid]->default_dom; in get_domain()
2533 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); in update_device_table()
3033 u16 devid; in prealloc_protection_domains() local
3055 devid = get_device_id(&dev->dev); in prealloc_protection_domains()
3060 init_unity_mappings_for_device(dma_dom, devid); in prealloc_protection_domains()
3061 dma_dom->target_dev = devid; in prealloc_protection_domains()
3303 u16 devid; in amd_iommu_detach_device() local
3308 devid = get_device_id(dev); in amd_iommu_detach_device()
3313 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
3333 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
3589 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3591 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
3758 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3760 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3866 static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) in set_dte_irq_entry() argument
3870 dte = amd_iommu_dev_table[devid].data[2]; in set_dte_irq_entry()
3877 amd_iommu_dev_table[devid].data[2] = dte; in set_dte_irq_entry()
3882 static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) in get_irq_table() argument
3891 iommu = amd_iommu_rlookup_table[devid]; in get_irq_table()
3895 table = irq_lookup_table[devid]; in get_irq_table()
3899 alias = amd_iommu_alias_table[devid]; in get_irq_table()
3902 irq_lookup_table[devid] = table; in get_irq_table()
3903 set_dte_irq_entry(devid, table); in get_irq_table()
3904 iommu_flush_dte(iommu, devid); in get_irq_table()
3936 irq_lookup_table[devid] = table; in get_irq_table()
3937 set_dte_irq_entry(devid, table); in get_irq_table()
3938 iommu_flush_dte(iommu, devid); in get_irq_table()
3939 if (devid != alias) { in get_irq_table()
3954 static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count) in alloc_irq_index() argument
3960 table = get_irq_table(devid, false); in alloc_irq_index()
3985 irte_info->devid = devid; in alloc_irq_index()
4000 static int get_irte(u16 devid, int index, union irte *irte) in get_irte() argument
4005 table = get_irq_table(devid, false); in get_irte()
4016 static int modify_irte(u16 devid, int index, union irte irte) in modify_irte() argument
4022 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
4026 table = get_irq_table(devid, false); in modify_irte()
4034 iommu_flush_irt(iommu, devid); in modify_irte()
4040 static void free_irte(u16 devid, int index) in free_irte() argument
4046 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
4050 table = get_irq_table(devid, false); in free_irte()
4058 iommu_flush_irt(iommu, devid); in free_irte()
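
The interrupt-remapping helpers follow the same devid-indexed pattern: get_irq_table() looks up (or allocates) the per-device remapping table, installs it in the DTE and flushes, while modify_irte()/free_irte() update a single entry and then flush the IRT. A stripped-down model, with locking, DTE installation, and the alias-sharing case omitted:

#include <stdint.h>
#include <stdlib.h>

#define IRT_ENTRIES 256                              /* illustrative table size */

struct irq_remap_table {
        uint32_t table[IRT_ENTRIES];
};

static struct irq_remap_table *irq_lookup_table[0x10000];   /* indexed by devid */

static struct irq_remap_table *get_table(uint16_t devid)
{
        if (!irq_lookup_table[devid])
                irq_lookup_table[devid] = calloc(1, sizeof(struct irq_remap_table));
        return irq_lookup_table[devid];
}

static int modify_entry(uint16_t devid, int index, uint32_t irte)
{
        struct irq_remap_table *t = get_table(devid);

        if (!t || index < 0 || index >= IRT_ENTRIES)
                return -1;
        t->table[index] = irte;
        /* the real driver flushes the IOMMU's IRT cache for this devid here */
        return 0;
}
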
4072 int devid; in setup_ioapic_entry() local
4081 devid = get_ioapic_devid(ioapic_id); in setup_ioapic_entry()
4083 if (devid < 0) in setup_ioapic_entry()
4084 return devid; in setup_ioapic_entry()
4086 table = get_irq_table(devid, true); in setup_ioapic_entry()
4094 irte_info->devid = devid; in setup_ioapic_entry()
4105 ret = modify_irte(devid, index, irte); in setup_ioapic_entry()
4145 if (get_irte(irte_info->devid, irte_info->index, &irte)) in set_affinity()
4161 modify_irte(irte_info->devid, irte_info->index, irte); in set_affinity()
4182 free_irte(irte_info->devid, irte_info->index); in free_irq()
4208 modify_irte(irte_info->devid, irte_info->index, irte); in compose_msi_msg()
4219 u16 devid; in msi_alloc_irq() local
4228 devid = get_device_id(&pdev->dev); in msi_alloc_irq()
4229 index = alloc_irq_index(cfg, devid, nvec); in msi_alloc_irq()
4239 u16 devid; in msi_setup_irq() local
4251 devid = get_device_id(&pdev->dev); in msi_setup_irq()
4255 irte_info->devid = devid; in msi_setup_irq()
4265 int index, devid; in alloc_hpet_msi() local
4272 devid = get_hpet_devid(id); in alloc_hpet_msi()
4273 if (devid < 0) in alloc_hpet_msi()
4274 return devid; in alloc_hpet_msi()
4276 index = alloc_irq_index(cfg, devid, 1); in alloc_hpet_msi()
4281 irte_info->devid = devid; in alloc_hpet_msi()
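
The MSI and HPET setup paths above obtain the devid from the device itself (get_device_id(), get_hpet_devid()); for a PCI device the value is just the bus number and devfn packed into 16 bits, the inverse of the bus:slot.func decoding shown earlier. An illustrative helper:

#include <stdint.h>

/* Pack a PCI bus number and devfn into the 16-bit devid used throughout
 * the listing (bus in the high byte, devfn in the low byte). */
static inline uint16_t pci_devid(uint8_t bus, uint8_t devfn)
{
        return ((uint16_t)bus << 8) | devfn;
}
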