Lines Matching refs:devid

93 	u16 devid;			  /* PCI Device ID */  member
136 static struct iommu_dev_data *alloc_dev_data(u16 devid) in alloc_dev_data() argument
145 dev_data->devid = devid; in alloc_dev_data()
154 static struct iommu_dev_data *search_dev_data(u16 devid) in search_dev_data() argument
161 if (dev_data->devid == devid) in search_dev_data()
182 u16 devid, ivrs_alias, pci_alias; in get_alias() local
184 devid = get_device_id(dev); in get_alias()
185 ivrs_alias = amd_iommu_alias_table[devid]; in get_alias()
200 if (ivrs_alias == devid) { in get_alias()
203 amd_iommu_rlookup_table[devid]; in get_alias()
205 amd_iommu_dev_table[devid].data, in get_alias()
223 if (pci_alias == devid && in get_alias()
235 static struct iommu_dev_data *find_dev_data(u16 devid) in find_dev_data() argument
239 dev_data = search_dev_data(devid); in find_dev_data()
242 dev_data = alloc_dev_data(devid); in find_dev_data()
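
The three helpers above (search_dev_data, alloc_dev_data, find_dev_data) form a lookup-or-allocate pattern keyed by the 16-bit devid: search first, allocate and link a new entry only on a miss. A minimal, self-contained sketch of that pattern follows; it uses a plain singly linked list and hypothetical names (search_entry, find_entry), not the driver's actual list handling or locking.

    #include <stdint.h>
    #include <stdlib.h>

    struct dev_data {
        uint16_t devid;                 /* PCI device ID this entry describes */
        struct dev_data *next;
    };

    static struct dev_data *dev_data_list;      /* head of the lookup list */

    static struct dev_data *search_entry(uint16_t devid)
    {
        struct dev_data *d;

        for (d = dev_data_list; d; d = d->next)
            if (d->devid == devid)
                return d;
        return NULL;
    }

    static struct dev_data *find_entry(uint16_t devid)
    {
        struct dev_data *d = search_entry(devid);

        if (!d) {                       /* first sighting: allocate and link it in */
            d = calloc(1, sizeof(*d));
            if (!d)
                return NULL;
            d->devid = devid;
            d->next = dev_data_list;
            dev_data_list = d;
        }
        return d;
    }
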
303 u16 devid; in init_unity_mappings_for_device() local
305 devid = get_device_id(dev); in init_unity_mappings_for_device()
308 if (!(devid >= e->devid_start && devid <= e->devid_end)) in init_unity_mappings_for_device()
320 u16 devid; in check_device() local
329 devid = get_device_id(dev); in check_device()
332 if (devid > amd_iommu_last_bdf) in check_device()
335 if (amd_iommu_rlookup_table[devid] == NULL) in check_device()
379 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
385 iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, in iommu_init_device()
393 u16 devid, alias; in iommu_ignore_device() local
395 devid = get_device_id(dev); in iommu_ignore_device()
398 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); in iommu_ignore_device()
401 amd_iommu_rlookup_table[devid] = NULL; in iommu_ignore_device()
412 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, in iommu_uninit_device()
496 static void dump_dte_entry(u16 devid) in dump_dte_entry() argument
502 amd_iommu_dev_table[devid].data[i]); in dump_dte_entry()
516 int type, devid, domid, flags; in iommu_print_event() local
523 devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; in iommu_print_event()
544 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
546 dump_dte_entry(devid); in iommu_print_event()
551 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
557 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
563 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
577 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
583 PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), in iommu_print_event()
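
Every event message above prints the 16-bit devid as bus:slot.function via PCI_BUS_NUM(), PCI_SLOT() and PCI_FUNC(). The layout behind those macros is the standard PCI encoding: bus in bits 15:8, slot in bits 7:3, function in bits 2:0. A small stand-alone sketch of the decoding (BUS_NUM, SLOT and FUNC are local stand-ins for the kernel macros; the example devid is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* devid layout: bus[15:8] slot[7:3] func[2:0]; the masks make it safe to
     * pass the whole 16-bit value, as the driver does. */
    #define BUS_NUM(devid)  (((devid) >> 8) & 0xff)
    #define SLOT(devid)     (((devid) >> 3) & 0x1f)
    #define FUNC(devid)     ((devid) & 0x07)

    int main(void)
    {
        uint16_t devid = 0x0a10;        /* bus 0x0a, slot 0x02, function 0 */

        printf("%02x:%02x.%x\n", BUS_NUM(devid), SLOT(devid), FUNC(devid));
        return 0;
    }
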
772 static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) in build_inv_dte() argument
775 cmd->data[0] = devid; in build_inv_dte()
810 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, in build_inv_iotlb_pages() argument
831 cmd->data[0] = devid; in build_inv_iotlb_pages()
833 cmd->data[1] = devid; in build_inv_iotlb_pages()
859 static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid, in build_inv_iotlb_pasid() argument
866 cmd->data[0] = devid; in build_inv_iotlb_pasid()
869 cmd->data[1] = devid; in build_inv_iotlb_pasid()
879 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid, in build_complete_ppr() argument
884 cmd->data[0] = devid; in build_complete_ppr()
901 static void build_inv_irt(struct iommu_cmd *cmd, u16 devid) in build_inv_irt() argument
904 cmd->data[0] = devid; in build_inv_irt()
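
In each build_inv_* helper above, the devid goes into the low 16 bits of the command's data words (data[0], and for the IOTLB variants data[1] as well) before the command is queued. The schematic sketch below shows only that much; the opcode position and the CMD_INV_DEV_ENTRY value are assumptions, and the real builders fill in further fields (address, PASID, qdep, size bits).

    #include <stdint.h>
    #include <string.h>

    struct iommu_cmd {
        uint32_t data[4];                       /* one 128-bit command slot */
    };

    #define CMD_INV_DEV_ENTRY 0x02              /* placeholder opcode value */

    static void set_cmd_type(struct iommu_cmd *cmd, uint32_t type)
    {
        cmd->data[1] |= (type & 0x0f) << 28;    /* assumed opcode position */
    }

    /* Invalidate the cached device table entry for one devid. */
    static void build_inv_dte_sketch(struct iommu_cmd *cmd, uint16_t devid)
    {
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = devid;                   /* target device ID in the low 16 bits */
        set_cmd_type(cmd, CMD_INV_DEV_ENTRY);
    }
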
980 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
984 build_inv_dte(&cmd, devid); in iommu_flush_dte()
991 u32 devid; in iommu_flush_dte_all() local
993 for (devid = 0; devid <= 0xffff; ++devid) in iommu_flush_dte_all()
994 iommu_flush_dte(iommu, devid); in iommu_flush_dte_all()
1027 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1031 build_inv_irt(&cmd, devid); in iommu_flush_irt()
1038 u32 devid; in iommu_flush_irt_all() local
1040 for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++) in iommu_flush_irt_all()
1041 iommu_flush_irt(iommu, devid); in iommu_flush_irt_all()
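
iommu_flush_dte_all() and iommu_flush_irt_all() simply walk the whole device-ID space and issue one invalidation per devid; note the u32 loop counter in both, since a u16 would wrap before the loop could terminate at its upper bound. A minimal sketch of the idiom (flush_one() is a hypothetical stand-in for iommu_flush_dte()/iommu_flush_irt()):

    #include <stdint.h>

    static void flush_one(uint16_t devid)
    {
        (void)devid;                    /* a real implementation queues a command here */
    }

    static void flush_all(void)
    {
        uint32_t devid;                 /* 32-bit counter: a u16 would wrap at 0xffff */

        for (devid = 0; devid <= 0xffff; ++devid)
            flush_one((uint16_t)devid);
    }
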
1068 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1070 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1084 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1087 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1088 if (!ret && alias != dev_data->devid) in device_flush_dte()
1892 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) in set_dte_entry() argument
1904 flags = amd_iommu_dev_table[devid].data[1]; in set_dte_entry()
1938 amd_iommu_dev_table[devid].data[1] = flags; in set_dte_entry()
1939 amd_iommu_dev_table[devid].data[0] = pte_root; in set_dte_entry()
1942 static void clear_dte_entry(u16 devid) in clear_dte_entry() argument
1945 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; in clear_dte_entry()
1946 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK; in clear_dte_entry()
1948 amd_iommu_apply_erratum_63(devid); in clear_dte_entry()
1958 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1971 set_dte_entry(dev_data->devid, domain, ats); in do_attach()
1972 if (alias != dev_data->devid) in do_attach()
1992 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
2002 clear_dte_entry(dev_data->devid); in do_detach()
2003 if (alias != dev_data->devid) in do_detach()
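
set_dte_entry() and clear_dte_entry() above index the global device table directly by devid, and the do_attach()/do_detach() callers repeat the update for the alias devid whenever it differs, so that requests arriving under either ID hit the same translation. A simplified sketch of that indexing pattern follows; the table here simply spans the full 16-bit ID space, and the entry layout and flag handling are illustrative, not the real DTE format.

    #include <stdint.h>

    #define NUM_DEVIDS (1 << 16)                /* one entry per possible 16-bit device ID */

    struct dev_table_entry {
        uint64_t data[4];                       /* 256-bit device table entry */
    };

    static struct dev_table_entry dev_table[NUM_DEVIDS];

    /* Point one devid at a page-table root; flags stands in for the valid/ATS bits. */
    static void set_entry(uint16_t devid, uint64_t pt_root, uint64_t flags)
    {
        dev_table[devid].data[1] = flags;
        dev_table[devid].data[0] = pt_root;
    }

    static void attach(uint16_t devid, uint16_t alias, uint64_t pt_root, uint64_t flags)
    {
        set_entry(devid, pt_root, flags);
        if (alias != devid)                     /* keep the IVRS/PCI alias entry in sync */
            set_entry(alias, pt_root, flags);
    }
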
2233 u16 devid; in amd_iommu_add_device() local
2239 devid = get_device_id(dev); in amd_iommu_add_device()
2240 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_add_device()
2277 u16 devid; in amd_iommu_remove_device() local
2282 devid = get_device_id(dev); in amd_iommu_remove_device()
2283 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_remove_device()
2326 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); in update_device_table()
2998 u16 devid; in amd_iommu_detach_device() local
3003 devid = get_device_id(dev); in amd_iommu_detach_device()
3008 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
3028 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
3121 u16 devid; in amd_iommu_get_dm_regions() local
3123 devid = get_device_id(dev); in amd_iommu_get_dm_regions()
3128 if (devid < entry->devid_start || devid > entry->devid_end) in amd_iommu_get_dm_regions()
3308 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3310 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
3477 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3479 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3581 u16 devid; /* Device ID for IRTE table */ member
3600 static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) in set_dte_irq_entry() argument
3604 dte = amd_iommu_dev_table[devid].data[2]; in set_dte_irq_entry()
3611 amd_iommu_dev_table[devid].data[2] = dte; in set_dte_irq_entry()
3616 static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) in get_irq_table() argument
3625 iommu = amd_iommu_rlookup_table[devid]; in get_irq_table()
3629 table = irq_lookup_table[devid]; in get_irq_table()
3633 alias = amd_iommu_alias_table[devid]; in get_irq_table()
3636 irq_lookup_table[devid] = table; in get_irq_table()
3637 set_dte_irq_entry(devid, table); in get_irq_table()
3638 iommu_flush_dte(iommu, devid); in get_irq_table()
3670 irq_lookup_table[devid] = table; in get_irq_table()
3671 set_dte_irq_entry(devid, table); in get_irq_table()
3672 iommu_flush_dte(iommu, devid); in get_irq_table()
3673 if (devid != alias) { in get_irq_table()
3688 static int alloc_irq_index(u16 devid, int count) in alloc_irq_index() argument
3694 table = get_irq_table(devid, false); in alloc_irq_index()
3726 static int modify_irte(u16 devid, int index, union irte irte) in modify_irte() argument
3732 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
3736 table = get_irq_table(devid, false); in modify_irte()
3744 iommu_flush_irt(iommu, devid); in modify_irte()
3750 static void free_irte(u16 devid, int index) in free_irte() argument
3756 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
3760 table = get_irq_table(devid, false); in free_irte()
3768 iommu_flush_irt(iommu, devid); in free_irte()
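
The interrupt-remapping side keeps a second devid-indexed array, irq_lookup_table[]; get_irq_table() allocates an entry lazily on first use, publishes it, points the DTE at it via set_dte_irq_entry() and flushes the DTE, while modify_irte()/free_irte() look the table up again by devid for each update. A bare-bones sketch of the lazy lookup (locking, the alias handling and the DTE update are omitted; get_table() and its fields are illustrative names):

    #include <stdint.h>
    #include <stdlib.h>

    #define NUM_DEVIDS (1 << 16)

    struct irq_remap_table {
        uint32_t *table;                        /* the IRTE array itself */
    };

    static struct irq_remap_table *irq_lookup[NUM_DEVIDS];

    static struct irq_remap_table *get_table(uint16_t devid, unsigned int nr_entries)
    {
        struct irq_remap_table *t = irq_lookup[devid];

        if (t)                                  /* fast path: already allocated */
            return t;

        t = calloc(1, sizeof(*t));
        if (!t)
            return NULL;
        t->table = calloc(nr_entries, sizeof(uint32_t));
        if (!t->table) {
            free(t);
            return NULL;
        }
        irq_lookup[devid] = t;                  /* publish; the driver also updates and
                                                 * flushes the DTE at this point */
        return t;
    }
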
3774 int devid = -1; in get_devid() local
3778 devid = get_ioapic_devid(info->ioapic_id); in get_devid()
3781 devid = get_hpet_devid(info->hpet_id); in get_devid()
3785 devid = get_device_id(&info->msi_dev->dev); in get_devid()
3792 return devid; in get_devid()
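
get_devid() above selects the devid according to the interrupt source described in the allocation info: an IOAPIC, an HPET, or an MSI-capable PCI device, and returns -1 when no source matches, which is why callers keep devid in an int and test for devid >= 0. The sketch below mirrors only that dispatch shape; the struct, the enum and the stored IDs are hypothetical, and the real helpers translate IOAPIC/HPET IDs through IVRS data rather than returning them directly.

    #include <stdint.h>

    enum irq_src { SRC_IOAPIC, SRC_HPET, SRC_MSI };

    struct irq_info {
        enum irq_src type;
        int ioapic_devid;           /* pre-resolved devids for the non-PCI sources */
        int hpet_devid;
        uint16_t msi_devid;         /* PCI devid of the MSI device */
    };

    static int lookup_devid(const struct irq_info *info)
    {
        switch (info->type) {
        case SRC_IOAPIC:
            return info->ioapic_devid;
        case SRC_HPET:
            return info->hpet_devid;
        case SRC_MSI:
            return info->msi_devid;
        }
        return -1;                  /* unknown source: no devid */
    }
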
3798 int devid; in get_ir_irq_domain() local
3803 devid = get_devid(info); in get_ir_irq_domain()
3804 if (devid >= 0) { in get_ir_irq_domain()
3805 iommu = amd_iommu_rlookup_table[devid]; in get_ir_irq_domain()
3816 int devid; in get_irq_domain() local
3824 devid = get_device_id(&info->msi_dev->dev); in get_irq_domain()
3825 if (devid >= 0) { in get_irq_domain()
3826 iommu = amd_iommu_rlookup_table[devid]; in get_irq_domain()
3851 int devid, int index, int sub_handle) in irq_remapping_prepare_irte() argument
3858 data->irq_2_irte.devid = devid; in irq_remapping_prepare_irte()
3905 int i, ret, devid; in irq_remapping_alloc() local
3921 devid = get_devid(info); in irq_remapping_alloc()
3922 if (devid < 0) in irq_remapping_alloc()
3930 if (get_irq_table(devid, true)) in irq_remapping_alloc()
3935 index = alloc_irq_index(devid, nr_irqs); in irq_remapping_alloc()
3955 irq_data->hwirq = (devid << 16) + i; in irq_remapping_alloc()
3958 irq_remapping_prepare_irte(data, cfg, info, devid, index, i); in irq_remapping_alloc()
3971 free_irte(devid, index + i); in irq_remapping_alloc()
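
irq_remapping_alloc() above encodes both halves of the remapping identity into the hardware IRQ number: hwirq = (devid << 16) + i, i.e. devid in the high 16 bits and the interrupt's offset within the allocation in the low 16 bits. A tiny encode/decode sketch of that layout (the decode helpers are hypothetical, added only to make the packing explicit):

    #include <stdint.h>

    static inline uint32_t encode_hwirq(uint16_t devid, uint16_t offset)
    {
        return ((uint32_t)devid << 16) + offset;    /* matches (devid << 16) + i above */
    }

    static inline uint16_t hwirq_devid(uint32_t hwirq)
    {
        return hwirq >> 16;                         /* recover the device ID */
    }

    static inline uint16_t hwirq_offset(uint32_t hwirq)
    {
        return hwirq & 0xffff;                      /* recover the per-allocation offset */
    }
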
3990 free_irte(irte_info->devid, irte_info->index); in irq_remapping_free()
4003 modify_irte(irte_info->devid, irte_info->index, data->irte_entry); in irq_remapping_activate()
4014 modify_irte(irte_info->devid, irte_info->index, data->irte_entry); in irq_remapping_deactivate()
4043 modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry); in amd_ir_set_affinity()