Lines Matching refs:dev_data

138 struct iommu_dev_data *dev_data; in alloc_dev_data() local
141 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
142 if (!dev_data) in alloc_dev_data()
145 dev_data->devid = devid; in alloc_dev_data()
148 list_add_tail(&dev_data->dev_data_list, &dev_data_list); in alloc_dev_data()
151 return dev_data; in alloc_dev_data()
156 struct iommu_dev_data *dev_data; in search_dev_data() local
160 list_for_each_entry(dev_data, &dev_data_list, dev_data_list) { in search_dev_data()
161 if (dev_data->devid == devid) in search_dev_data()
165 dev_data = NULL; in search_dev_data()
170 return dev_data; in search_dev_data()
237 struct iommu_dev_data *dev_data; in find_dev_data() local
239 dev_data = search_dev_data(devid); in find_dev_data()
241 if (dev_data == NULL) in find_dev_data()
242 dev_data = alloc_dev_data(devid); in find_dev_data()
244 return dev_data; in find_dev_data()
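
The three helpers above manage a single global list of per-device data. A minimal sketch of how the listed lines fit together; the dev_data_list head is referenced in the listing, while the dev_data_list_lock spinlock and locking around the list are reconstructed from the driver's usual pattern and are an assumption here:

        static LIST_HEAD(dev_data_list);
        static DEFINE_SPINLOCK(dev_data_list_lock);

        /* Allocate a new iommu_dev_data entry and put it on the global list. */
        static struct iommu_dev_data *alloc_dev_data(u16 devid)
        {
                struct iommu_dev_data *dev_data;
                unsigned long flags;

                dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
                if (!dev_data)
                        return NULL;

                dev_data->devid = devid;

                spin_lock_irqsave(&dev_data_list_lock, flags);
                list_add_tail(&dev_data->dev_data_list, &dev_data_list);
                spin_unlock_irqrestore(&dev_data_list_lock, flags);

                return dev_data;
        }

        /* Linear search of the global list by device id; NULL if not found. */
        static struct iommu_dev_data *search_dev_data(u16 devid)
        {
                struct iommu_dev_data *dev_data;
                unsigned long flags;

                spin_lock_irqsave(&dev_data_list_lock, flags);
                list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
                        if (dev_data->devid == devid)
                                goto out_unlock;
                }
                dev_data = NULL;

        out_unlock:
                spin_unlock_irqrestore(&dev_data_list_lock, flags);
                return dev_data;
        }

        /* Find-or-create wrapper used by iommu_init_device(). */
        static struct iommu_dev_data *find_dev_data(u16 devid)
        {
                struct iommu_dev_data *dev_data;

                dev_data = search_dev_data(devid);
                if (dev_data == NULL)
                        dev_data = alloc_dev_data(devid);

                return dev_data;
        }
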
272 struct iommu_dev_data *dev_data; in pdev_pri_erratum() local
274 dev_data = get_dev_data(&pdev->dev); in pdev_pri_erratum()
276 return dev_data->errata & (1 << erratum) ? true : false; in pdev_pri_erratum()
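
The erratum test relies on the per-device data hung off dev->archdata.iommu. A short sketch; get_dev_data() is reconstructed on the assumption that it simply returns that pointer, as the assignment at line 383 suggests:

        static struct iommu_dev_data *get_dev_data(struct device *dev)
        {
                return dev->archdata.iommu;
        }

        /* Test whether a given erratum workaround bit is set for this device. */
        static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
        {
                struct iommu_dev_data *dev_data;

                dev_data = get_dev_data(&pdev->dev);

                return dev_data->errata & (1 << erratum) ? true : false;
        }
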
365 struct iommu_dev_data *dev_data; in iommu_init_device() local
370 dev_data = find_dev_data(get_device_id(dev)); in iommu_init_device()
371 if (!dev_data) in iommu_init_device()
374 dev_data->alias = get_alias(dev); in iommu_init_device()
379 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
380 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
383 dev->archdata.iommu = dev_data; in iommu_init_device()
385 iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, in iommu_init_device()
407 struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev)); in iommu_uninit_device() local
409 if (!dev_data) in iommu_uninit_device()
412 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, in iommu_uninit_device()
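
Putting the init/uninit lines together: iommu_init_device() resolves or allocates the dev_data entry, records the DMA alias, caches the IOMMUv2 capability of the owning IOMMU, and stores the pointer in dev->archdata.iommu; iommu_uninit_device() undoes the sysfs link. A hedged sketch built around the listed lines; error handling, the PCI IOMMUv2 capability gating, and the sysfs/group teardown are abbreviated:

        static int iommu_init_device(struct device *dev)
        {
                struct iommu_dev_data *dev_data;
                struct amd_iommu *iommu;

                if (dev->archdata.iommu)
                        return 0;

                dev_data = find_dev_data(get_device_id(dev));
                if (!dev_data)
                        return -ENOMEM;

                dev_data->alias = get_alias(dev);

                /* In the driver this is only done for IOMMUv2-capable PCI devices */
                iommu = amd_iommu_rlookup_table[dev_data->devid];
                dev_data->iommu_v2 = iommu->is_iommu_v2;

                dev->archdata.iommu = dev_data;

                iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
                                  dev);

                return 0;
        }

        static void iommu_uninit_device(struct device *dev)
        {
                struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));

                if (!dev_data)
                        return;

                iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
                                    dev);

                /*
                 * The dev_data entry itself stays on the global list so the
                 * information survives a remove/re-add cycle of the device.
                 */
        }
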
1060 static int device_flush_iotlb(struct iommu_dev_data *dev_data, in device_flush_iotlb() argument
1067 qdep = dev_data->ats.qdep; in device_flush_iotlb()
1068 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1070 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); in device_flush_iotlb()
1078 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1084 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1085 alias = dev_data->alias; in device_flush_dte()
1087 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1088 if (!ret && alias != dev_data->devid) in device_flush_dte()
1093 if (dev_data->ats.enabled) in device_flush_dte()
1094 ret = device_flush_iotlb(dev_data, 0, ~0UL); in device_flush_dte()
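
The two flush helpers: device_flush_iotlb() issues an IOTLB invalidation against a single ATS-enabled device, and device_flush_dte() invalidates the device table entry for the device and its alias and then, for ATS devices, flushes the whole device IOTLB. A sketch assembled around the listed lines; queueing via iommu_queue_command() follows the driver's usual pattern:

        static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                                      u64 address, size_t size)
        {
                struct amd_iommu *iommu;
                struct iommu_cmd cmd;
                int qdep;

                qdep  = dev_data->ats.qdep;
                iommu = amd_iommu_rlookup_table[dev_data->devid];

                build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

                return iommu_queue_command(iommu, &cmd);
        }

        static int device_flush_dte(struct iommu_dev_data *dev_data)
        {
                struct amd_iommu *iommu;
                u16 alias;
                int ret;

                iommu = amd_iommu_rlookup_table[dev_data->devid];
                alias = dev_data->alias;

                ret = iommu_flush_dte(iommu, dev_data->devid);
                if (!ret && alias != dev_data->devid)
                        ret = iommu_flush_dte(iommu, alias);
                if (ret)
                        return ret;

                /* ATS devices also cache translations; flush their whole IOTLB */
                if (dev_data->ats.enabled)
                        ret = device_flush_iotlb(dev_data, 0, ~0UL);

                return ret;
        }
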
1107 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1124 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1126 if (!dev_data->ats.enabled) in __domain_flush_pages()
1129 ret |= device_flush_iotlb(dev_data, address, size); in __domain_flush_pages()
1175 struct iommu_dev_data *dev_data; in domain_flush_devices() local
1177 list_for_each_entry(dev_data, &domain->dev_list, list) in domain_flush_devices()
1178 device_flush_dte(dev_data); in domain_flush_devices()
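
Both domain-wide flush paths walk domain->dev_list: __domain_flush_pages() follows the per-IOMMU TLB invalidation with an IOTLB flush for every ATS-enabled device, and domain_flush_devices() re-flushes each device table entry. Sketch of the per-device part; the preceding per-IOMMU command loop in __domain_flush_pages() is omitted:

        /* Excerpt: tail of __domain_flush_pages(), device IOTLB flushes */
        list_for_each_entry(dev_data, &domain->dev_list, list) {
                if (!dev_data->ats.enabled)
                        continue;

                ret |= device_flush_iotlb(dev_data, address, size);
        }

        static void domain_flush_devices(struct protection_domain *domain)
        {
                struct iommu_dev_data *dev_data;

                list_for_each_entry(dev_data, &domain->dev_list, list)
                        device_flush_dte(dev_data);
        }
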
1951 static void do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
1958 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1959 alias = dev_data->alias; in do_attach()
1960 ats = dev_data->ats.enabled; in do_attach()
1963 dev_data->domain = domain; in do_attach()
1964 list_add(&dev_data->list, &domain->dev_list); in do_attach()
1971 set_dte_entry(dev_data->devid, domain, ats); in do_attach()
1972 if (alias != dev_data->devid) in do_attach()
1975 device_flush_dte(dev_data); in do_attach()
1978 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
1989 if (!dev_data->domain) in do_detach()
1992 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1993 alias = dev_data->alias; in do_detach()
1996 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
1997 dev_data->domain->dev_cnt -= 1; in do_detach()
2000 dev_data->domain = NULL; in do_detach()
2001 list_del(&dev_data->list); in do_detach()
2002 clear_dte_entry(dev_data->devid); in do_detach()
2003 if (alias != dev_data->devid) in do_detach()
2007 device_flush_dte(dev_data); in do_detach()
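
do_attach() and do_detach() are the low-level bookkeeping steps: they move dev_data on or off the domain's dev_list, maintain the per-IOMMU and per-domain reference counts, rewrite the device table entry for the device and its alias, and flush the DTE. A sketch reconstructed around the listed lines; the reference-count increments in do_attach() mirror the decrements visible at lines 1996-1997:

        static void do_attach(struct iommu_dev_data *dev_data,
                              struct protection_domain *domain)
        {
                struct amd_iommu *iommu;
                bool ats;
                u16 alias;

                iommu = amd_iommu_rlookup_table[dev_data->devid];
                alias = dev_data->alias;
                ats   = dev_data->ats.enabled;

                /* Update data structures */
                dev_data->domain = domain;
                list_add(&dev_data->list, &domain->dev_list);

                /* Do reference counting */
                domain->dev_iommu[iommu->index] += 1;
                domain->dev_cnt                 += 1;

                /* Update device table */
                set_dte_entry(dev_data->devid, domain, ats);
                if (alias != dev_data->devid)
                        set_dte_entry(alias, domain, ats);

                device_flush_dte(dev_data);
        }

        static void do_detach(struct iommu_dev_data *dev_data)
        {
                struct amd_iommu *iommu;
                u16 alias;

                /* Nothing to do if the device was never attached */
                if (!dev_data->domain)
                        return;

                iommu = amd_iommu_rlookup_table[dev_data->devid];
                alias = dev_data->alias;

                /* Decrease reference counters */
                dev_data->domain->dev_iommu[iommu->index] -= 1;
                dev_data->domain->dev_cnt                 -= 1;

                /* Update data structures */
                dev_data->domain = NULL;
                list_del(&dev_data->list);
                clear_dte_entry(dev_data->devid);
                if (alias != dev_data->devid)
                        clear_dte_entry(alias);

                /* Flush the stale DTE out of the IOMMU's caches */
                device_flush_dte(dev_data);
        }
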
2014 static int __attach_device(struct iommu_dev_data *dev_data, in __attach_device() argument
2029 if (dev_data->domain != NULL) in __attach_device()
2033 do_attach(dev_data, domain); in __attach_device()
2140 struct iommu_dev_data *dev_data; in attach_device() local
2144 dev_data = get_dev_data(dev); in attach_device()
2147 if (!dev_data->passthrough) in attach_device()
2150 if (dev_data->iommu_v2) { in attach_device()
2154 dev_data->ats.enabled = true; in attach_device()
2155 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2156 dev_data->pri_tlp = pci_pri_tlp_required(pdev); in attach_device()
2160 dev_data->ats.enabled = true; in attach_device()
2161 dev_data->ats.qdep = pci_ats_queue_depth(pdev); in attach_device()
2165 ret = __attach_device(dev_data, domain); in attach_device()
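
__attach_device() takes the domain lock and refuses to attach a device that already belongs to a domain; attach_device() is the outer wrapper that first enables ATS (and, for IOMMUv2 domains, PRI/PASID) on the PCI device and caches the queue depth and pri_tlp flag in dev_data. A condensed sketch; the PD_IOMMUV2_MASK branch structure and the device-table locking are partly reconstructed:

        static int __attach_device(struct iommu_dev_data *dev_data,
                                   struct protection_domain *domain)
        {
                int ret;

                spin_lock(&domain->lock);

                ret = -EBUSY;
                if (dev_data->domain != NULL)
                        goto out_unlock;

                do_attach(dev_data, domain);
                ret = 0;

        out_unlock:
                spin_unlock(&domain->lock);
                return ret;
        }

        static int attach_device(struct device *dev,
                                 struct protection_domain *domain)
        {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct iommu_dev_data *dev_data;
                unsigned long flags;
                int ret;

                dev_data = get_dev_data(dev);

                if (domain->flags & PD_IOMMUV2_MASK) {
                        /* IOMMUv2 domains require a pass-through default mapping */
                        if (!dev_data->passthrough)
                                return -EINVAL;

                        if (dev_data->iommu_v2) {
                                if (pdev_iommuv2_enable(pdev) != 0)
                                        return -EINVAL;

                                dev_data->ats.enabled = true;
                                dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
                                dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
                        }
                } else if (amd_iommu_iotlb_sup &&
                           pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                        dev_data->ats.enabled = true;
                        dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
                }

                spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
                ret = __attach_device(dev_data, domain);
                spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

                /* Flush any stale IO/TLB state left over from a previous owner */
                domain_flush_tlb_pde(domain);

                return ret;
        }
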
2181 static void __detach_device(struct iommu_dev_data *dev_data) in __detach_device() argument
2191 if (WARN_ON(!dev_data->domain)) in __detach_device()
2194 domain = dev_data->domain; in __detach_device()
2198 do_detach(dev_data); in __detach_device()
2209 struct iommu_dev_data *dev_data; in detach_device() local
2212 dev_data = get_dev_data(dev); in detach_device()
2213 domain = dev_data->domain; in detach_device()
2217 __detach_device(dev_data); in detach_device()
2220 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) in detach_device()
2222 else if (dev_data->ats.enabled) in detach_device()
2225 dev_data->ats.enabled = false; in detach_device()
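
The detach side mirrors this: __detach_device() does the locked bookkeeping via do_detach(), and detach_device() afterwards disables PRI/PASID or ATS on the PCI device and clears the cached ATS state. Sketch around the listed lines:

        static void __detach_device(struct iommu_dev_data *dev_data)
        {
                struct protection_domain *domain;

                if (WARN_ON(!dev_data->domain))
                        return;

                domain = dev_data->domain;

                spin_lock(&domain->lock);
                do_detach(dev_data);
                spin_unlock(&domain->lock);
        }

        static void detach_device(struct device *dev)
        {
                struct protection_domain *domain;
                struct iommu_dev_data *dev_data;
                unsigned long flags;

                dev_data = get_dev_data(dev);
                domain   = dev_data->domain;

                spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
                __detach_device(dev_data);
                spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

                if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                        pdev_iommuv2_disable(to_pci_dev(dev));
                else if (dev_data->ats.enabled)
                        pci_disable_ats(to_pci_dev(dev));

                dev_data->ats.enabled = false;
        }
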
2230 struct iommu_dev_data *dev_data; in amd_iommu_add_device() local
2254 dev_data = get_dev_data(dev); in amd_iommu_add_device()
2256 BUG_ON(!dev_data); in amd_iommu_add_device()
2258 if (iommu_pass_through || dev_data->iommu_v2) in amd_iommu_add_device()
2264 dev_data->passthrough = true; in amd_iommu_add_device()
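
In amd_iommu_add_device() the freshly initialized dev_data decides the device's DMA path: IOMMUv2-capable devices, or a global pass-through policy, get a direct mapping requested from the core, and dev_data->passthrough is recorded when the resulting domain is an identity domain. A heavily abbreviated, hedged sketch of just that tail; the surrounding init and error handling, and the final DMA-ops selection, are omitted, and the locals are declared earlier in the real function:

        /* Excerpt: tail of amd_iommu_add_device(), after iommu_init_device() */
        dev_data = get_dev_data(dev);
        BUG_ON(!dev_data);

        if (iommu_pass_through || dev_data->iommu_v2)
                iommu_request_dm_for_dev(dev);

        /* See what kind of domain the IOMMU core ended up giving us */
        domain = iommu_get_domain_for_dev(dev);
        if (domain->type == IOMMU_DOMAIN_IDENTITY)
                dev_data->passthrough = true;
        /* otherwise the device is switched to the AMD IOMMU DMA ops */
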
2323 struct iommu_dev_data *dev_data; in update_device_table() local
2325 list_for_each_entry(dev_data, &domain->dev_list, list) in update_device_table()
2326 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); in update_device_table()
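
update_device_table() simply re-installs the DTE for every device currently in the domain, preserving each device's ATS setting. Sketch:

        static void update_device_table(struct protection_domain *domain)
        {
                struct iommu_dev_data *dev_data;

                list_for_each_entry(dev_data, &domain->dev_list, list)
                        set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
        }
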
2996 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_detach_device() local
3005 if (dev_data->domain != NULL) in amd_iommu_detach_device()
3019 struct iommu_dev_data *dev_data; in amd_iommu_attach_device() local
3026 dev_data = dev->archdata.iommu; in amd_iommu_attach_device()
3028 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
3032 if (dev_data->domain) in amd_iommu_attach_device()
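
The iommu_ops callbacks build on the same primitives: amd_iommu_detach_device() only calls detach_device() if the device is actually attached, and amd_iommu_attach_device() detaches any existing domain before attaching the new one. A condensed sketch; the check_device()/get_device_id() validation is abbreviated:

        static void amd_iommu_detach_device(struct iommu_domain *dom,
                                            struct device *dev)
        {
                struct iommu_dev_data *dev_data = dev->archdata.iommu;

                if (!check_device(dev))
                        return;

                if (dev_data->domain != NULL)
                        detach_device(dev);
        }

        static int amd_iommu_attach_device(struct iommu_domain *dom,
                                           struct device *dev)
        {
                struct protection_domain *domain = to_pdomain(dom);
                struct iommu_dev_data *dev_data;
                struct amd_iommu *iommu;
                int ret;

                if (!check_device(dev))
                        return -EINVAL;

                dev_data = dev->archdata.iommu;

                iommu = amd_iommu_rlookup_table[dev_data->devid];
                if (!iommu)
                        return -EINVAL;

                if (dev_data->domain)
                        detach_device(dev);

                ret = attach_device(dev, domain);

                iommu_completion_wait(iommu);

                return ret;
        }
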
3270 struct iommu_dev_data *dev_data; in __flush_pasid() local
3296 list_for_each_entry(dev_data, &domain->dev_list, list) { in __flush_pasid()
3304 if (!dev_data->ats.enabled) in __flush_pasid()
3307 qdep = dev_data->ats.qdep; in __flush_pasid()
3308 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3310 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, in __flush_pasid()
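
In __flush_pasid() the dev_list walk issues a per-device IOTLB invalidation carrying the PASID, skipping devices that never enabled ATS. The relevant loop, sketched; the earlier per-IOMMU flush and the domain_flush_complete() synchronization around it are reconstructed, and cmd/ret/address/size come from the enclosing function:

        /* Excerpt: device-TLB flush loop inside __flush_pasid() */
        list_for_each_entry(dev_data, &domain->dev_list, list) {
                struct amd_iommu *iommu;
                int qdep;

                /* Non-ATS devices may share the domain; nothing to flush there */
                if (!dev_data->ats.enabled)
                        continue;

                qdep  = dev_data->ats.qdep;
                iommu = amd_iommu_rlookup_table[dev_data->devid];

                build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
                                      qdep, address, size);

                ret = iommu_queue_command(iommu, &cmd);
                if (ret != 0)
                        goto out;
        }
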
3470 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
3476 dev_data = get_dev_data(&pdev->dev); in amd_iommu_complete_ppr()
3477 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3479 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
3480 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
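
amd_iommu_complete_ppr() resolves the device's dev_data to find both the owning IOMMU and the cached pri_tlp flag, then queues a COMPLETE_PPR command. Sketch around the listed lines:

        int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
                                   int status, int tag)
        {
                struct iommu_dev_data *dev_data;
                struct amd_iommu *iommu;
                struct iommu_cmd cmd;

                dev_data = get_dev_data(&pdev->dev);
                iommu    = amd_iommu_rlookup_table[dev_data->devid];

                build_complete_ppr(&cmd, dev_data->devid, pasid, status,
                                   tag, dev_data->pri_tlp);

                return iommu_queue_command(iommu, &cmd);
        }
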
3504 struct iommu_dev_data *dev_data; in amd_iommu_enable_device_erratum() local
3509 dev_data = get_dev_data(&pdev->dev); in amd_iommu_enable_device_erratum()
3510 dev_data->errata |= (1 << erratum); in amd_iommu_enable_device_erratum()
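
The setter that pairs with pdev_pri_erratum() above simply flips the corresponding bit in dev_data->errata. Sketch; the exported-symbol and capability-check boilerplate is omitted:

        void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
        {
                struct iommu_dev_data *dev_data;

                dev_data = get_dev_data(&pdev->dev);
                dev_data->errata |= (1 << erratum);
        }
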