viommu 65 drivers/iommu/virtio-iommu.c struct viommu_dev *viommu;
viommu 78 drivers/iommu/virtio-iommu.c struct viommu_dev *viommu;
viommu 136 drivers/iommu/virtio-iommu.c static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
viommu 143 drivers/iommu/virtio-iommu.c return len - viommu->probe_size - tail_size;
viommu 154 drivers/iommu/virtio-iommu.c static int __viommu_sync_req(struct viommu_dev *viommu)
viommu 160 drivers/iommu/virtio-iommu.c struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
viommu 162 drivers/iommu/virtio-iommu.c assert_spin_locked(&viommu->request_lock);
viommu 166 drivers/iommu/virtio-iommu.c while (!list_empty(&viommu->requests)) {
viommu 188 drivers/iommu/virtio-iommu.c static int viommu_sync_req(struct viommu_dev *viommu)
viommu 193 drivers/iommu/virtio-iommu.c spin_lock_irqsave(&viommu->request_lock, flags);
viommu 194 drivers/iommu/virtio-iommu.c ret = __viommu_sync_req(viommu);
viommu 196 drivers/iommu/virtio-iommu.c dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
viommu 197 drivers/iommu/virtio-iommu.c spin_unlock_irqrestore(&viommu->request_lock, flags);
viommu 218 drivers/iommu/virtio-iommu.c static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
viommu 226 drivers/iommu/virtio-iommu.c struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
viommu 228 drivers/iommu/virtio-iommu.c assert_spin_locked(&viommu->request_lock);
viommu 230 drivers/iommu/virtio-iommu.c write_offset = viommu_get_write_desc_offset(viommu, buf, len);
viommu 251 drivers/iommu/virtio-iommu.c if (!__viommu_sync_req(viommu))
viommu 257 drivers/iommu/virtio-iommu.c list_add_tail(&req->list, &viommu->requests);
viommu 265 drivers/iommu/virtio-iommu.c static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
viommu 270 drivers/iommu/virtio-iommu.c spin_lock_irqsave(&viommu->request_lock, flags);
viommu 271 drivers/iommu/virtio-iommu.c ret = __viommu_add_req(viommu, buf, len, false);
viommu 273 drivers/iommu/virtio-iommu.c dev_dbg(viommu->dev, "could not add request: %d\n", ret);
viommu 274 drivers/iommu/virtio-iommu.c spin_unlock_irqrestore(&viommu->request_lock, flags);
viommu 283 drivers/iommu/virtio-iommu.c static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
viommu 289 drivers/iommu/virtio-iommu.c spin_lock_irqsave(&viommu->request_lock, flags);
viommu 291 drivers/iommu/virtio-iommu.c ret = __viommu_add_req(viommu, buf, len, true);
viommu 293 drivers/iommu/virtio-iommu.c dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
viommu 297 drivers/iommu/virtio-iommu.c ret = __viommu_sync_req(viommu);
viommu 299 drivers/iommu/virtio-iommu.c dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
viommu 305 drivers/iommu/virtio-iommu.c spin_unlock_irqrestore(&viommu->request_lock, flags);
viommu 408 drivers/iommu/virtio-iommu.c ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
viommu 461 drivers/iommu/virtio-iommu.c static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
viommu 475 drivers/iommu/virtio-iommu.c probe_len = sizeof(*probe) + viommu->probe_size +
viommu 488 drivers/iommu/virtio-iommu.c ret = viommu_send_req_sync(viommu, probe, probe_len);
viommu 496 drivers/iommu/virtio-iommu.c cur < viommu->probe_size) {
viommu 511 drivers/iommu/virtio-iommu.c if (cur >= viommu->probe_size)
viommu 523 drivers/iommu/virtio-iommu.c static int viommu_fault_handler(struct viommu_dev *viommu,
viommu 548 drivers/iommu/virtio-iommu.c dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
viommu 554 drivers/iommu/virtio-iommu.c dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
viommu 565 drivers/iommu/virtio-iommu.c struct viommu_dev *viommu = vq->vdev->priv;
viommu 569 drivers/iommu/virtio-iommu.c dev_err(viommu->dev,
viommu 573 drivers/iommu/virtio-iommu.c viommu_fault_handler(viommu, &evt->fault);
viommu 579 drivers/iommu/virtio-iommu.c dev_err(viommu->dev, "could not add event buffer\n");
viommu 611 drivers/iommu/virtio-iommu.c static int viommu_domain_finalise(struct viommu_dev *viommu,
viommu 617 drivers/iommu/virtio-iommu.c ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
viommu 618 drivers/iommu/virtio-iommu.c viommu->last_domain, GFP_KERNEL);
viommu 624 drivers/iommu/virtio-iommu.c domain->pgsize_bitmap = viommu->pgsize_bitmap;
viommu 625 drivers/iommu/virtio-iommu.c domain->geometry = viommu->geometry;
viommu 627 drivers/iommu/virtio-iommu.c vdomain->map_flags = viommu->map_flags;
viommu 628 drivers/iommu/virtio-iommu.c vdomain->viommu = viommu;
viommu 642 drivers/iommu/virtio-iommu.c if (vdomain->viommu)
viommu 643 drivers/iommu/virtio-iommu.c ida_free(&vdomain->viommu->domain_ids, vdomain->id);
viommu 658 drivers/iommu/virtio-iommu.c if (!vdomain->viommu) {
viommu 663 drivers/iommu/virtio-iommu.c ret = viommu_domain_finalise(vdev->viommu, domain);
viommu 664 drivers/iommu/virtio-iommu.c } else if (vdomain->viommu != vdev->viommu) {
viommu 696 drivers/iommu/virtio-iommu.c ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
viommu 748 drivers/iommu/virtio-iommu.c ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
viommu 778 drivers/iommu/virtio-iommu.c ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
viommu 807 drivers/iommu/virtio-iommu.c viommu_sync_req(vdomain->viommu);
viommu 873 drivers/iommu/virtio-iommu.c struct viommu_dev *viommu = NULL;
viommu 879 drivers/iommu/virtio-iommu.c viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
viommu 880 drivers/iommu/virtio-iommu.c if (!viommu)
viommu 888 drivers/iommu/virtio-iommu.c vdev->viommu = viommu;
viommu 892 drivers/iommu/virtio-iommu.c if (viommu->probe_size) {
viommu 894 drivers/iommu/virtio-iommu.c ret = viommu_probe_endpoint(viommu, dev);
viommu 899 drivers/iommu/virtio-iommu.c ret = iommu_device_link(&viommu->iommu, dev);
viommu 918 drivers/iommu/virtio-iommu.c iommu_device_unlink(&viommu->iommu, dev);
viommu 937 drivers/iommu/virtio-iommu.c iommu_device_unlink(&vdev->viommu->iommu, dev);
viommu 971 drivers/iommu/virtio-iommu.c static int viommu_init_vqs(struct viommu_dev *viommu)
viommu 973 drivers/iommu/virtio-iommu.c struct virtio_device *vdev = dev_to_virtio(viommu->dev);
viommu 980 drivers/iommu/virtio-iommu.c return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
viommu 984 drivers/iommu/virtio-iommu.c static int viommu_fill_evtq(struct viommu_dev *viommu)
viommu 989 drivers/iommu/virtio-iommu.c struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
viommu 992 drivers/iommu/virtio-iommu.c viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
viommu 1010 drivers/iommu/virtio-iommu.c struct viommu_dev *viommu = NULL;
viommu 1020 drivers/iommu/virtio-iommu.c viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
viommu 1021 drivers/iommu/virtio-iommu.c if (!viommu)
viommu 1024 drivers/iommu/virtio-iommu.c spin_lock_init(&viommu->request_lock);
viommu 1025 drivers/iommu/virtio-iommu.c ida_init(&viommu->domain_ids);
viommu 1026 drivers/iommu/virtio-iommu.c viommu->dev = dev;
viommu 1027 drivers/iommu/virtio-iommu.c viommu->vdev = vdev;
viommu 1028 drivers/iommu/virtio-iommu.c INIT_LIST_HEAD(&viommu->requests);
viommu 1030 drivers/iommu/virtio-iommu.c ret = viommu_init_vqs(viommu);
viommu 1035 drivers/iommu/virtio-iommu.c &viommu->pgsize_bitmap);
viommu 1037 drivers/iommu/virtio-iommu.c if (!viommu->pgsize_bitmap) {
viommu 1042 drivers/iommu/virtio-iommu.c viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
viommu 1043 drivers/iommu/virtio-iommu.c viommu->last_domain = ~0U;
viommu 1056 drivers/iommu/virtio-iommu.c &viommu->first_domain);
viommu 1060 drivers/iommu/virtio-iommu.c &viommu->last_domain);
viommu 1064 drivers/iommu/virtio-iommu.c &viommu->probe_size);
viommu 1066 drivers/iommu/virtio-iommu.c viommu->geometry = (struct iommu_domain_geometry) {
viommu 1073 drivers/iommu/virtio-iommu.c viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
viommu 1075 drivers/iommu/virtio-iommu.c viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
viommu 1080 drivers/iommu/virtio-iommu.c ret = viommu_fill_evtq(viommu);
viommu 1084 drivers/iommu/virtio-iommu.c ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
viommu 1089 drivers/iommu/virtio-iommu.c iommu_device_set_ops(&viommu->iommu, &viommu_ops);
viommu 1090 drivers/iommu/virtio-iommu.c iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
viommu 1092 drivers/iommu/virtio-iommu.c iommu_device_register(&viommu->iommu);
viommu 1115 drivers/iommu/virtio-iommu.c vdev->priv = viommu;
viommu 1118 drivers/iommu/virtio-iommu.c order_base_2(viommu->geometry.aperture_end));
viommu 1119 drivers/iommu/virtio-iommu.c dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
viommu 1124 drivers/iommu/virtio-iommu.c iommu_device_sysfs_remove(&viommu->iommu);
viommu 1125 drivers/iommu/virtio-iommu.c iommu_device_unregister(&viommu->iommu);
viommu 1134 drivers/iommu/virtio-iommu.c struct viommu_dev *viommu = vdev->priv;
viommu 1136 drivers/iommu/virtio-iommu.c iommu_device_sysfs_remove(&viommu->iommu);
viommu 1137 drivers/iommu/virtio-iommu.c iommu_device_unregister(&viommu->iommu);
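The hits at lines 136, 143 and 230 show how each request buffer is split before being queued: everything up to an offset goes to the device as a readable descriptor, and the rest (the status tail, plus a probe_size property area that line 143 subtracts as well) stays device-writable. A minimal sketch of that computation, assuming (this is not shown in the hits above) that the probe_size reservation applies only to VIRTIO_IOMMU_T_PROBE requests, and with the parameter list simplified:

#include <uapi/linux/virtio_iommu.h>

/*
 * Hedged sketch of the descriptor split behind lines 136/143/230,
 * not the driver's exact code.
 */
static off_t example_write_desc_offset(struct viommu_dev *viommu,
				       void *buf, size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
	struct virtio_iommu_req_head *head = buf;

	/* Assumption: only PROBE requests carry a device-filled area. */
	if (head->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}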
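Lines 265, 283, 748, 778 and 807 outline the driver's batched request pattern: map requests go through the synchronous viommu_send_req_sync(), while unmap requests are only queued with viommu_add_req() and flushed later by viommu_sync_req() in the IOTLB sync path, so several unmaps share one virtqueue kick. A sketch of that calling pattern, using the request layout from include/uapi/linux/virtio_iommu.h; the example_* functions are hypothetical stand-ins, not the driver's own:

/*
 * Hedged sketch of the queue-then-sync pattern, not the driver code.
 */
static size_t example_unmap(struct viommu_domain *vdomain,
			    unsigned long iova, size_t size)
{
	struct virtio_iommu_req_unmap unmap = {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + size - 1),
	};

	/* Queue without notifying the device (cf. line 778)... */
	if (viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)))
		return 0;

	return size;
}

static void example_iotlb_sync(struct viommu_domain *vdomain)
{
	/* ...then flush all pending requests at once (cf. line 807). */
	viommu_sync_req(vdomain->viommu);
}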
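Finally, lines 611-628 and 642-643 show domain ID management: viommu_domain_finalise() takes an ID from the per-device IDA, bounded by the [first_domain, last_domain] range read from config space during probe (lines 1043, 1056, 1060), and domain free returns it. A sketch of that pattern under the assumption that vdomain->id holds the allocated value; example_* names are hypothetical:

/*
 * Hedged sketch of the IDA usage at lines 617-628 and 642-643,
 * illustrative only.
 */
static int example_domain_finalise(struct viommu_dev *viommu,
				   struct viommu_domain *vdomain)
{
	int id = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
				 viommu->last_domain, GFP_KERNEL);
	if (id < 0)
		return id;

	vdomain->id		= id;
	vdomain->map_flags	= viommu->map_flags;	/* cf. line 627 */
	vdomain->viommu		= viommu;		/* cf. line 628 */
	return 0;
}

static void example_domain_free(struct viommu_domain *vdomain)
{
	/* Only finalised domains hold an ID (cf. lines 642-643). */
	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);
}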