vdomain   79 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain;
vdomain  314 drivers/iommu/virtio-iommu.c  static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
vdomain  329 drivers/iommu/virtio-iommu.c  spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
vdomain  330 drivers/iommu/virtio-iommu.c  interval_tree_insert(&mapping->iova, &vdomain->mappings);
vdomain  331 drivers/iommu/virtio-iommu.c  spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);
vdomain  346 drivers/iommu/virtio-iommu.c  static size_t viommu_del_mappings(struct viommu_domain *vdomain,
vdomain  355 drivers/iommu/virtio-iommu.c  spin_lock_irqsave(&vdomain->mappings_lock, flags);
vdomain  356 drivers/iommu/virtio-iommu.c  next = interval_tree_iter_first(&vdomain->mappings, iova, last);
vdomain  372 drivers/iommu/virtio-iommu.c  interval_tree_remove(node, &vdomain->mappings);
vdomain  375 drivers/iommu/virtio-iommu.c  spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
vdomain  387 drivers/iommu/virtio-iommu.c  static int viommu_replay_mappings(struct viommu_domain *vdomain)
vdomain  395 drivers/iommu/virtio-iommu.c  spin_lock_irqsave(&vdomain->mappings_lock, flags);
vdomain  396 drivers/iommu/virtio-iommu.c  node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
vdomain  401 drivers/iommu/virtio-iommu.c  .domain = cpu_to_le32(vdomain->id),
vdomain  408 drivers/iommu/virtio-iommu.c  ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
vdomain  414 drivers/iommu/virtio-iommu.c  spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
vdomain  589 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain;
vdomain  594 drivers/iommu/virtio-iommu.c  vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
vdomain  595 drivers/iommu/virtio-iommu.c  if (!vdomain)
vdomain  598 drivers/iommu/virtio-iommu.c  mutex_init(&vdomain->mutex);
vdomain  599 drivers/iommu/virtio-iommu.c  spin_lock_init(&vdomain->mappings_lock);
vdomain  600 drivers/iommu/virtio-iommu.c  vdomain->mappings = RB_ROOT_CACHED;
vdomain  603 drivers/iommu/virtio-iommu.c  iommu_get_dma_cookie(&vdomain->domain)) {
vdomain  604 drivers/iommu/virtio-iommu.c  kfree(vdomain);
vdomain  608 drivers/iommu/virtio-iommu.c  return &vdomain->domain;
vdomain  615 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain = to_viommu_domain(domain);
vdomain  622 drivers/iommu/virtio-iommu.c  vdomain->id = (unsigned int)ret;
vdomain  627 drivers/iommu/virtio-iommu.c  vdomain->map_flags = viommu->map_flags;
vdomain  628 drivers/iommu/virtio-iommu.c  vdomain->viommu = viommu;
vdomain  635 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain = to_viommu_domain(domain);
vdomain  640 drivers/iommu/virtio-iommu.c  viommu_del_mappings(vdomain, 0, 0);
vdomain  642 drivers/iommu/virtio-iommu.c  if (vdomain->viommu)
vdomain  643 drivers/iommu/virtio-iommu.c  ida_free(&vdomain->viommu->domain_ids, vdomain->id);
vdomain  645 drivers/iommu/virtio-iommu.c  kfree(vdomain);
vdomain  655 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain = to_viommu_domain(domain);
vdomain  657 drivers/iommu/virtio-iommu.c  mutex_lock(&vdomain->mutex);
vdomain  658 drivers/iommu/virtio-iommu.c  if (!vdomain->viommu) {
vdomain  664 drivers/iommu/virtio-iommu.c  } else if (vdomain->viommu != vdev->viommu) {
vdomain  668 drivers/iommu/virtio-iommu.c  mutex_unlock(&vdomain->mutex);
vdomain  685 drivers/iommu/virtio-iommu.c  if (vdev->vdomain)
vdomain  686 drivers/iommu/virtio-iommu.c  vdev->vdomain->nr_endpoints--;
vdomain  690 drivers/iommu/virtio-iommu.c  .domain = cpu_to_le32(vdomain->id),
vdomain  696 drivers/iommu/virtio-iommu.c  ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
vdomain  701 drivers/iommu/virtio-iommu.c  if (!vdomain->nr_endpoints) {
vdomain  706 drivers/iommu/virtio-iommu.c  ret = viommu_replay_mappings(vdomain);
vdomain  711 drivers/iommu/virtio-iommu.c  vdomain->nr_endpoints++;
vdomain  712 drivers/iommu/virtio-iommu.c  vdev->vdomain = vdomain;
vdomain  723 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain = to_viommu_domain(domain);
vdomain  729 drivers/iommu/virtio-iommu.c  if (flags & ~vdomain->map_flags)
vdomain  732 drivers/iommu/virtio-iommu.c  ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
vdomain  738 drivers/iommu/virtio-iommu.c  .domain = cpu_to_le32(vdomain->id),
vdomain  745 drivers/iommu/virtio-iommu.c  if (!vdomain->nr_endpoints)
vdomain  748 drivers/iommu/virtio-iommu.c  ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
vdomain  750 drivers/iommu/virtio-iommu.c  viommu_del_mappings(vdomain, iova, size);
vdomain  761 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain = to_viommu_domain(domain);
vdomain  763 drivers/iommu/virtio-iommu.c  unmapped = viommu_del_mappings(vdomain, iova, size);
vdomain  768 drivers/iommu/virtio-iommu.c  if (!vdomain->nr_endpoints)
vdomain  773 drivers/iommu/virtio-iommu.c  .domain = cpu_to_le32(vdomain->id),
vdomain  778 drivers/iommu/virtio-iommu.c  ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
vdomain  789 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain = to_viommu_domain(domain);
vdomain  791 drivers/iommu/virtio-iommu.c  spin_lock_irqsave(&vdomain->mappings_lock, flags);
vdomain  792 drivers/iommu/virtio-iommu.c  node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
vdomain  797 drivers/iommu/virtio-iommu.c  spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
vdomain  805 drivers/iommu/virtio-iommu.c  struct viommu_domain *vdomain = to_viommu_domain(domain);
vdomain  807 drivers/iommu/virtio-iommu.c  viommu_sync_req(vdomain->viommu);
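The occurrences at source lines 314-331, 346-375, 599-600 and 789-797 above outline the driver's mapping-tracking pattern: each viommu_domain keeps an interval tree of IOVA ranges in vdomain->mappings, guarded by vdomain->mappings_lock, so the ranges can later be replayed to the device (viommu_replay_mappings) or translated back to a physical address. The following is a minimal sketch of that pattern, not the driver's actual code: the demo_* names are hypothetical, and the layout of the mapping structure is an assumption inferred from the calls visible above.

/*
 * Sketch of the interval-tree mapping tracking suggested by the listing
 * above.  Field names and helpers are illustrative assumptions, not the
 * virtio-iommu driver's exact definitions.
 */
#include <linux/interval_tree.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;	/* .start/.last hold the IOVA range */
	u32				flags;
};

struct demo_domain {
	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;
};

/* Mirrors the initialisation seen at source lines 599-600. */
static void demo_domain_init(struct demo_domain *d)
{
	spin_lock_init(&d->mappings_lock);
	d->mappings = RB_ROOT_CACHED;
}

/* Same shape as the viommu_add_mapping() calls at source lines 314-331. */
static int demo_add_mapping(struct demo_domain *d, unsigned long iova,
			    phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct demo_mapping *m;

	m = kzalloc(sizeof(*m), GFP_ATOMIC);
	if (!m)
		return -ENOMEM;

	m->paddr	= paddr;
	m->iova.start	= iova;
	m->iova.last	= iova + size - 1;
	m->flags	= flags;

	spin_lock_irqsave(&d->mappings_lock, irqflags);
	interval_tree_insert(&m->iova, &d->mappings);
	spin_unlock_irqrestore(&d->mappings_lock, irqflags);

	return 0;
}

/* Same shape as the iova_to_phys lookup at source lines 789-797. */
static phys_addr_t demo_iova_to_phys(struct demo_domain *d, dma_addr_t iova)
{
	unsigned long flags;
	struct interval_tree_node *node;
	struct demo_mapping *m;
	phys_addr_t paddr = 0;

	spin_lock_irqsave(&d->mappings_lock, flags);
	node = interval_tree_iter_first(&d->mappings, iova, iova);
	if (node) {
		m = container_of(node, struct demo_mapping, iova);
		paddr = m->paddr + (iova - m->iova.start);
	}
	spin_unlock_irqrestore(&d->mappings_lock, flags);

	return paddr;
}

Keeping the tree under a spinlock rather than the domain mutex (the mutex appears only on the attach path, lines 657-668) presumably lets lookups run in atomic context, while the replay path can walk the same tree after an endpoint attaches.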