Lines matching refs:iommu — cross-reference hits for the identifier iommu in the AMD IOMMU driver (drivers/iommu/amd_iommu.c). Each hit is prefixed with its source line number and suffixed with the enclosing function.

249 	return dev->archdata.iommu;  in get_dev_data()
367 if (dev->archdata.iommu) in iommu_init_device()
377 struct amd_iommu *iommu; in iommu_init_device() local
379 iommu = amd_iommu_rlookup_table[dev_data->devid]; in iommu_init_device()
380 dev_data->iommu_v2 = iommu->is_iommu_v2; in iommu_init_device()
383 dev->archdata.iommu = dev_data; in iommu_init_device()
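
The hits above are the per-device bookkeeping: iommu_init_device() resolves the owning IOMMU through amd_iommu_rlookup_table, records IOMMU-v2 capability, and caches the result in dev->archdata.iommu, which get_dev_data() then returns. A minimal sketch of that pattern, reconstructed only from what the listing shows:

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
        /* Cached by iommu_init_device() below. */
        return dev->archdata.iommu;
}

/* Inside iommu_init_device(), once dev_data->devid is known: */
        iommu = amd_iommu_rlookup_table[dev_data->devid];
        dev_data->iommu_v2 = iommu->is_iommu_v2;  /* PASID/PPR capable? */
        dev->archdata.iommu = dev_data;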
514 static void iommu_print_event(struct amd_iommu *iommu, void *__evt) in iommu_print_event() argument
593 static void iommu_poll_events(struct amd_iommu *iommu) in iommu_poll_events() argument
597 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
598 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); in iommu_poll_events()
601 iommu_print_event(iommu, iommu->evt_buf + head); in iommu_poll_events()
605 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); in iommu_poll_events()
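
iommu_poll_events() (lines 593-605) is a standard MMIO ring-buffer consumer: read the head and tail registers, report each event entry from head to tail, then publish the new head. A sketch filling in the loop the listing elides; EVENT_ENTRY_SIZE and EVT_BUFFER_SIZE are the driver's constants, but their use here is an assumption:

static void iommu_poll_events(struct amd_iommu *iommu)
{
        u32 head, tail;

        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        while (head != tail) {
                iommu_print_event(iommu, iommu->evt_buf + head);
                /* Assumed entry-size arithmetic; not shown in the listing. */
                head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
        }

        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}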
608 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) in iommu_handle_ppr_entry() argument
628 static void iommu_poll_ppr_log(struct amd_iommu *iommu) in iommu_poll_ppr_log() argument
632 if (iommu->ppr_log == NULL) in iommu_poll_ppr_log()
635 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
636 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
643 raw = (u64 *)(iommu->ppr_log + head); in iommu_poll_ppr_log()
668 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
671 iommu_handle_ppr_entry(iommu, entry); in iommu_poll_ppr_log()
674 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); in iommu_poll_ppr_log()
675 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); in iommu_poll_ppr_log()
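
The PPR log consumer (lines 628-675) has one subtlety the hit ordering preserves: the 128-bit entry is copied out of the ring and the head pointer is written back (line 668) before the entry is handled (line 671), so hardware can reuse the slot while the potentially slow handler runs; head and tail are then re-read (lines 674-675). A sketch along those lines; PPR_ENTRY_SIZE and PPR_LOG_SIZE are the driver's constants, and the valid-bit handshake on raw entries is elided:

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
        u32 head, tail;

        if (iommu->ppr_log == NULL)
                return;

        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        while (head != tail) {
                volatile u64 *raw;
                u64 entry[2];

                raw = (u64 *)(iommu->ppr_log + head);

                /* Copy the entry out of the ring ... */
                entry[0] = raw[0];
                entry[1] = raw[1];

                /* ... and give the slot back to hardware before the
                 * handler runs. */
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

                iommu_handle_ppr_entry(iommu, entry);

                /* Pointers may have moved while we were handling. */
                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
        }
}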
681 struct amd_iommu *iommu = (struct amd_iommu *) data; in amd_iommu_int_thread() local
682 u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
687 iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
691 iommu_poll_events(iommu); in amd_iommu_int_thread()
696 iommu_poll_ppr_log(iommu); in amd_iommu_int_thread()
712 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); in amd_iommu_int_thread()
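
amd_iommu_int_thread() (lines 681-712) ties the two pollers together: read the status register, acknowledge the interrupt bits, drain whichever logs are flagged, then re-read status, since new entries may have arrived while the logs were being drained. A sketch assuming the driver's MMIO_STATUS_EVT_INT_MASK and MMIO_STATUS_PPR_INT_MASK bit names (not visible in the listing):

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
        struct amd_iommu *iommu = (struct amd_iommu *) data;
        u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
                /* Ack what we have seen before draining (line 687). */
                writel(status & (MMIO_STATUS_EVT_INT_MASK |
                                 MMIO_STATUS_PPR_INT_MASK),
                       iommu->mmio_base + MMIO_STATUS_OFFSET);

                if (status & MMIO_STATUS_EVT_INT_MASK)
                        iommu_poll_events(iommu);

                if (status & MMIO_STATUS_PPR_INT_MASK)
                        iommu_poll_ppr_log(iommu);

                /* Hardware may have raised new bits (line 712). */
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
        }

        return IRQ_HANDLED;
}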
745 static void copy_cmd_to_buffer(struct amd_iommu *iommu, in copy_cmd_to_buffer() argument
751 target = iommu->cmd_buf + tail; in copy_cmd_to_buffer()
758 writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in copy_cmd_to_buffer()
912 static int iommu_queue_command_sync(struct amd_iommu *iommu, in iommu_queue_command_sync() argument
920 spin_lock_irqsave(&iommu->lock, flags); in iommu_queue_command_sync()
922 head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); in iommu_queue_command_sync()
923 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); in iommu_queue_command_sync()
933 copy_cmd_to_buffer(iommu, &sync_cmd, tail); in iommu_queue_command_sync()
935 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
943 copy_cmd_to_buffer(iommu, cmd, tail); in iommu_queue_command_sync()
946 iommu->need_sync = sync; in iommu_queue_command_sync()
948 spin_unlock_irqrestore(&iommu->lock, flags); in iommu_queue_command_sync()
953 static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) in iommu_queue_command() argument
955 return iommu_queue_command_sync(iommu, cmd, true); in iommu_queue_command()
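
The command path (lines 745-955) is the producer side of another ring: copy_cmd_to_buffer() writes the command and rings the doorbell by advancing MMIO_CMD_TAIL_OFFSET, while iommu_queue_command_sync() does that under iommu->lock, checking free space against the head register and, when the ring is nearly full, injecting a completion-wait command (line 933) until the hardware catches up. A sketch of both; CMD_BUFFER_SIZE and the ring-full handling are assumptions:

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
                               struct iommu_cmd *cmd, u32 tail)
{
        u8 *target = iommu->cmd_buf + tail;

        tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;  /* assumed size */

        /* Copy the command in, then publish it via the tail doorbell. */
        memcpy(target, cmd, sizeof(*cmd));
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static int iommu_queue_command_sync(struct amd_iommu *iommu,
                                    struct iommu_cmd *cmd, bool sync)
{
        unsigned long flags;
        u32 head, tail;

        spin_lock_irqsave(&iommu->lock, flags);

        head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        /* Ring-full handling elided: the listing shows a sync command
         * being injected (line 933) before retrying. */

        copy_cmd_to_buffer(iommu, cmd, tail);

        /* Remember whether the caller wants a completion barrier;
         * iommu_completion_wait() checks this flag (line 968). */
        iommu->need_sync = sync;

        spin_unlock_irqrestore(&iommu->lock, flags);
        return 0;
}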
962 static int iommu_completion_wait(struct amd_iommu *iommu) in iommu_completion_wait() argument
968 if (!iommu->need_sync) in iommu_completion_wait()
973 ret = iommu_queue_command_sync(iommu, &cmd, false); in iommu_completion_wait()
980 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) in iommu_flush_dte() argument
986 return iommu_queue_command(iommu, &cmd); in iommu_flush_dte()
989 static void iommu_flush_dte_all(struct amd_iommu *iommu) in iommu_flush_dte_all() argument
994 iommu_flush_dte(iommu, devid); in iommu_flush_dte_all()
996 iommu_completion_wait(iommu); in iommu_flush_dte_all()
1003 static void iommu_flush_tlb_all(struct amd_iommu *iommu) in iommu_flush_tlb_all() argument
1011 iommu_queue_command(iommu, &cmd); in iommu_flush_tlb_all()
1014 iommu_completion_wait(iommu); in iommu_flush_tlb_all()
1017 static void iommu_flush_all(struct amd_iommu *iommu) in iommu_flush_all() argument
1023 iommu_queue_command(iommu, &cmd); in iommu_flush_all()
1024 iommu_completion_wait(iommu); in iommu_flush_all()
1027 static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) in iommu_flush_irt() argument
1033 iommu_queue_command(iommu, &cmd); in iommu_flush_irt()
1036 static void iommu_flush_irt_all(struct amd_iommu *iommu) in iommu_flush_irt_all() argument
1041 iommu_flush_irt(iommu, devid); in iommu_flush_irt_all()
1043 iommu_completion_wait(iommu); in iommu_flush_irt_all()
1046 void iommu_flush_all_caches(struct amd_iommu *iommu) in iommu_flush_all_caches() argument
1048 if (iommu_feature(iommu, FEATURE_IA)) { in iommu_flush_all_caches()
1049 iommu_flush_all(iommu); in iommu_flush_all_caches()
1051 iommu_flush_dte_all(iommu); in iommu_flush_all_caches()
1052 iommu_flush_irt_all(iommu); in iommu_flush_all_caches()
1053 iommu_flush_tlb_all(iommu); in iommu_flush_all_caches()
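
iommu_flush_all_caches() (lines 1046-1053) encodes the flush policy: if the hardware advertises FEATURE_IA ("invalidate all"), a single command invalidates everything; otherwise the device table entries, interrupt remapping tables, and TLB are flushed class by class, each helper ending in a completion wait. Reassembled from the hits above (only the else-branch structure is inferred):

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
        if (iommu_feature(iommu, FEATURE_IA)) {
                /* One command invalidates everything. */
                iommu_flush_all(iommu);
        } else {
                iommu_flush_dte_all(iommu);
                iommu_flush_irt_all(iommu);
                iommu_flush_tlb_all(iommu);
        }
}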
1063 struct amd_iommu *iommu; in device_flush_iotlb() local
1068 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_iotlb()
1072 return iommu_queue_command(iommu, &cmd); in device_flush_iotlb()
1080 struct amd_iommu *iommu; in device_flush_dte() local
1084 iommu = amd_iommu_rlookup_table[dev_data->devid]; in device_flush_dte()
1087 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1089 ret = iommu_flush_dte(iommu, alias); in device_flush_dte()
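
device_flush_dte() (lines 1080-1089) flushes the DTE for the device itself and, when the request ID seen by the IOMMU differs (a PCI alias), for the alias as well. A sketch; how alias is obtained is not in the listing, so the amd_iommu_alias_table lookup here is an assumption:

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
        struct amd_iommu *iommu;
        u16 alias;
        int ret;

        iommu = amd_iommu_rlookup_table[dev_data->devid];
        alias = amd_iommu_alias_table[dev_data->devid];  /* assumed lookup */

        ret = iommu_flush_dte(iommu, dev_data->devid);
        if (!ret && alias != dev_data->devid)
                ret = iommu_flush_dte(iommu, alias);

        return ret;
}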
1444 struct amd_iommu *iommu; in alloc_new_range() local
1497 for_each_iommu(iommu) { in alloc_new_range()
1498 if (iommu->exclusion_start && in alloc_new_range()
1499 iommu->exclusion_start >= dma_dom->aperture[index]->offset in alloc_new_range()
1500 && iommu->exclusion_start < dma_dom->aperture_size) { in alloc_new_range()
1502 int pages = iommu_num_pages(iommu->exclusion_start, in alloc_new_range()
1503 iommu->exclusion_length, in alloc_new_range()
1505 startpage = iommu->exclusion_start >> PAGE_SHIFT; in alloc_new_range()
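
In alloc_new_range() (lines 1444-1505) every IOMMU's exclusion range is checked against the freshly added aperture window; overlapping pages must be marked busy so the DMA address allocator never hands them out. A sketch of that loop; reserve_unity_pages() is a placeholder name for the driver's reservation helper, which the listing does not show:

        for_each_iommu(iommu) {
                if (iommu->exclusion_start &&
                    iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
                    iommu->exclusion_start < dma_dom->aperture_size) {
                        unsigned long startpage;
                        int pages = iommu_num_pages(iommu->exclusion_start,
                                                    iommu->exclusion_length,
                                                    PAGE_SIZE);
                        startpage = iommu->exclusion_start >> PAGE_SHIFT;
                        /* Placeholder: mark [startpage, startpage + pages)
                         * allocated in the aperture bitmap. */
                        reserve_unity_pages(dma_dom, startpage, pages);
                }
        }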
1954 struct amd_iommu *iommu; in do_attach() local
1958 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_attach()
1967 domain->dev_iommu[iommu->index] += 1; in do_attach()
1980 struct amd_iommu *iommu; in do_detach() local
1992 iommu = amd_iommu_rlookup_table[dev_data->devid]; in do_detach()
1996 dev_data->domain->dev_iommu[iommu->index] -= 1; in do_detach()
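
do_attach()/do_detach() (lines 1954-1996) keep a per-domain count of how many devices each physical IOMMU contributes (domain->dev_iommu[iommu->index]), so that later domain-wide flushes can skip IOMMUs with no devices in the domain. The symmetric core, exactly as the hits show it:

        /* do_attach(): account the device to its IOMMU. */
        iommu = amd_iommu_rlookup_table[dev_data->devid];
        domain->dev_iommu[iommu->index] += 1;

        /* do_detach(): drop the reference again. */
        iommu = amd_iommu_rlookup_table[dev_data->devid];
        dev_data->domain->dev_iommu[iommu->index] -= 1;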
2232 struct amd_iommu *iommu; in amd_iommu_add_device() local
2240 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_add_device()
2269 iommu_completion_wait(iommu); in amd_iommu_add_device()
2276 struct amd_iommu *iommu; in amd_iommu_remove_device() local
2283 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_remove_device()
2286 iommu_completion_wait(iommu); in amd_iommu_remove_device()
2996 struct iommu_dev_data *dev_data = dev->archdata.iommu; in amd_iommu_detach_device()
2997 struct amd_iommu *iommu; in amd_iommu_detach_device() local
3008 iommu = amd_iommu_rlookup_table[devid]; in amd_iommu_detach_device()
3009 if (!iommu) in amd_iommu_detach_device()
3012 iommu_completion_wait(iommu); in amd_iommu_detach_device()
3020 struct amd_iommu *iommu; in amd_iommu_attach_device() local
3026 dev_data = dev->archdata.iommu; in amd_iommu_attach_device()
3028 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_attach_device()
3029 if (!iommu) in amd_iommu_attach_device()
3037 iommu_completion_wait(iommu); in amd_iommu_attach_device()
3297 struct amd_iommu *iommu; in __flush_pasid() local
3308 iommu = amd_iommu_rlookup_table[dev_data->devid]; in __flush_pasid()
3313 ret = iommu_queue_command(iommu, &cmd); in __flush_pasid()
3471 struct amd_iommu *iommu; in amd_iommu_complete_ppr() local
3477 iommu = amd_iommu_rlookup_table[dev_data->devid]; in amd_iommu_complete_ppr()
3482 return iommu_queue_command(iommu, &cmd); in amd_iommu_complete_ppr()
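
Both PASID-level paths resolve the IOMMU per device and queue a command: __flush_pasid() (lines 3297-3313) does it for every device attached to the domain, and amd_iommu_complete_ppr() (lines 3471-3482) sends the COMPLETE_PPR response that lets the faulting device retry. A sketch of the latter; build_complete_ppr() and its parameter order are assumptions, since only the lookup and the final queueing appear in the listing:

int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
                           int status, int tag)
{
        struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;

        dev_data = get_dev_data(&pdev->dev);
        iommu    = amd_iommu_rlookup_table[dev_data->devid];

        /* Assumed builder: fills cmd with a COMPLETE_PPR for devid/pasid. */
        build_complete_ppr(&cmd, dev_data->devid, pasid, status, tag);

        return iommu_queue_command(iommu, &cmd);
}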
3619 struct amd_iommu *iommu; in get_irq_table() local
3625 iommu = amd_iommu_rlookup_table[devid]; in get_irq_table()
3626 if (!iommu) in get_irq_table()
3638 iommu_flush_dte(iommu, devid); in get_irq_table()
3672 iommu_flush_dte(iommu, devid); in get_irq_table()
3676 iommu_flush_dte(iommu, alias); in get_irq_table()
3680 iommu_completion_wait(iommu); in get_irq_table()
3729 struct amd_iommu *iommu; in modify_irte() local
3732 iommu = amd_iommu_rlookup_table[devid]; in modify_irte()
3733 if (iommu == NULL) in modify_irte()
3744 iommu_flush_irt(iommu, devid); in modify_irte()
3745 iommu_completion_wait(iommu); in modify_irte()
3753 struct amd_iommu *iommu; in free_irte() local
3756 iommu = amd_iommu_rlookup_table[devid]; in free_irte()
3757 if (iommu == NULL) in free_irte()
3768 iommu_flush_irt(iommu, devid); in free_irte()
3769 iommu_completion_wait(iommu); in free_irte()
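
modify_irte() and free_irte() (lines 3729-3769) share one invariant: after any write to an interrupt remapping table entry, flush the IRT for that device ID and completion-wait so the IOMMU re-fetches the entry. get_irq_table() additionally flushes the DTE (lines 3638, 3672, 3676) because installing a table changes the device table entry itself. A sketch of modify_irte(); the table locking and return codes are assumed:

static int modify_irte(u16 devid, int index, union irte irte)
{
        struct irq_remap_table *table;
        struct amd_iommu *iommu;
        unsigned long flags;

        iommu = amd_iommu_rlookup_table[devid];
        if (iommu == NULL)
                return -EINVAL;

        table = get_irq_table(devid, false);
        if (!table)
                return -ENOMEM;

        spin_lock_irqsave(&table->lock, flags);
        table->table[index] = irte.val;          /* install the new entry */
        spin_unlock_irqrestore(&table->lock, flags);

        /* Make the hardware re-read the modified entry. */
        iommu_flush_irt(iommu, devid);
        iommu_completion_wait(iommu);

        return 0;
}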
3797 struct amd_iommu *iommu; in get_ir_irq_domain() local
3805 iommu = amd_iommu_rlookup_table[devid]; in get_ir_irq_domain()
3806 if (iommu) in get_ir_irq_domain()
3807 return iommu->ir_domain; in get_ir_irq_domain()
3815 struct amd_iommu *iommu; in get_irq_domain() local
3826 iommu = amd_iommu_rlookup_table[devid]; in get_irq_domain()
3827 if (iommu) in get_irq_domain()
3828 return iommu->msi_domain; in get_irq_domain()
4068 int amd_iommu_create_irq_domain(struct amd_iommu *iommu) in amd_iommu_create_irq_domain() argument
4070 iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu); in amd_iommu_create_irq_domain()
4071 if (!iommu->ir_domain) in amd_iommu_create_irq_domain()
4074 iommu->ir_domain->parent = arch_get_ir_parent_domain(); in amd_iommu_create_irq_domain()
4075 iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain); in amd_iommu_create_irq_domain()
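
amd_iommu_create_irq_domain() (lines 4068-4075) builds the per-IOMMU remapping irq_domain as a radix tree, parents it to the arch-provided remapping parent, and stacks an MSI domain on top; get_ir_irq_domain()/get_irq_domain() (lines 3797-3828) later hand these two domains out per device ID. The listing cuts off before the return paths, so the error code below is an assumed completion:

int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
        iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
        if (!iommu->ir_domain)
                return -ENOMEM;                  /* assumed; not in listing */

        iommu->ir_domain->parent = arch_get_ir_parent_domain();
        iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);

        return 0;
}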