Lines matching refs:iommu — cross-reference hits for the identifier `iommu` in the Rockchip IOMMU driver (drivers/iommu/rockchip-iommu.c). Each entry shows the source line number, the matching line, the enclosing function, and whether `iommu` is a function argument or a local variable at that point.
274 static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset) in rk_iommu_read() argument
276 return readl(iommu->base + offset); in rk_iommu_read()
279 static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value) in rk_iommu_write() argument
281 writel(value, iommu->base + offset); in rk_iommu_write()
284 static void rk_iommu_command(struct rk_iommu *iommu, u32 command) in rk_iommu_command() argument
286 writel(command, iommu->base + RK_MMU_COMMAND); in rk_iommu_command()
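The three helpers above are thin MMIO wrappers around the per-instance register window at iommu->base. The register offsets themselves never appear among these matches; the block below is an assumed layout, reconstructed from the RK_MMU_* names used throughout the listing, and should be read as a sketch rather than the driver's authoritative header.

/* Assumed register map, inferred from the RK_MMU_* names in this listing */
#define RK_MMU_DTE_ADDR		0x00	/* directory table (DTE) base address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of the last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* shoot down one IOTLB line */
#define RK_MMU_INT_CLEAR	0x18	/* acknowledge interrupts */
#define RK_MMU_INT_MASK		0x1C	/* interrupt enable */
#define RK_MMU_INT_STATUS	0x20	/* masked interrupt status */

/* RK_MMU_STATUS bits tested by the predicates below (assumed positions) */
#define RK_MMU_STATUS_PAGING_ENABLED	BIT(0)
#define RK_MMU_STATUS_STALL_ACTIVE	BIT(2)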
289 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, in rk_iommu_zap_lines() argument
298 rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
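Only the register write inside the loop survives in the match above. A plausible reconstruction of rk_iommu_zap_lines(), assuming one IOTLB line per 4 KiB page (SPAGE_SIZE is an assumed constant equal to SZ_4K):

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
			       size_t size)
{
	dma_addr_t iova_end = iova + size;

	/* Shoot down one IOTLB line per small page in [iova, iova + size) */
	for (; iova < iova_end; iova += SPAGE_SIZE)
		rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
}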
301 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) in rk_iommu_is_stall_active() argument
303 return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE; in rk_iommu_is_stall_active()
306 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) in rk_iommu_is_paging_enabled() argument
308 return rk_iommu_read(iommu, RK_MMU_STATUS) & in rk_iommu_is_paging_enabled()
312 static int rk_iommu_enable_stall(struct rk_iommu *iommu) in rk_iommu_enable_stall() argument
316 if (rk_iommu_is_stall_active(iommu)) in rk_iommu_enable_stall()
320 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_stall()
323 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); in rk_iommu_enable_stall()
325 ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); in rk_iommu_enable_stall()
327 dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", in rk_iommu_enable_stall()
328 rk_iommu_read(iommu, RK_MMU_STATUS)); in rk_iommu_enable_stall()
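Every state change in this listing is confirmed through rk_wait_for(), whose definition is not among the matches. A minimal sketch of what it plausibly is: a poll-until-condition-or-timeout statement-expression macro returning 0 on success or -ETIMEDOUT, with the sleep range an assumption:

/* Assumed helper: poll COND until true or until MS milliseconds elapse */
#define rk_wait_for(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = (COND) ? 0 : -ETIMEDOUT;		\
			break;						\
		}							\
		usleep_range(100, 1000);				\
	}								\
	ret__;								\
})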
333 static int rk_iommu_disable_stall(struct rk_iommu *iommu) in rk_iommu_disable_stall() argument
337 if (!rk_iommu_is_stall_active(iommu)) in rk_iommu_disable_stall()
340 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); in rk_iommu_disable_stall()
342 ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); in rk_iommu_disable_stall()
344 dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", in rk_iommu_disable_stall()
345 rk_iommu_read(iommu, RK_MMU_STATUS)); in rk_iommu_disable_stall()
350 static int rk_iommu_enable_paging(struct rk_iommu *iommu) in rk_iommu_enable_paging() argument
354 if (rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_paging()
357 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); in rk_iommu_enable_paging()
359 ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); in rk_iommu_enable_paging()
361 dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", in rk_iommu_enable_paging()
362 rk_iommu_read(iommu, RK_MMU_STATUS)); in rk_iommu_enable_paging()
367 static int rk_iommu_disable_paging(struct rk_iommu *iommu) in rk_iommu_disable_paging() argument
371 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_disable_paging()
374 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); in rk_iommu_disable_paging()
376 ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); in rk_iommu_disable_paging()
378 dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", in rk_iommu_disable_paging()
379 rk_iommu_read(iommu, RK_MMU_STATUS)); in rk_iommu_disable_paging()
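All four enable/disable helpers share one shape: return early if the hardware is already in the target state, write the command, then poll the corresponding RK_MMU_STATUS bit for up to 1 ms. Reassembled from its matching lines, rk_iommu_enable_paging() plausibly reads in full:

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret;

	/* Already translating: nothing to do */
	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	/* Wait up to 1 ms for the status bit to latch */
	ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
	if (ret)
		dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
			rk_iommu_read(iommu, RK_MMU_STATUS));

	return ret;
}

The one asymmetry visible in the matches is rk_iommu_enable_stall(), which also bails out early when paging is disabled: a stall request is only meaningful while the MMU is actively translating.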
384 static int rk_iommu_force_reset(struct rk_iommu *iommu) in rk_iommu_force_reset() argument
393 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); in rk_iommu_force_reset()
395 dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); in rk_iommu_force_reset()
397 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); in rk_iommu_force_reset()
401 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); in rk_iommu_force_reset()
403 ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000, in rk_iommu_force_reset()
406 dev_err(iommu->dev, "FORCE_RESET command timed out\n"); in rk_iommu_force_reset()
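rk_iommu_force_reset() first proves that RK_MMU_DTE_ADDR is writable by writing a dummy pattern and reading it back, then issues FORCE_RESET and waits for the register to clear to zero. Filling in the control flow around the matched lines (DTE_ADDR_DUMMY's value and the timeout constant are assumptions):

#define DTE_ADDR_DUMMY		0xCAFEBABE	/* assumed sentinel pattern */
#define FORCE_RESET_TIMEOUT	100		/* ms, assumed */

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret;
	u32 dte_addr;

	/* Sanity-check DTE_ADDR by writing a dummy value and reading it back */
	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
	dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
	if (dte_addr != DTE_ADDR_DUMMY) {
		dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
		return -EFAULT;
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	/* Reset is complete once DTE_ADDR reads back as zero */
	ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
			  FORCE_RESET_TIMEOUT);
	if (ret)
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");

	return ret;
}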
411 static void log_iova(struct rk_iommu *iommu, dma_addr_t iova) in log_iova() argument
428 mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); in log_iova()
449 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", in log_iova()
451 …dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa … in log_iova()
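log_iova() decodes the faulting IOVA against the two-level page table whose root it reads back from RK_MMU_DTE_ADDR, then prints the DTE, PTE, and page it walks to. With a 32-bit IOVA, 4 KiB pages, and 1024-entry tables, the index helpers it leans on would look like this (the field widths are assumptions consistent with the index formats printed above):

/*
 * Assumed IOVA layout: 10-bit directory index, 10-bit table index,
 * 12-bit page offset.
 *
 *  31        22 21        12 11           0
 * +------------+------------+-------------+
 * |  DTE index |  PTE index | page offset |
 * +------------+------------+-------------+
 */
static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & 0xffc00000) >> 22;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & 0x003ff000) >> 12;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & 0x00000fff);
}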
459 struct rk_iommu *iommu = dev_id; in rk_iommu_irq() local
464 int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS); in rk_iommu_irq()
468 iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR); in rk_iommu_irq()
473 status = rk_iommu_read(iommu, RK_MMU_STATUS); in rk_iommu_irq()
477 dev_err(iommu->dev, "Page fault at %pad of type %s\n", in rk_iommu_irq()
481 log_iova(iommu, iova); in rk_iommu_irq()
488 if (iommu->domain) in rk_iommu_irq()
489 report_iommu_fault(iommu->domain, iommu->dev, iova, in rk_iommu_irq()
492 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); in rk_iommu_irq()
494 rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); in rk_iommu_irq()
495 rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE); in rk_iommu_irq()
499 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); in rk_iommu_irq()
502 dev_err(iommu->dev, "unexpected int_status: %#08x\n", in rk_iommu_irq()
505 rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status); in rk_iommu_irq()
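Read in order, the handler's flow is: fetch RK_MMU_INT_STATUS (returning IRQ_NONE on zero, since the line is shared), latch the faulting IOVA and status, log the page-table walk, offer the fault to the attached domain via report_iommu_fault(), then zap the IOTLB and acknowledge. A condensed sketch; the interrupt bit names and the read/write flag decode are assumptions:

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 int_status, status;
	dma_addr_t iova;
	int flags;

	int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
	if (int_status == 0)
		return IRQ_NONE;			/* shared line, not ours */

	iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);

	if (int_status & RK_MMU_IRQ_PAGE_FAULT) {	/* assumed bit name */
		status = rk_iommu_read(iommu, RK_MMU_STATUS);
		flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?	/* assumed */
			IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

		dev_err(iommu->dev, "Page fault at %pad of type %s\n", &iova,
			(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
		log_iova(iommu, iova);

		/* Let the domain's fault handler try to fix it up */
		if (iommu->domain)
			report_iommu_fault(iommu->domain, iommu->dev, iova,
					   flags);
		else
			dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

		rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
	}

	if (int_status & RK_MMU_IRQ_BUS_ERROR)		/* assumed bit name */
		dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

	if (int_status & ~(RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR))
		dev_err(iommu->dev, "unexpected int_status: %#08x\n",
			int_status);

	rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);

	return IRQ_HANDLED;
}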
547 struct rk_iommu *iommu; in rk_iommu_zap_iova() local
548 iommu = list_entry(pos, struct rk_iommu, node); in rk_iommu_zap_iova()
549 rk_iommu_zap_lines(iommu, iova, size); in rk_iommu_zap_iova()
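These three matches are the body of a loop that walks every IOMMU attached to the domain and shoots down the range on each. A reconstruction, assuming the domain keeps its IOMMUs on a spinlock-protected list (rk_domain->iommus_lock is an assumed name):

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* Shoot down these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;

		iommu = list_entry(pos, struct rk_iommu, node);
		rk_iommu_zap_lines(iommu, iova, size);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}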
746 struct rk_iommu *iommu; in rk_iommu_attach_device() local
756 iommu = rk_iommu_from_dev(dev); in rk_iommu_attach_device()
757 if (!iommu) in rk_iommu_attach_device()
760 ret = rk_iommu_enable_stall(iommu); in rk_iommu_attach_device()
764 ret = rk_iommu_force_reset(iommu); in rk_iommu_attach_device()
768 iommu->domain = domain; in rk_iommu_attach_device()
770 ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq, in rk_iommu_attach_device()
771 IRQF_SHARED, dev_name(dev), iommu); in rk_iommu_attach_device()
776 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr); in rk_iommu_attach_device()
777 rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); in rk_iommu_attach_device()
778 rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); in rk_iommu_attach_device()
780 ret = rk_iommu_enable_paging(iommu); in rk_iommu_attach_device()
785 list_add_tail(&iommu->node, &rk_domain->iommus); in rk_iommu_attach_device()
790 rk_iommu_disable_stall(iommu); in rk_iommu_attach_device()
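Reading the attach matches top to bottom gives a strict ordering: stall, force-reset, publish iommu->domain before the shared IRQ is requested, program the directory root, flush the IOTLB, unmask interrupts, re-enable paging, join the domain's shootdown list, and only then release the stall. A condensed sketch, with the domain accessor, the directory-table field, and the locking assumed:

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu_domain *rk_domain = domain->priv;	/* assumed accessor */
	struct rk_iommu *iommu;
	unsigned long flags;
	phys_addr_t dte_addr;
	int ret;

	/* Allow 'virtual devices' (e.g. drm) not backed by an IOMMU */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = rk_iommu_enable_stall(iommu);	/* quiesce while reprogramming */
	if (ret)
		return ret;

	ret = rk_iommu_force_reset(iommu);	/* known-clean hardware state */
	if (ret)
		return ret;

	iommu->domain = domain;			/* visible before the IRQ fires */

	ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
			       IRQF_SHARED, dev_name(dev), iommu);
	if (ret)
		return ret;

	dte_addr = virt_to_phys(rk_domain->dt);	/* assumed field name */
	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
	rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);	/* drop stale IOTLB */
	rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		return ret;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);	/* assumed lock */
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	rk_iommu_disable_stall(iommu);		/* resume translation */
	return 0;
}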
798 struct rk_iommu *iommu; in rk_iommu_detach_device() local
803 iommu = rk_iommu_from_dev(dev); in rk_iommu_detach_device()
804 if (!iommu) in rk_iommu_detach_device()
808 list_del_init(&iommu->node); in rk_iommu_detach_device()
812 rk_iommu_enable_stall(iommu); in rk_iommu_detach_device()
813 rk_iommu_disable_paging(iommu); in rk_iommu_detach_device()
814 rk_iommu_write(iommu, RK_MMU_INT_MASK, 0); in rk_iommu_detach_device()
815 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0); in rk_iommu_detach_device()
816 rk_iommu_disable_stall(iommu); in rk_iommu_detach_device()
818 devm_free_irq(dev, iommu->irq, iommu); in rk_iommu_detach_device()
820 iommu->domain = NULL; in rk_iommu_detach_device()
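Detach undoes attach in reverse: drop the IOMMU from the domain's shootdown list, stall, disable paging, mask interrupts, clear the directory pointer, unstall, free the IRQ, and only then clear the domain pointer. Almost every line is present in the matches; only the lock and the early-out are assumed:

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu_domain *rk_domain = domain->priv;	/* assumed accessor */
	struct rk_iommu *iommu;
	unsigned long flags;

	/* Allow 'virtual devices' (e.g. drm) not backed by an IOMMU */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);	/* assumed lock */
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Quiesce, turn translation off, then mask and clear everything */
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
	rk_iommu_disable_stall(iommu);

	devm_free_irq(dev, iommu->irq, iommu);

	iommu->domain = NULL;
}

Freeing the IRQ before clearing iommu->domain mirrors attach, where the domain pointer is set before the IRQ is requested, so the handler can never observe a NULL domain while it is registered.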
989 struct rk_iommu *iommu; in rk_iommu_probe() local
992 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); in rk_iommu_probe()
993 if (!iommu) in rk_iommu_probe()
996 platform_set_drvdata(pdev, iommu); in rk_iommu_probe()
997 iommu->dev = dev; in rk_iommu_probe()
1000 iommu->base = devm_ioremap_resource(&pdev->dev, res); in rk_iommu_probe()
1001 if (IS_ERR(iommu->base)) in rk_iommu_probe()
1002 return PTR_ERR(iommu->base); in rk_iommu_probe()
1004 iommu->irq = platform_get_irq(pdev, 0); in rk_iommu_probe()
1005 if (iommu->irq < 0) { in rk_iommu_probe()
1006 dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); in rk_iommu_probe()
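The probe fragment ends mid-error-path at the IRQ lookup. Filling in conventional platform-driver boilerplate around the surviving lines (the resource lookup and the return codes are assumptions):

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;

	/* Map the MMU's register window */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iommu->base))
		return PTR_ERR(iommu->base);

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
		return -ENXIO;		/* assumed error code */
	}

	return 0;
}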