cdd 88 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd;
cdd 254 drivers/dma/ti/cppi41.c static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
cdd 262 drivers/dma/ti/cppi41.c if (!((desc >= cdd->descs_phys) &&
cdd 263 drivers/dma/ti/cppi41.c (desc < (cdd->descs_phys + descs_size)))) {
cdd 267 drivers/dma/ti/cppi41.c desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
cdd 269 drivers/dma/ti/cppi41.c c = cdd->chan_busy[desc_num];
cdd 270 drivers/dma/ti/cppi41.c cdd->chan_busy[desc_num] = NULL;
cdd 273 drivers/dma/ti/cppi41.c pm_runtime_put(cdd->ddev.dev);
cdd 293 drivers/dma/ti/cppi41.c static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
cdd 297 drivers/dma/ti/cppi41.c desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
cdd 304 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = data;
cdd 305 drivers/dma/ti/cppi41.c u16 first_completion_queue = cdd->first_completion_queue;
cdd 306 drivers/dma/ti/cppi41.c u16 qmgr_num_pend = cdd->qmgr_num_pend;
cdd 315 drivers/dma/ti/cppi41.c val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
cdd 336 drivers/dma/ti/cppi41.c WARN_ON(cdd->is_suspended);
cdd 341 drivers/dma/ti/cppi41.c desc = cppi41_pop_desc(cdd, q_num);
cdd 342 drivers/dma/ti/cppi41.c c = desc_to_chan(cdd, desc);
cdd 374 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd;
cdd 377 drivers/dma/ti/cppi41.c error = pm_runtime_get_sync(cdd->ddev.dev);
cdd 379 drivers/dma/ti/cppi41.c dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
cdd 381 drivers/dma/ti/cppi41.c pm_runtime_put_noidle(cdd->ddev.dev);
cdd 393 drivers/dma/ti/cppi41.c pm_runtime_mark_last_busy(cdd->ddev.dev);
cdd 394 drivers/dma/ti/cppi41.c pm_runtime_put_autosuspend(cdd->ddev.dev);
cdd 402 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd;
cdd 405 drivers/dma/ti/cppi41.c error = pm_runtime_get_sync(cdd->ddev.dev);
cdd 407 drivers/dma/ti/cppi41.c pm_runtime_put_noidle(cdd->ddev.dev);
cdd 412 drivers/dma/ti/cppi41.c WARN_ON(!list_empty(&cdd->pending));
cdd 414 drivers/dma/ti/cppi41.c pm_runtime_mark_last_busy(cdd->ddev.dev);
cdd 415 drivers/dma/ti/cppi41.c pm_runtime_put_autosuspend(cdd->ddev.dev);
cdd 433 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd;
cdd 463 drivers/dma/ti/cppi41.c pm_runtime_get(cdd->ddev.dev);
cdd 466 drivers/dma/ti/cppi41.c desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
cdd 467 drivers/dma/ti/cppi41.c WARN_ON(cdd->chan_busy[desc_num]);
cdd 468 drivers/dma/ti/cppi41.c cdd->chan_busy[desc_num] = c;
cdd 472 drivers/dma/ti/cppi41.c cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
cdd 480 drivers/dma/ti/cppi41.c static void cppi41_run_queue(struct cppi41_dd *cdd)
cdd 484 drivers/dma/ti/cppi41.c list_for_each_entry_safe(c, _c, &cdd->pending, node) {
cdd 493 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd;
cdd 497 drivers/dma/ti/cppi41.c error = pm_runtime_get(cdd->ddev.dev);
cdd 499 drivers/dma/ti/cppi41.c pm_runtime_put_noidle(cdd->ddev.dev);
cdd 500 drivers/dma/ti/cppi41.c dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
cdd 506 drivers/dma/ti/cppi41.c spin_lock_irqsave(&cdd->lock, flags);
cdd 507 drivers/dma/ti/cppi41.c list_add_tail(&c->node, &cdd->pending);
cdd 508 drivers/dma/ti/cppi41.c if (!cdd->is_suspended)
cdd 509 drivers/dma/ti/cppi41.c cppi41_run_queue(cdd);
cdd 510 drivers/dma/ti/cppi41.c spin_unlock_irqrestore(&cdd->lock, flags);
cdd 512 drivers/dma/ti/cppi41.c pm_runtime_mark_last_busy(cdd->ddev.dev);
cdd 513 drivers/dma/ti/cppi41.c pm_runtime_put_autosuspend(cdd->ddev.dev);
cdd 590 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd;
cdd 596 drivers/dma/ti/cppi41.c error = pm_runtime_get(cdd->ddev.dev);
cdd 598 drivers/dma/ti/cppi41.c pm_runtime_put_noidle(cdd->ddev.dev);
cdd 603 drivers/dma/ti/cppi41.c if (cdd->is_suspended)
cdd 630 drivers/dma/ti/cppi41.c pm_runtime_mark_last_busy(cdd->ddev.dev);
cdd 631 drivers/dma/ti/cppi41.c pm_runtime_put_autosuspend(cdd->ddev.dev);
cdd 644 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd;
cdd 650 drivers/dma/ti/cppi41.c td = cdd->cd;
cdd 651 drivers/dma/ti/cppi41.c td += cdd->first_td_desc;
cdd 653 drivers/dma/ti/cppi41.c td_desc_phys = cdd->descs_phys;
cdd 654 drivers/dma/ti/cppi41.c td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);
cdd 662 drivers/dma/ti/cppi41.c cppi_writel(reg, cdd->qmgr_mem +
cdd 663 drivers/dma/ti/cppi41.c QMGR_QUEUE_D(cdd->td_queue.submit));
cdd 669 drivers/dma/ti/cppi41.c reg |= cdd->td_queue.complete;
cdd 679 drivers/dma/ti/cppi41.c desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
cdd 681 drivers/dma/ti/cppi41.c desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
cdd 717 drivers/dma/ti/cppi41.c desc_phys = cppi41_pop_desc(cdd, c->q_num);
cdd 719 drivers/dma/ti/cppi41.c desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
cdd 739 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd;
cdd 745 drivers/dma/ti/cppi41.c desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
cdd 746 drivers/dma/ti/cppi41.c if (!cdd->chan_busy[desc_num]) {
cdd 754 drivers/dma/ti/cppi41.c list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
cdd 767 drivers/dma/ti/cppi41.c WARN_ON(!cdd->chan_busy[desc_num]);
cdd 768 drivers/dma/ti/cppi41.c cdd->chan_busy[desc_num] = NULL;
cdd 771 drivers/dma/ti/cppi41.c pm_runtime_put(cdd->ddev.dev);
cdd 776 drivers/dma/ti/cppi41.c static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
cdd 780 drivers/dma/ti/cppi41.c u32 n_chans = cdd->n_chans;
cdd 795 drivers/dma/ti/cppi41.c cchan->cdd = cdd;
cdd 797 drivers/dma/ti/cppi41.c cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
cdd 800 drivers/dma/ti/cppi41.c cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
cdd 804 drivers/dma/ti/cppi41.c cchan->desc = &cdd->cd[i];
cdd 805 drivers/dma/ti/cppi41.c cchan->desc_phys = cdd->descs_phys;
cdd 807 drivers/dma/ti/cppi41.c cchan->chan.device = &cdd->ddev;
cdd 808 drivers/dma/ti/cppi41.c list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
cdd 810 drivers/dma/ti/cppi41.c cdd->first_td_desc = n_chans;
cdd 815 drivers/dma/ti/cppi41.c static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
cdd 824 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
cdd 825 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
cdd 827 drivers/dma/ti/cppi41.c dma_free_coherent(dev, mem_decs, cdd->cd,
cdd 828 drivers/dma/ti/cppi41.c cdd->descs_phys);
cdd 832 drivers/dma/ti/cppi41.c static void disable_sched(struct cppi41_dd *cdd)
cdd 834 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
cdd 837 drivers/dma/ti/cppi41.c static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
cdd 839 drivers/dma/ti/cppi41.c disable_sched(cdd);
cdd 841 drivers/dma/ti/cppi41.c purge_descs(dev, cdd);
cdd 843 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cdd 844 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cdd 845 drivers/dma/ti/cppi41.c dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
cdd 846 drivers/dma/ti/cppi41.c cdd->scratch_phys);
cdd 849 drivers/dma/ti/cppi41.c static int init_descs(struct device *dev, struct cppi41_dd *cdd)
cdd 873 drivers/dma/ti/cppi41.c cdd->cd = dma_alloc_coherent(dev, mem_decs,
cdd 874 drivers/dma/ti/cppi41.c &cdd->descs_phys, GFP_KERNEL);
cdd 875 drivers/dma/ti/cppi41.c if (!cdd->cd)
cdd 878 drivers/dma/ti/cppi41.c cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
cdd 879 drivers/dma/ti/cppi41.c cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));
cdd 886 drivers/dma/ti/cppi41.c static void init_sched(struct cppi41_dd *cdd)
cdd 893 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
cdd 894 drivers/dma/ti/cppi41.c for (ch = 0; ch < cdd->n_chans; ch += 2) {
cdd 901 drivers/dma/ti/cppi41.c cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
cdd 904 drivers/dma/ti/cppi41.c reg = cdd->n_chans * 2 - 1;
cdd 906 drivers/dma/ti/cppi41.c cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
cdd 909 drivers/dma/ti/cppi41.c static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
cdd 914 drivers/dma/ti/cppi41.c cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
cdd 915 drivers/dma/ti/cppi41.c &cdd->scratch_phys, GFP_KERNEL);
cdd 916 drivers/dma/ti/cppi41.c if (!cdd->qmgr_scratch)
cdd 919 drivers/dma/ti/cppi41.c cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cdd 920 drivers/dma/ti/cppi41.c cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cdd 921 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
cdd 923 drivers/dma/ti/cppi41.c ret = init_descs(dev, cdd);
cdd 927 drivers/dma/ti/cppi41.c cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
cdd 928 drivers/dma/ti/cppi41.c init_sched(cdd);
cdd 932 drivers/dma/ti/cppi41.c deinit_cppi41(dev, cdd);
cdd 949 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd;
cdd 963 drivers/dma/ti/cppi41.c cdd = cchan->cdd;
cdd 965 drivers/dma/ti/cppi41.c queues = cdd->queues_tx;
cdd 967 drivers/dma/ti/cppi41.c queues = cdd->queues_rx;
cdd 1039 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd;
cdd 1051 drivers/dma/ti/cppi41.c cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
cdd 1052 drivers/dma/ti/cppi41.c if (!cdd)
cdd 1055 drivers/dma/ti/cppi41.c dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
cdd 1056 drivers/dma/ti/cppi41.c cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
cdd 1057 drivers/dma/ti/cppi41.c cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
cdd 1058 drivers/dma/ti/cppi41.c cdd->ddev.device_tx_status = cppi41_dma_tx_status;
cdd 1059 drivers/dma/ti/cppi41.c cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
cdd 1060 drivers/dma/ti/cppi41.c cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
cdd 1061 drivers/dma/ti/cppi41.c cdd->ddev.device_terminate_all = cppi41_stop_chan;
cdd 1062 drivers/dma/ti/cppi41.c cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
cdd 1063 drivers/dma/ti/cppi41.c cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
cdd 1064 drivers/dma/ti/cppi41.c cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
cdd 1065 drivers/dma/ti/cppi41.c cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
cdd 1066 drivers/dma/ti/cppi41.c cdd->ddev.dev = dev;
cdd 1067 drivers/dma/ti/cppi41.c INIT_LIST_HEAD(&cdd->ddev.channels);
cdd 1068 drivers/dma/ti/cppi41.c cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
cdd 1076 drivers/dma/ti/cppi41.c cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
cdd 1077 drivers/dma/ti/cppi41.c if (IS_ERR(cdd->ctrl_mem))
cdd 1078 drivers/dma/ti/cppi41.c return PTR_ERR(cdd->ctrl_mem);
cdd 1081 drivers/dma/ti/cppi41.c cdd->sched_mem = devm_ioremap_resource(dev, mem);
cdd 1082 drivers/dma/ti/cppi41.c if (IS_ERR(cdd->sched_mem))
cdd 1083 drivers/dma/ti/cppi41.c return PTR_ERR(cdd->sched_mem);
cdd 1086 drivers/dma/ti/cppi41.c cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
cdd 1087 drivers/dma/ti/cppi41.c if (IS_ERR(cdd->qmgr_mem))
cdd 1088 drivers/dma/ti/cppi41.c return PTR_ERR(cdd->qmgr_mem);
cdd 1090 drivers/dma/ti/cppi41.c spin_lock_init(&cdd->lock);
cdd 1091 drivers/dma/ti/cppi41.c INIT_LIST_HEAD(&cdd->pending);
cdd 1093 drivers/dma/ti/cppi41.c platform_set_drvdata(pdev, cdd);
cdd 1102 drivers/dma/ti/cppi41.c cdd->queues_rx = glue_info->queues_rx;
cdd 1103 drivers/dma/ti/cppi41.c cdd->queues_tx = glue_info->queues_tx;
cdd 1104 drivers/dma/ti/cppi41.c cdd->td_queue = glue_info->td_queue;
cdd 1105 drivers/dma/ti/cppi41.c cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
cdd 1106 drivers/dma/ti/cppi41.c cdd->first_completion_queue = glue_info->first_completion_queue;
cdd 1109 drivers/dma/ti/cppi41.c "#dma-channels", &cdd->n_chans);
cdd 1113 drivers/dma/ti/cppi41.c ret = init_cppi41(dev, cdd);
cdd 1117 drivers/dma/ti/cppi41.c ret = cppi41_add_chans(dev, cdd);
cdd 1128 drivers/dma/ti/cppi41.c dev_name(dev), cdd);
cdd 1131 drivers/dma/ti/cppi41.c cdd->irq = irq;
cdd 1133 drivers/dma/ti/cppi41.c ret = dma_async_device_register(&cdd->ddev);
cdd 1147 drivers/dma/ti/cppi41.c dma_async_device_unregister(&cdd->ddev);
cdd 1149 drivers/dma/ti/cppi41.c deinit_cppi41(dev, cdd);
cdd 1161 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = platform_get_drvdata(pdev);
cdd 1169 drivers/dma/ti/cppi41.c dma_async_device_unregister(&cdd->ddev);
cdd 1171 drivers/dma/ti/cppi41.c devm_free_irq(&pdev->dev, cdd->irq, cdd);
cdd 1172 drivers/dma/ti/cppi41.c deinit_cppi41(&pdev->dev, cdd);
cdd 1181 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = dev_get_drvdata(dev);
cdd 1183 drivers/dma/ti/cppi41.c cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
cdd 1184 drivers/dma/ti/cppi41.c disable_sched(cdd);
cdd 1191 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = dev_get_drvdata(dev);
cdd 1196 drivers/dma/ti/cppi41.c cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
cdd 1198 drivers/dma/ti/cppi41.c list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
cdd 1202 drivers/dma/ti/cppi41.c init_sched(cdd);
cdd 1204 drivers/dma/ti/cppi41.c cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
cdd 1205 drivers/dma/ti/cppi41.c cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cdd 1206 drivers/dma/ti/cppi41.c cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cdd 1207 drivers/dma/ti/cppi41.c cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
cdd 1214 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = dev_get_drvdata(dev);
cdd 1217 drivers/dma/ti/cppi41.c spin_lock_irqsave(&cdd->lock, flags);
cdd 1218 drivers/dma/ti/cppi41.c cdd->is_suspended = true;
cdd 1219 drivers/dma/ti/cppi41.c WARN_ON(!list_empty(&cdd->pending));
cdd 1220 drivers/dma/ti/cppi41.c spin_unlock_irqrestore(&cdd->lock, flags);
cdd 1227 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = dev_get_drvdata(dev);
cdd 1230 drivers/dma/ti/cppi41.c spin_lock_irqsave(&cdd->lock, flags);
cdd 1231 drivers/dma/ti/cppi41.c cdd->is_suspended = false;
cdd 1232 drivers/dma/ti/cppi41.c cppi41_run_queue(cdd);
cdd 1233 drivers/dma/ti/cppi41.c spin_unlock_irqrestore(&cdd->lock, flags);
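The references above cluster around a few recurring patterns: pm_runtime get/put pairing on cdd->ddev.dev, queue-manager register writes through cdd->qmgr_mem, and mapping a completed descriptor's bus address back to its channel through cdd->chan_busy[]. The stand-alone C sketch below illustrates only that last mapping, as it appears around lines 254-273 and 463-468; it is a simplified user-space model, not the driver source. ALLOC_DECS_NUM, the struct layouts, and the use of plain uint32_t in place of dma_addr_t are placeholder assumptions.

/*
 * Sketch of the descriptor-to-channel lookup: descriptors live in one
 * contiguous coherent pool, so a completed descriptor's bus address is
 * range-checked against cdd->descs_phys and the offset is divided by the
 * descriptor size to index chan_busy[] and recover the owning channel.
 */
#include <stddef.h>
#include <stdint.h>

#define ALLOC_DECS_NUM	128			/* assumed pool size */

struct cppi41_desc { uint32_t pd[8]; };		/* placeholder layout */
struct cppi41_channel { int id; };		/* placeholder layout */

struct cppi41_dd {
	uint32_t descs_phys;			/* bus address of the descriptor pool */
	struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
};

static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, uint32_t desc)
{
	uint32_t descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;
	uint32_t desc_num;
	struct cppi41_channel *c;

	/* Ignore addresses that do not belong to this instance's pool. */
	if (!(desc >= cdd->descs_phys && desc < cdd->descs_phys + descs_size))
		return NULL;

	/* The pool is contiguous, so the offset divides evenly into an index. */
	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);

	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;	/* descriptor is no longer in flight */
	return c;
}

int main(void)
{
	struct cppi41_dd cdd = { .descs_phys = 0x1000 };
	struct cppi41_channel ch = { .id = 3 };

	/* Submission marks slot 3 busy; the completion address maps back to ch. */
	cdd.chan_busy[3] = &ch;
	return desc_to_chan(&cdd, 0x1000 + 3 * sizeof(struct cppi41_desc)) == &ch ? 0 : 1;
}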