Lines matching refs:schan — each entry shows the source line number, the matching source line, and the containing function; where present, a trailing "argument" or "local" tag marks the line on which schan is declared (as a function parameter or a local variable).

55 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)  in shdma_chan_xfer_ld_queue()  argument
57 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_chan_xfer_ld_queue()
62 if (ops->channel_busy(schan)) in shdma_chan_xfer_ld_queue()
66 list_for_each_entry(sdesc, &schan->ld_queue, node) in shdma_chan_xfer_ld_queue()
68 ops->start_xfer(schan, sdesc); in shdma_chan_xfer_ld_queue()
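
The fragments above are from shdma_chan_xfer_ld_queue(): bail out while the controller is still busy, otherwise hand the first waiting descriptor to ops->start_xfer(). Below is a minimal sketch of that pattern. It, like the later sketches, assumes the shdma-base.c environment (<linux/shdma-base.h>, <linux/dmaengine.h>, <linux/pm_runtime.h>, and the file-local to_shdma_dev()/to_shdma_chan() container_of helpers the fragments already use) and is an illustration, not the exact upstream code. The DESC_SUBMITTED descriptor state is an assumption; only the loop and the start_xfer() call are visible above. The caller is assumed to hold schan->chan_lock.

static void xfer_ld_queue_sketch(struct shdma_chan *schan)
{
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	/* Nothing to do while the controller is still working on a transfer */
	if (ops->channel_busy(schan))
		return;

	/* Start the first descriptor that is still waiting; DESC_SUBMITTED
	 * is an assumed marker, only the loop and start_xfer() appear in
	 * the fragments above. */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}
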
77 struct shdma_chan *schan = to_shdma_chan(tx->chan); in shdma_tx_submit() local
82 spin_lock_irq(&schan->chan_lock); in shdma_tx_submit()
84 power_up = list_empty(&schan->ld_queue); in shdma_tx_submit()
97 &chunk->node == &schan->ld_free)) in shdma_tx_submit()
108 list_move_tail(&chunk->node, &schan->ld_queue); in shdma_tx_submit()
110 dev_dbg(schan->dev, "submit #%d@%p on %d\n", in shdma_tx_submit()
111 tx->cookie, &chunk->async_tx, schan->id); in shdma_tx_submit()
116 schan->pm_state = SHDMA_PM_BUSY; in shdma_tx_submit()
118 ret = pm_runtime_get(schan->dev); in shdma_tx_submit()
120 spin_unlock_irq(&schan->chan_lock); in shdma_tx_submit()
122 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); in shdma_tx_submit()
124 pm_runtime_barrier(schan->dev); in shdma_tx_submit()
126 spin_lock_irq(&schan->chan_lock); in shdma_tx_submit()
129 if (schan->pm_state != SHDMA_PM_ESTABLISHED) { in shdma_tx_submit()
131 to_shdma_dev(schan->dma_chan.device); in shdma_tx_submit()
133 dev_dbg(schan->dev, "Bring up channel %d\n", in shdma_tx_submit()
134 schan->id); in shdma_tx_submit()
140 ops->setup_xfer(schan, schan->slave_id); in shdma_tx_submit()
142 if (schan->pm_state == SHDMA_PM_PENDING) in shdma_tx_submit()
143 shdma_chan_xfer_ld_queue(schan); in shdma_tx_submit()
144 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_tx_submit()
151 schan->pm_state = SHDMA_PM_PENDING; in shdma_tx_submit()
154 spin_unlock_irq(&schan->chan_lock); in shdma_tx_submit()
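
The shdma_tx_submit() fragments show the runtime-PM bring-up on the submit path: the first descriptor queued on an idle channel (power_up, i.e. ld_queue was empty) takes a pm_runtime reference, waits for the resume with pm_runtime_barrier(), programs the transfer with ops->setup_xfer() and marks the channel SHDMA_PM_ESTABLISHED; otherwise the channel is only flagged SHDMA_PM_PENDING. A hedged sketch of just that part, with cookie assignment and chunk bookkeeping omitted; the lock drop around the barrier mirrors the fragments.

static void tx_submit_pm_sketch(struct shdma_chan *schan, bool power_up)
{
	int ret;

	/* Entered and left with schan->chan_lock held and IRQs disabled,
	 * mirroring the locking visible in shdma_tx_submit() above. */
	if (!power_up) {
		/* Transfers already run on this channel: just note that
		 * more work is pending. */
		schan->pm_state = SHDMA_PM_PENDING;
		return;
	}

	schan->pm_state = SHDMA_PM_BUSY;

	/* Asynchronous runtime-PM get; an error is only reported */
	ret = pm_runtime_get(schan->dev);

	spin_unlock_irq(&schan->chan_lock);
	if (ret < 0)
		dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

	/* Wait for a possibly still-running resume to complete */
	pm_runtime_barrier(schan->dev);

	spin_lock_irq(&schan->chan_lock);

	/* The channel may already have been brought up while the lock was
	 * dropped; only program the hardware if that did not happen. */
	if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
		const struct shdma_ops *ops =
			to_shdma_dev(schan->dma_chan.device)->ops;

		dev_dbg(schan->dev, "Bring up channel %d\n", schan->id);
		ops->setup_xfer(schan, schan->slave_id);

		if (schan->pm_state == SHDMA_PM_PENDING)
			shdma_chan_xfer_ld_queue(schan);
		schan->pm_state = SHDMA_PM_ESTABLISHED;
	}
}
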
160 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) in shdma_get_desc() argument
164 list_for_each_entry(sdesc, &schan->ld_free, node) in shdma_get_desc()
174 static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr) in shdma_setup_slave() argument
176 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_setup_slave()
180 if (schan->dev->of_node) { in shdma_setup_slave()
181 match = schan->hw_req; in shdma_setup_slave()
182 ret = ops->set_slave(schan, match, slave_addr, true); in shdma_setup_slave()
186 match = schan->real_slave_id; in shdma_setup_slave()
189 if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num) in shdma_setup_slave()
192 if (test_and_set_bit(schan->real_slave_id, shdma_slave_used)) in shdma_setup_slave()
195 ret = ops->set_slave(schan, match, slave_addr, false); in shdma_setup_slave()
197 clear_bit(schan->real_slave_id, shdma_slave_used); in shdma_setup_slave()
201 schan->slave_id = schan->real_slave_id; in shdma_setup_slave()
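
shdma_setup_slave() claims a DMA slave: on DT systems the hardware request line (schan->hw_req) is matched, otherwise the requested slave ID is range-checked and reserved in the module-wide shdma_slave_used bitmap so that two channels cannot grab the same slave. A sketch of that claiming pattern; slave_num is passed in here because the fragments do not show where the upstream code takes it from, and the error codes are conventional choices rather than quotes.

static int setup_slave_sketch(struct shdma_chan *schan, dma_addr_t slave_addr,
			      int slave_num)
{
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	int match, ret;

	if (schan->dev->of_node) {
		/* DT case: match on the hardware request line */
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	/* Each slave ID may only be claimed by one channel at a time */
	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		/* Release the reservation again on failure */
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;
	return 0;
}
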
208 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_alloc_chan_resources() local
209 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_alloc_chan_resources()
221 schan->real_slave_id = slave->slave_id; in shdma_alloc_chan_resources()
222 ret = shdma_setup_slave(schan, 0); in shdma_alloc_chan_resources()
227 schan->slave_id = -EINVAL; in shdma_alloc_chan_resources()
230 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, in shdma_alloc_chan_resources()
232 if (!schan->desc) { in shdma_alloc_chan_resources()
236 schan->desc_num = NR_DESCS_PER_CHANNEL; in shdma_alloc_chan_resources()
239 desc = ops->embedded_desc(schan->desc, i); in shdma_alloc_chan_resources()
241 &schan->dma_chan); in shdma_alloc_chan_resources()
245 list_add(&desc->node, &schan->ld_free); in shdma_alloc_chan_resources()
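
shdma_alloc_chan_resources() builds the per-channel descriptor pool: NR_DESCS_PER_CHANNEL descriptors are allocated in one kcalloc() block, each embedded struct shdma_desc is located via ops->embedded_desc() and parked on ld_free. A sketch under stated assumptions: the sdev->desc_size element size and the dma_async_tx_descriptor_init()/tx_submit wiring are not visible in the fragments and follow common dmaengine practice.

static int alloc_desc_pool_sketch(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int i;

	/* sdev->desc_size (the glue driver's full descriptor size) is an
	 * assumption; it does not appear in the fragments above. */
	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, sdev->desc_size,
			      GFP_KERNEL);
	if (!schan->desc)
		return -ENOMEM;
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		/* The glue driver embeds struct shdma_desc in its own,
		 * larger descriptor; embedded_desc() returns the i-th one. */
		struct shdma_desc *desc = ops->embedded_desc(schan->desc, i);

		/* Standard dmaengine wiring; assumed, not shown above */
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;

		list_add(&desc->node, &schan->ld_free);
	}

	/* device_alloc_chan_resources() reports the pool size on success */
	return NR_DESCS_PER_CHANNEL;
}
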
279 struct shdma_chan *schan; in shdma_chan_filter() local
289 schan = to_shdma_chan(chan); in shdma_chan_filter()
298 if (schan->dev->of_node) { in shdma_chan_filter()
299 ret = sdev->ops->set_slave(schan, slave_id, 0, true); in shdma_chan_filter()
303 schan->real_slave_id = schan->slave_id; in shdma_chan_filter()
316 ret = sdev->ops->set_slave(schan, slave_id, 0, true); in shdma_chan_filter()
320 schan->real_slave_id = slave_id; in shdma_chan_filter()
326 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) in __ld_cleanup() argument
337 spin_lock_irqsave(&schan->chan_lock, flags); in __ld_cleanup()
338 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { in __ld_cleanup()
359 if (schan->dma_chan.completed_cookie != desc->cookie - 1) in __ld_cleanup()
360 dev_dbg(schan->dev, in __ld_cleanup()
363 schan->dma_chan.completed_cookie + 1); in __ld_cleanup()
364 schan->dma_chan.completed_cookie = desc->cookie; in __ld_cleanup()
372 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", in __ld_cleanup()
373 tx->cookie, tx, schan->id); in __ld_cleanup()
395 dev_dbg(schan->dev, "descriptor %p #%d completed.\n", in __ld_cleanup()
405 list_move(&desc->node, &schan->ld_free); in __ld_cleanup()
412 if (list_empty(&schan->ld_queue)) { in __ld_cleanup()
413 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); in __ld_cleanup()
414 pm_runtime_put(schan->dev); in __ld_cleanup()
415 schan->pm_state = SHDMA_PM_ESTABLISHED; in __ld_cleanup()
416 } else if (schan->pm_state == SHDMA_PM_PENDING) { in __ld_cleanup()
417 shdma_chan_xfer_ld_queue(schan); in __ld_cleanup()
427 schan->dma_chan.completed_cookie = schan->dma_chan.cookie; in __ld_cleanup()
429 list_splice_tail(&cyclic_list, &schan->ld_queue); in __ld_cleanup()
431 spin_unlock_irqrestore(&schan->chan_lock, flags); in __ld_cleanup()
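
__ld_cleanup() walks ld_queue under chan_lock, advances completed_cookie, runs callbacks and returns finished descriptors to ld_free; its tail is the counterpart of the submit path's pm_runtime_get(): once the queue drains, the runtime-PM reference is dropped, and if issue_pending() raced with the cleanup (SHDMA_PM_PENDING) the queue is restarted instead. A sketch of just that tail, assumed to run with schan->chan_lock held.

static void ld_cleanup_powerdown_sketch(struct shdma_chan *schan)
{
	/* Called right after completed descriptors were moved to ld_free */
	if (list_empty(&schan->ld_queue)) {
		/* Last transfer done: drop the reference taken at submit */
		dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
		pm_runtime_put(schan->dev);
		schan->pm_state = SHDMA_PM_ESTABLISHED;
	} else if (schan->pm_state == SHDMA_PM_PENDING) {
		/* issue_pending() ran while cleanup was in progress */
		shdma_chan_xfer_ld_queue(schan);
	}
}
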
444 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) in shdma_chan_ld_cleanup() argument
446 while (__ld_cleanup(schan, all)) in shdma_chan_ld_cleanup()
455 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_free_chan_resources() local
461 spin_lock_irq(&schan->chan_lock); in shdma_free_chan_resources()
462 ops->halt_channel(schan); in shdma_free_chan_resources()
463 spin_unlock_irq(&schan->chan_lock); in shdma_free_chan_resources()
468 if (!list_empty(&schan->ld_queue)) in shdma_free_chan_resources()
469 shdma_chan_ld_cleanup(schan, true); in shdma_free_chan_resources()
471 if (schan->slave_id >= 0) { in shdma_free_chan_resources()
473 clear_bit(schan->slave_id, shdma_slave_used); in shdma_free_chan_resources()
477 schan->real_slave_id = 0; in shdma_free_chan_resources()
479 spin_lock_irq(&schan->chan_lock); in shdma_free_chan_resources()
481 list_splice_init(&schan->ld_free, &list); in shdma_free_chan_resources()
482 schan->desc_num = 0; in shdma_free_chan_resources()
484 spin_unlock_irq(&schan->chan_lock); in shdma_free_chan_resources()
486 kfree(schan->desc); in shdma_free_chan_resources()
504 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, in shdma_add_desc() argument
508 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_add_desc()
517 new = shdma_get_desc(schan); in shdma_add_desc()
519 dev_err(schan->dev, "No free link descriptor available\n"); in shdma_add_desc()
523 ops->desc_setup(schan, new, *src, *dst, &copy_size); in shdma_add_desc()
534 dev_dbg(schan->dev, in shdma_add_desc()
563 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, in shdma_prep_sg() argument
575 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
578 spin_lock_irqsave(&schan->chan_lock, irq_flags); in shdma_prep_sg()
599 dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n", in shdma_prep_sg()
603 new = shdma_add_desc(schan, flags, in shdma_prep_sg()
607 new = shdma_add_desc(schan, flags, in shdma_prep_sg()
626 list_splice_tail(&tx_list, &schan->ld_free); in shdma_prep_sg()
628 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); in shdma_prep_sg()
635 list_splice(&tx_list, &schan->ld_free); in shdma_prep_sg()
637 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); in shdma_prep_sg()
646 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_memcpy() local
652 BUG_ON(!schan->desc_num); in shdma_prep_memcpy()
660 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, in shdma_prep_memcpy()
668 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_slave_sg() local
669 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_prep_slave_sg()
671 int slave_id = schan->slave_id; in shdma_prep_slave_sg()
677 BUG_ON(!schan->desc_num); in shdma_prep_slave_sg()
681 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", in shdma_prep_slave_sg()
686 slave_addr = ops->slave_addr(schan); in shdma_prep_slave_sg()
688 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, in shdma_prep_slave_sg()
699 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_prep_dma_cyclic() local
700 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); in shdma_prep_dma_cyclic()
704 int slave_id = schan->slave_id; in shdma_prep_dma_cyclic()
712 BUG_ON(!schan->desc_num); in shdma_prep_dma_cyclic()
715 dev_err(schan->dev, "sg length %d exceeds limit %d", in shdma_prep_dma_cyclic()
722 dev_warn(schan->dev, in shdma_prep_dma_cyclic()
728 slave_addr = ops->slave_addr(schan); in shdma_prep_dma_cyclic()
749 desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr, in shdma_prep_dma_cyclic()
758 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_terminate_all() local
763 spin_lock_irqsave(&schan->chan_lock, flags); in shdma_terminate_all()
764 ops->halt_channel(schan); in shdma_terminate_all()
766 if (ops->get_partial && !list_empty(&schan->ld_queue)) { in shdma_terminate_all()
768 struct shdma_desc *desc = list_first_entry(&schan->ld_queue, in shdma_terminate_all()
770 desc->partial = ops->get_partial(schan, desc); in shdma_terminate_all()
773 spin_unlock_irqrestore(&schan->chan_lock, flags); in shdma_terminate_all()
775 shdma_chan_ld_cleanup(schan, true); in shdma_terminate_all()
783 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_config() local
797 config->slave_id != schan->real_slave_id)) in shdma_config()
798 schan->real_slave_id = config->slave_id; in shdma_config()
804 return shdma_setup_slave(schan, in shdma_config()
811 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_issue_pending() local
813 spin_lock_irq(&schan->chan_lock); in shdma_issue_pending()
814 if (schan->pm_state == SHDMA_PM_ESTABLISHED) in shdma_issue_pending()
815 shdma_chan_xfer_ld_queue(schan); in shdma_issue_pending()
817 schan->pm_state = SHDMA_PM_PENDING; in shdma_issue_pending()
818 spin_unlock_irq(&schan->chan_lock); in shdma_issue_pending()
825 struct shdma_chan *schan = to_shdma_chan(chan); in shdma_tx_status() local
829 shdma_chan_ld_cleanup(schan, false); in shdma_tx_status()
831 spin_lock_irqsave(&schan->chan_lock, flags); in shdma_tx_status()
842 list_for_each_entry(sdesc, &schan->ld_queue, node) in shdma_tx_status()
849 spin_unlock_irqrestore(&schan->chan_lock, flags); in shdma_tx_status()
858 struct shdma_chan *schan; in shdma_reset() local
863 shdma_for_each_chan(schan, sdev, i) { in shdma_reset()
867 if (!schan) in shdma_reset()
870 spin_lock(&schan->chan_lock); in shdma_reset()
873 ops->halt_channel(schan); in shdma_reset()
875 list_splice_init(&schan->ld_queue, &dl); in shdma_reset()
878 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); in shdma_reset()
879 pm_runtime_put(schan->dev); in shdma_reset()
881 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_reset()
883 spin_unlock(&schan->chan_lock); in shdma_reset()
893 spin_lock(&schan->chan_lock); in shdma_reset()
894 list_splice(&dl, &schan->ld_free); in shdma_reset()
895 spin_unlock(&schan->chan_lock); in shdma_reset()
906 struct shdma_chan *schan = dev; in chan_irq() local
908 to_shdma_dev(schan->dma_chan.device)->ops; in chan_irq()
911 spin_lock(&schan->chan_lock); in chan_irq()
913 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; in chan_irq()
915 spin_unlock(&schan->chan_lock); in chan_irq()
922 struct shdma_chan *schan = dev; in chan_irqt() local
924 to_shdma_dev(schan->dma_chan.device)->ops; in chan_irqt()
927 spin_lock_irq(&schan->chan_lock); in chan_irqt()
928 list_for_each_entry(sdesc, &schan->ld_queue, node) { in chan_irqt()
930 ops->desc_completed(schan, sdesc)) { in chan_irqt()
931 dev_dbg(schan->dev, "done #%d@%p\n", in chan_irqt()
938 shdma_chan_xfer_ld_queue(schan); in chan_irqt()
939 spin_unlock_irq(&schan->chan_lock); in chan_irqt()
941 shdma_chan_ld_cleanup(schan, false); in chan_irqt()
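
chan_irq()/chan_irqt() split the interrupt handling: the hard handler only asks the glue driver (ops->chan_irq()) whether this channel raised the interrupt and, if so, returns IRQ_WAKE_THREAD; the threaded handler then marks finished descriptors, restarts the queue and runs the sleeping-context cleanup. A sketch of both halves; the DESC_SUBMITTED/DESC_COMPLETED markers on sdesc->mark are assumptions, the fragments show only the desc_completed() test and the "done" dev_dbg().

static irqreturn_t chan_irq_sketch(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	/* Hard IRQ context: only acknowledge and defer to the thread */
	spin_lock(&schan->chan_lock);
	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt_sketch(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		/* DESC_SUBMITTED/DESC_COMPLETED are assumed state markers */
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
		}
	}
	/* Hardware is idle again: feed it the next queued descriptor */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Run callbacks and recycle descriptors outside atomic context */
	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}
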
946 int shdma_request_irq(struct shdma_chan *schan, int irq, in shdma_request_irq() argument
949 int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq, in shdma_request_irq()
950 chan_irqt, flags, name, schan); in shdma_request_irq()
952 schan->irq = ret < 0 ? ret : irq; in shdma_request_irq()
959 struct shdma_chan *schan, int id) in shdma_chan_probe() argument
961 schan->pm_state = SHDMA_PM_ESTABLISHED; in shdma_chan_probe()
964 schan->dma_chan.device = &sdev->dma_dev; in shdma_chan_probe()
965 dma_cookie_init(&schan->dma_chan); in shdma_chan_probe()
967 schan->dev = sdev->dma_dev.dev; in shdma_chan_probe()
968 schan->id = id; in shdma_chan_probe()
970 if (!schan->max_xfer_len) in shdma_chan_probe()
971 schan->max_xfer_len = PAGE_SIZE; in shdma_chan_probe()
973 spin_lock_init(&schan->chan_lock); in shdma_chan_probe()
976 INIT_LIST_HEAD(&schan->ld_queue); in shdma_chan_probe()
977 INIT_LIST_HEAD(&schan->ld_free); in shdma_chan_probe()
980 list_add_tail(&schan->dma_chan.device_node, in shdma_chan_probe()
982 sdev->schan[id] = schan; in shdma_chan_probe()
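
shdma_chan_probe() initializes a channel (lock, lists, cookie counter, the PAGE_SIZE fallback for max_xfer_len) and registers it with the dmaengine core, and shdma_request_irq() hooks the channel interrupt up to the chan_irq()/chan_irqt() pair above. A hypothetical glue-driver snippet showing how the two would typically be combined; "my_dmae_chan", its register base and the "my-dmae" IRQ name are illustrative, and the shdma_chan_probe(sdev, schan, id) / shdma_request_irq(schan, irq, flags, name) signatures are inferred from the fragments.

struct my_dmae_chan {
	struct shdma_chan shdma_chan;	/* embedded so container_of() works */
	void __iomem *base;		/* hypothetical channel registers */
};

static int my_chan_setup(struct shdma_dev *sdev, struct my_dmae_chan *mchan,
			 int id, int irq)
{
	struct shdma_chan *schan = &mchan->shdma_chan;

	/* schan->max_xfer_len may be preset here; shdma_chan_probe() falls
	 * back to PAGE_SIZE when it is left at zero (see above). */
	shdma_chan_probe(sdev, schan, id);

	/* Route the channel interrupt through the shared chan_irq() /
	 * chan_irqt() handlers shown earlier. */
	return shdma_request_irq(schan, irq, IRQF_SHARED, "my-dmae");
}
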
986 void shdma_chan_remove(struct shdma_chan *schan) in shdma_chan_remove() argument
988 list_del(&schan->dma_chan.device_node); in shdma_chan_remove()
1014 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); in shdma_init()
1015 if (!sdev->schan) in shdma_init()
1042 kfree(sdev->schan); in shdma_cleanup()