Lines Matching refs:chan

157 struct nbpf_channel *chan; member
230 struct nbpf_channel chan[]; member
298 static inline u32 nbpf_chan_read(struct nbpf_channel *chan, in nbpf_chan_read() argument
301 u32 data = ioread32(chan->base + offset); in nbpf_chan_read()
302 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_read()
303 __func__, chan->base, offset, data); in nbpf_chan_read()
307 static inline void nbpf_chan_write(struct nbpf_channel *chan, in nbpf_chan_write() argument
310 iowrite32(data, chan->base + offset); in nbpf_chan_write()
311 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_write()
312 __func__, chan->base, offset, data); in nbpf_chan_write()
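Illustrative sketch only, not part of the listed driver: a user-space analogue of the nbpf_chan_read()/nbpf_chan_write() accessor pair above, where a plain array stands in for the channel's MMIO window (chan->base) and fprintf() stands in for dev_dbg(); struct fake_chan, chan_read(), chan_write() and REG_CTRL are invented names for the example.

#include <stdint.h>
#include <stdio.h>

struct fake_chan {
	uint32_t regs[16];	/* stand-in for chan->base */
};

static uint32_t chan_read(struct fake_chan *chan, unsigned int offset)
{
	uint32_t data = chan->regs[offset / 4];

	/* log every register access, as the dev_dbg() calls above do */
	fprintf(stderr, "%s(%p + 0x%x) = 0x%x\n",
		__func__, (void *)chan->regs, offset, (unsigned int)data);
	return data;
}

static void chan_write(struct fake_chan *chan, unsigned int offset,
		       uint32_t data)
{
	chan->regs[offset / 4] = data;
	fprintf(stderr, "%s(%p + 0x%x) = 0x%x\n",
		__func__, (void *)chan->regs, offset, (unsigned int)data);
}

#define REG_CTRL	0x04	/* hypothetical register offset */

int main(void)
{
	struct fake_chan chan = { { 0 } };

	chan_write(&chan, REG_CTRL, 0x1);
	return chan_read(&chan, REG_CTRL) == 0x1 ? 0 : 1;
}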
332 static void nbpf_chan_halt(struct nbpf_channel *chan) in nbpf_chan_halt() argument
334 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); in nbpf_chan_halt()
337 static bool nbpf_status_get(struct nbpf_channel *chan) in nbpf_status_get() argument
339 u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); in nbpf_status_get()
341 return status & BIT(chan - chan->nbpf->chan); in nbpf_status_get()
344 static void nbpf_status_ack(struct nbpf_channel *chan) in nbpf_status_ack() argument
346 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); in nbpf_status_ack()
356 return nbpf->chan + __ffs(error); in nbpf_error_get_channel()
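Illustrative sketch only: the per-channel bit handling visible in nbpf_status_get() and nbpf_error_get_channel() above. A channel's index is recovered by pointer arithmetic against the start of the channel array, and an error bitmask is mapped back to a channel via the position of its lowest set bit; the kernel uses BIT() and __ffs(), here a plain shift and __builtin_ctz() stand in, and struct fake_ctlr with its fields is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define NUM_CHANNELS 8

struct fake_chan { int dummy; };

struct fake_ctlr {
	struct fake_chan chan[NUM_CHANNELS];
};

static int status_set(struct fake_ctlr *ctlr, struct fake_chan *chan,
		      uint32_t status)
{
	/* chan - ctlr->chan is the channel's index in the array */
	return !!(status & (1u << (chan - ctlr->chan)));
}

static struct fake_chan *error_get_channel(struct fake_ctlr *ctlr,
					   uint32_t error)
{
	/* the lowest set bit selects the first erroring channel */
	return ctlr->chan + __builtin_ctz(error);
}

int main(void)
{
	struct fake_ctlr ctlr;
	uint32_t status = 1u << 3, error = 1u << 5;

	printf("chan 3 done: %d\n", status_set(&ctlr, &ctlr.chan[3], status));
	printf("erroring chan index: %ld\n",
	       (long)(error_get_channel(&ctlr, error) - ctlr.chan));
	return 0;
}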
359 static void nbpf_error_clear(struct nbpf_channel *chan) in nbpf_error_clear() argument
365 nbpf_chan_halt(chan); in nbpf_error_clear()
368 status = nbpf_chan_read(chan, NBPF_CHAN_STAT); in nbpf_error_clear()
375 dev_err(chan->dma_chan.device->dev, in nbpf_error_clear()
378 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); in nbpf_error_clear()
383 struct nbpf_channel *chan = desc->chan; in nbpf_start() local
386 nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); in nbpf_start()
387 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); in nbpf_start()
388 chan->paused = false; in nbpf_start()
392 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); in nbpf_start()
394 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, in nbpf_start()
395 nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); in nbpf_start()
400 static void nbpf_chan_prepare(struct nbpf_channel *chan) in nbpf_chan_prepare() argument
402 chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | in nbpf_chan_prepare()
403 (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | in nbpf_chan_prepare()
404 (chan->flags & NBPF_SLAVE_RQ_LEVEL ? in nbpf_chan_prepare()
406 chan->terminal; in nbpf_chan_prepare()
409 static void nbpf_chan_prepare_default(struct nbpf_channel *chan) in nbpf_chan_prepare_default() argument
412 chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; in nbpf_chan_prepare_default()
413 chan->terminal = 0; in nbpf_chan_prepare_default()
414 chan->flags = 0; in nbpf_chan_prepare_default()
417 static void nbpf_chan_configure(struct nbpf_channel *chan) in nbpf_chan_configure() argument
424 nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); in nbpf_chan_configure()
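Illustrative sketch only: the flag-to-register translation done by nbpf_chan_prepare() above, where slave-request polarity flags are folded into one configuration word that nbpf_chan_configure() later writes out. All constants and the build_dmarq_cfg() helper are invented for the example; the real bit values live in the driver's register definitions.

#include <stdint.h>
#include <stdio.h>

#define RQ_HIGH		(1u << 0)	/* request active high */
#define RQ_LOW		(1u << 1)	/* request active low */
#define RQ_LEVEL	(1u << 2)	/* level- rather than edge-triggered */

#define CFG_HIEN	(1u << 8)
#define CFG_LOEN	(1u << 9)
#define CFG_LVL		(1u << 10)

static uint32_t build_dmarq_cfg(uint32_t flags, uint32_t terminal)
{
	/* mirror the shape of the conditional OR chain in the listing */
	return (flags & RQ_HIGH ? CFG_HIEN : 0) |
	       (flags & RQ_LOW ? CFG_LOEN : 0) |
	       (flags & RQ_LEVEL ? CFG_LVL : 0) |
	       terminal;
}

int main(void)
{
	printf("cfg = 0x%x\n", build_dmarq_cfg(RQ_HIGH | RQ_LEVEL, 5));
	return 0;
}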
481 struct nbpf_channel *chan = desc->chan; in nbpf_prep_one() local
482 struct device *dev = chan->dma_chan.device->dev; in nbpf_prep_one()
509 mem_xfer = nbpf_xfer_ds(chan->nbpf, size); in nbpf_prep_one()
513 can_burst = chan->slave_src_width >= 3; in nbpf_prep_one()
515 chan->slave_src_burst : chan->slave_src_width); in nbpf_prep_one()
520 if (mem_xfer > chan->slave_src_burst && !can_burst) in nbpf_prep_one()
521 mem_xfer = chan->slave_src_burst; in nbpf_prep_one()
529 slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? in nbpf_prep_one()
530 chan->slave_dst_burst : chan->slave_dst_width); in nbpf_prep_one()
545 hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) | in nbpf_prep_one()
558 static size_t nbpf_bytes_left(struct nbpf_channel *chan) in nbpf_bytes_left() argument
560 return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); in nbpf_bytes_left()
573 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_issue_pending() local
578 spin_lock_irqsave(&chan->lock, flags); in nbpf_issue_pending()
579 if (list_empty(&chan->queued)) in nbpf_issue_pending()
582 list_splice_tail_init(&chan->queued, &chan->active); in nbpf_issue_pending()
584 if (!chan->running) { in nbpf_issue_pending()
585 struct nbpf_desc *desc = list_first_entry(&chan->active, in nbpf_issue_pending()
588 chan->running = desc; in nbpf_issue_pending()
592 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_issue_pending()
598 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_tx_status() local
605 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_status()
606 running = chan->running ? chan->running->async_tx.cookie : -EINVAL; in nbpf_tx_status()
609 state->residue = nbpf_bytes_left(chan); in nbpf_tx_status()
616 list_for_each_entry(desc, &chan->active, node) in nbpf_tx_status()
623 list_for_each_entry(desc, &chan->queued, node) in nbpf_tx_status()
633 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_status()
636 if (chan->paused) in nbpf_tx_status()
645 struct nbpf_channel *chan = desc->chan; in nbpf_tx_submit() local
649 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_submit()
651 list_add_tail(&desc->node, &chan->queued); in nbpf_tx_submit()
652 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_submit()
654 dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); in nbpf_tx_submit()
659 static int nbpf_desc_page_alloc(struct nbpf_channel *chan) in nbpf_desc_page_alloc() argument
661 struct dma_chan *dchan = &chan->dma_chan; in nbpf_desc_page_alloc()
694 desc->chan = chan; in nbpf_desc_page_alloc()
703 spin_lock_irq(&chan->lock); in nbpf_desc_page_alloc()
704 list_splice_tail(&lhead, &chan->free_links); in nbpf_desc_page_alloc()
705 list_splice_tail(&head, &chan->free); in nbpf_desc_page_alloc()
706 list_add(&dpage->node, &chan->desc_page); in nbpf_desc_page_alloc()
707 spin_unlock_irq(&chan->lock); in nbpf_desc_page_alloc()
714 struct nbpf_channel *chan = desc->chan; in nbpf_desc_put() local
718 spin_lock_irqsave(&chan->lock, flags); in nbpf_desc_put()
720 list_move(&ldesc->node, &chan->free_links); in nbpf_desc_put()
722 list_add(&desc->node, &chan->free); in nbpf_desc_put()
723 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_desc_put()
726 static void nbpf_scan_acked(struct nbpf_channel *chan) in nbpf_scan_acked() argument
732 spin_lock_irqsave(&chan->lock, flags); in nbpf_scan_acked()
733 list_for_each_entry_safe(desc, tmp, &chan->done, node) in nbpf_scan_acked()
738 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_scan_acked()
752 static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) in nbpf_desc_get() argument
757 nbpf_scan_acked(chan); in nbpf_desc_get()
759 spin_lock_irq(&chan->lock); in nbpf_desc_get()
764 if (list_empty(&chan->free)) { in nbpf_desc_get()
766 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
767 ret = nbpf_desc_page_alloc(chan); in nbpf_desc_get()
770 spin_lock_irq(&chan->lock); in nbpf_desc_get()
773 desc = list_first_entry(&chan->free, struct nbpf_desc, node); in nbpf_desc_get()
777 if (list_empty(&chan->free_links)) { in nbpf_desc_get()
779 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
780 ret = nbpf_desc_page_alloc(chan); in nbpf_desc_get()
785 spin_lock_irq(&chan->lock); in nbpf_desc_get()
789 ldesc = list_first_entry(&chan->free_links, in nbpf_desc_get()
804 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
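Illustrative sketch only: the "unlock, allocate, relock, re-test" pattern visible in nbpf_desc_get() above, where the channel lock is dropped around nbpf_desc_page_alloc() and the emptiness test is repeated once the lock is retaken. A pthread mutex stands in for the spinlock and a counter stands in for the free lists; refill_pool() and get_desc() are invented names.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int free_descs;		/* stand-in for the chan->free list */

static int refill_pool(void)
{
	/* build new descriptors outside the lock, then splice them in
	 * under it, as nbpf_desc_page_alloc() does */
	int batch = 8;

	pthread_mutex_lock(&lock);
	free_descs += batch;
	pthread_mutex_unlock(&lock);
	return 0;
}

static int get_desc(void)
{
	pthread_mutex_lock(&lock);
	while (free_descs == 0) {
		/* drop the lock for the slow allocation ... */
		pthread_mutex_unlock(&lock);
		if (refill_pool())
			return -1;
		/* ... then retake it and re-check the condition */
		pthread_mutex_lock(&lock);
	}
	free_descs--;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	int ret = get_desc();

	printf("get_desc() = %d, remaining = %d\n", ret, free_descs);
	return ret;
}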
809 static void nbpf_chan_idle(struct nbpf_channel *chan) in nbpf_chan_idle() argument
815 spin_lock_irqsave(&chan->lock, flags); in nbpf_chan_idle()
817 list_splice_init(&chan->done, &head); in nbpf_chan_idle()
818 list_splice_init(&chan->active, &head); in nbpf_chan_idle()
819 list_splice_init(&chan->queued, &head); in nbpf_chan_idle()
821 chan->running = NULL; in nbpf_chan_idle()
823 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_chan_idle()
826 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", in nbpf_chan_idle()
835 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_pause() local
839 chan->paused = true; in nbpf_pause()
840 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); in nbpf_pause()
842 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); in nbpf_pause()
849 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_terminate_all() local
854 nbpf_chan_halt(chan); in nbpf_terminate_all()
855 nbpf_chan_idle(chan); in nbpf_terminate_all()
863 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_config() local
873 chan->slave_dst_addr = config->dst_addr; in nbpf_config()
874 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
876 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
879 chan->slave_src_addr = config->src_addr; in nbpf_config()
880 chan->slave_src_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
882 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
889 static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, in nbpf_prep_sg() argument
921 desc = nbpf_desc_get(chan, len); in nbpf_prep_sg()
962 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_prep_memcpy() local
978 return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1, in nbpf_prep_memcpy()
988 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_prep_memcpy_sg() local
993 return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents, in nbpf_prep_memcpy_sg()
1001 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_prep_slave_sg() local
1010 sg_dma_address(&slave_sg) = chan->slave_dst_addr; in nbpf_prep_slave_sg()
1011 return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len, in nbpf_prep_slave_sg()
1015 sg_dma_address(&slave_sg) = chan->slave_src_addr; in nbpf_prep_slave_sg()
1016 return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len, in nbpf_prep_slave_sg()
1026 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_alloc_chan_resources() local
1029 INIT_LIST_HEAD(&chan->free); in nbpf_alloc_chan_resources()
1030 INIT_LIST_HEAD(&chan->free_links); in nbpf_alloc_chan_resources()
1031 INIT_LIST_HEAD(&chan->queued); in nbpf_alloc_chan_resources()
1032 INIT_LIST_HEAD(&chan->active); in nbpf_alloc_chan_resources()
1033 INIT_LIST_HEAD(&chan->done); in nbpf_alloc_chan_resources()
1035 ret = nbpf_desc_page_alloc(chan); in nbpf_alloc_chan_resources()
1040 chan->terminal); in nbpf_alloc_chan_resources()
1042 nbpf_chan_configure(chan); in nbpf_alloc_chan_resources()
1049 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_free_chan_resources() local
1054 nbpf_chan_halt(chan); in nbpf_free_chan_resources()
1055 nbpf_chan_idle(chan); in nbpf_free_chan_resources()
1057 nbpf_chan_prepare_default(chan); in nbpf_free_chan_resources()
1059 list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { in nbpf_free_chan_resources()
1077 struct nbpf_channel *chan; in nbpf_of_xlate() local
1089 chan = nbpf_to_chan(dchan); in nbpf_of_xlate()
1091 chan->terminal = dma_spec->args[0]; in nbpf_of_xlate()
1092 chan->flags = dma_spec->args[1]; in nbpf_of_xlate()
1094 nbpf_chan_prepare(chan); in nbpf_of_xlate()
1095 nbpf_chan_configure(chan); in nbpf_of_xlate()
1102 struct nbpf_channel *chan = (struct nbpf_channel *)data; in nbpf_chan_tasklet() local
1107 while (!list_empty(&chan->done)) { in nbpf_chan_tasklet()
1110 spin_lock_irq(&chan->lock); in nbpf_chan_tasklet()
1112 list_for_each_entry_safe(desc, tmp, &chan->done, node) { in nbpf_chan_tasklet()
1123 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1135 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1157 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1169 struct nbpf_channel *chan = dev; in nbpf_chan_irq() local
1170 bool done = nbpf_status_get(chan); in nbpf_chan_irq()
1178 nbpf_status_ack(chan); in nbpf_chan_irq()
1180 dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__); in nbpf_chan_irq()
1182 spin_lock(&chan->lock); in nbpf_chan_irq()
1183 desc = chan->running; in nbpf_chan_irq()
1192 list_move_tail(&desc->node, &chan->done); in nbpf_chan_irq()
1193 chan->running = NULL; in nbpf_chan_irq()
1195 if (!list_empty(&chan->active)) { in nbpf_chan_irq()
1196 desc = list_first_entry(&chan->active, in nbpf_chan_irq()
1199 chan->running = desc; in nbpf_chan_irq()
1203 spin_unlock(&chan->lock); in nbpf_chan_irq()
1206 tasklet_schedule(&chan->tasklet); in nbpf_chan_irq()
1222 struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error); in nbpf_err_irq() local
1224 nbpf_error_clear(chan); in nbpf_err_irq()
1225 nbpf_chan_idle(chan); in nbpf_err_irq()
1235 struct nbpf_channel *chan = nbpf->chan + n; in nbpf_chan_probe() local
1238 chan->nbpf = nbpf; in nbpf_chan_probe()
1239 chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; in nbpf_chan_probe()
1240 INIT_LIST_HEAD(&chan->desc_page); in nbpf_chan_probe()
1241 spin_lock_init(&chan->lock); in nbpf_chan_probe()
1242 chan->dma_chan.device = dma_dev; in nbpf_chan_probe()
1243 dma_cookie_init(&chan->dma_chan); in nbpf_chan_probe()
1244 nbpf_chan_prepare_default(chan); in nbpf_chan_probe()
1246 dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); in nbpf_chan_probe()
1248 snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); in nbpf_chan_probe()
1250 tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan); in nbpf_chan_probe()
1251 ret = devm_request_irq(dma_dev->dev, chan->irq, in nbpf_chan_probe()
1253 chan->name, chan); in nbpf_chan_probe()
1258 list_add_tail(&chan->dma_chan.device_node, in nbpf_chan_probe()
1302 sizeof(nbpf->chan[0]), GFP_KERNEL); in nbpf_probe()
1344 nbpf->chan[i].irq = irqbuf[0]; in nbpf_probe()
1351 struct nbpf_channel *chan; in nbpf_probe() local
1353 for (i = 0, chan = nbpf->chan; i <= num_channels; in nbpf_probe()
1354 i++, chan++) { in nbpf_probe()
1358 chan->irq = irqbuf[i]; in nbpf_probe()
1361 if (chan != nbpf->chan + num_channels) in nbpf_probe()
1371 nbpf->chan[i].irq = irq; in nbpf_probe()
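Illustrative sketch only: the IRQ distribution seen at the end of nbpf_probe() above. With one interrupt line per channel plus a dedicated error interrupt, the loop hands the interrupt array out to the channels in order, skipping the slot that carries the error interrupt, and then checks that every channel received one. Array sizes and the assign_irqs() helper are invented for the example.

#include <stdio.h>

static int assign_irqs(const int *irqbuf, int nr_irqs, int eirq,
		       int *chan_irq, int num_channels)
{
	int i, c;

	for (i = 0, c = 0; i <= num_channels; i++, c++) {
		/* skip the slot holding the error IRQ */
		if (i < nr_irqs && irqbuf[i] == eirq)
			i++;
		if (c == num_channels)
			break;
		if (i >= nr_irqs)
			return -1;
		chan_irq[c] = irqbuf[i];
	}
	/* final check that every channel got an IRQ */
	return c == num_channels ? 0 : -1;
}

int main(void)
{
	int irqbuf[] = { 40, 41, 42, 99, 43 };	/* 99 is the error IRQ */
	int chan_irq[4];
	int ret = assign_irqs(irqbuf, 5, 99, chan_irq, 4);

	printf("ret=%d irqs: %d %d %d %d\n", ret,
	       chan_irq[0], chan_irq[1], chan_irq[2], chan_irq[3]);
	return ret;
}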