Lines matching refs: c (references to the identifier c throughout the bcm2835 DMA driver)
88 struct bcm2835_chan *c; member
138 static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c) in to_bcm2835_dma_chan() argument
140 return container_of(c, struct bcm2835_chan, vc.chan); in to_bcm2835_dma_chan()
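For orientation before the per-function matches, here is a sketch of the structures these lines dereference. The layout is an assumption pieced together from the accesses in this listing; only the fields actually referenced are certain. Note that the line-88 member c evidently belongs to struct bcm2835_desc, given desc->c at line 155 and d->c = c at line 394.

/* Sketch of assumed layouts; only the referenced fields are confirmed. */
struct bcm2835_cb_entry {
        struct bcm2835_dma_cb *cb;      /* CPU pointer to a HW control block */
        dma_addr_t paddr;               /* its bus address, as the DMA sees it */
};

struct bcm2835_chan {
        struct virt_dma_chan vc;        /* container_of() target above */
        struct list_head node;
        struct dma_slave_config cfg;
        bool cyclic;
        unsigned int dreq;              /* peripheral DREQ number */
        int ch;                         /* channel index */
        struct bcm2835_desc *desc;      /* descriptor currently in flight */
        struct dma_pool *cb_pool;       /* pool backing the control blocks */
        void __iomem *chan_base;        /* this channel's register window */
        int irq_number;
};

struct bcm2835_desc {
        struct bcm2835_chan *c;         /* the member matched at line 88 */
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        struct bcm2835_cb_entry *cb_list;       /* one entry per frame */
        unsigned int frames;
        size_t size;
};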
155 dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, in bcm2835_dma_desc_free()
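A hedged reconstruction of the routine around this match: the dma_pool_free() call sits in a loop over the descriptor's control blocks. The loop bound (desc->frames) and the trailing kfree() calls are assumptions consistent with the allocations in bcm2835_dma_prep_dma_cyclic() further down.

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
        struct bcm2835_desc *desc =
                container_of(vd, struct bcm2835_desc, vd);
        unsigned int i;

        /* Return every per-frame control block to the channel's pool */
        for (i = 0; i < desc->frames; i++)
                dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
                              desc->cb_list[i].paddr);

        kfree(desc->cb_list);   /* assumed: array freed along with the desc */
        kfree(desc);
}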
197 static void bcm2835_dma_start_desc(struct bcm2835_chan *c) in bcm2835_dma_start_desc() argument
199 struct virt_dma_desc *vd = vchan_next_desc(&c->vc); in bcm2835_dma_start_desc()
203 c->desc = NULL; in bcm2835_dma_start_desc()
209 c->desc = d = to_bcm2835_dma_desc(&vd->tx); in bcm2835_dma_start_desc()
211 writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); in bcm2835_dma_start_desc()
212 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); in bcm2835_dma_start_desc()
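Reconstructed from the matches plus the usual virt-dma idiom; the list_del() between the NULL check and the register writes is an assumption. The two writel() calls are confirmed: load the first control block's address, then set ACTIVE.

static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct bcm2835_desc *d;

        if (!vd) {
                c->desc = NULL;         /* queue empty: channel goes idle */
                return;
        }

        list_del(&vd->node);            /* assumed: dequeue before starting */

        c->desc = d = to_bcm2835_dma_desc(&vd->tx);

        /* Point the channel at the first control block, then kick the HW */
        writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
        writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}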
217 struct bcm2835_chan *c = data; in bcm2835_dma_callback() local
221 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_callback()
224 writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); in bcm2835_dma_callback()
226 d = c->desc; in bcm2835_dma_callback()
234 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); in bcm2835_dma_callback()
236 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_callback()
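A sketch of the interrupt handler these matches come from, assuming the standard irqreturn_t signature implied by the request_irq() call below. The vchan_cyclic_callback() step is an assumption that fits the cyclic-only note at line 342.

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
        struct bcm2835_chan *c = data;
        struct bcm2835_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Acknowledge the channel interrupt */
        writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);

        d = c->desc;
        if (d)
                vchan_cyclic_callback(&d->vd);  /* assumed: cyclic only */

        /* Re-set ACTIVE so the circular transfer keeps running */
        writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);

        spin_unlock_irqrestore(&c->vc.lock, flags);

        return IRQ_HANDLED;
}

Acknowledging INT and immediately re-asserting ACTIVE is what lets a cyclic transfer run indefinitely without software re-arming each period.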
243 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_alloc_chan_resources() local
244 struct device *dev = c->vc.chan.device->dev; in bcm2835_dma_alloc_chan_resources()
246 dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); in bcm2835_dma_alloc_chan_resources()
248 c->cb_pool = dma_pool_create(dev_name(dev), dev, in bcm2835_dma_alloc_chan_resources()
250 if (!c->cb_pool) { in bcm2835_dma_alloc_chan_resources()
255 return request_irq(c->irq_number, in bcm2835_dma_alloc_chan_resources()
256 bcm2835_dma_callback, 0, "DMA IRQ", c); in bcm2835_dma_alloc_chan_resources()
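With the gap between lines 250 and 255 filled in by assumption (the pool element size and the -ENOMEM path), the allocation hook reads roughly:

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct device *dev = c->vc.chan.device->dev;

        dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

        /* Pool of hardware control blocks (element size assumed) */
        c->cb_pool = dma_pool_create(dev_name(dev), dev,
                                     sizeof(struct bcm2835_dma_cb), 0, 0);
        if (!c->cb_pool)
                return -ENOMEM;         /* assumed error path */

        return request_irq(c->irq_number,
                           bcm2835_dma_callback, 0, "DMA IRQ", c);
}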
261 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_free_chan_resources() local
263 vchan_free_chan_resources(&c->vc); in bcm2835_dma_free_chan_resources()
264 free_irq(c->irq_number, c); in bcm2835_dma_free_chan_resources()
265 dma_pool_destroy(c->cb_pool); in bcm2835_dma_free_chan_resources()
267 dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); in bcm2835_dma_free_chan_resources()
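The matches above already cover essentially the whole teardown; assembled, with the assumed void signature:

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);      /* drain virt-dma state */
        free_irq(c->irq_number, c);             /* no handler can run now */
        dma_pool_destroy(c->cb_pool);           /* safe once the IRQ is gone */

        dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}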
302 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_tx_status() local
311 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_tx_status()
312 vd = vchan_find_desc(&c->vc, cookie); in bcm2835_dma_tx_status()
316 } else if (c->desc && c->desc->vd.tx.cookie == cookie) { in bcm2835_dma_tx_status()
317 struct bcm2835_desc *d = c->desc; in bcm2835_dma_tx_status()
321 pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD); in bcm2835_dma_tx_status()
323 pos = readl(c->chan_base + BCM2835_DMA_DEST_AD); in bcm2835_dma_tx_status()
332 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_tx_status()
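Reconstruction of the status query. The dma_cookie_status() fast path and the direction test picking SOURCE_AD versus DEST_AD follow the usual dmaengine pattern; the residue helpers bcm2835_dma_desc_size() and bcm2835_dma_desc_size_pos() are assumed names, not confirmed by this listing.

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate); /* assumed */
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                /* Not started yet: the full size remains (helper assumed) */
                txstate->residue =
                        bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct bcm2835_desc *d = c->desc;
                dma_addr_t pos;

                /* In flight: read the live address register per direction */
                if (d->dir == DMA_MEM_TO_DEV)
                        pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
                else
                        pos = 0;

                txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}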
339 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_issue_pending() local
342 c->cyclic = true; /* Nothing else is implemented */ in bcm2835_dma_issue_pending()
344 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_issue_pending()
345 if (vchan_issue_pending(&c->vc) && !c->desc) in bcm2835_dma_issue_pending()
346 bcm2835_dma_start_desc(c); in bcm2835_dma_issue_pending()
348 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_issue_pending()
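Assembled from the matches, adding only the assumed signature and the flags declaration. The hardware is kicked only when idle (c->desc is NULL); a transfer already in flight keeps running on its own.

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        unsigned long flags;

        c->cyclic = true; /* Nothing else is implemented */

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc)
                bcm2835_dma_start_desc(c);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}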
356 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_prep_dma_cyclic() local
371 dev_addr = c->cfg.src_addr; in bcm2835_dma_prep_dma_cyclic()
372 dev_width = c->cfg.src_addr_width; in bcm2835_dma_prep_dma_cyclic()
375 dev_addr = c->cfg.dst_addr; in bcm2835_dma_prep_dma_cyclic()
376 dev_width = c->cfg.dst_addr_width; in bcm2835_dma_prep_dma_cyclic()
394 d->c = c; in bcm2835_dma_prep_dma_cyclic()
407 cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, in bcm2835_dma_prep_dma_cyclic()
439 if (c->dreq != 0) in bcm2835_dma_prep_dma_cyclic()
441 BCM2835_DMA_PER_MAP(c->dreq); in bcm2835_dma_prep_dma_cyclic()
455 return vchan_tx_prep(&c->vc, &d->vd, flags); in bcm2835_dma_prep_dma_cyclic()
461 dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr); in bcm2835_dma_prep_dma_cyclic()
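A condensed reconstruction of the cyclic prep. The signature, the buswidth check, the frame math, the two-pass allocate-then-chain structure, and the control-block field names (info/src/dst/length/next) are assumptions wrapped around the confirmed calls: the c->cfg endpoint selection, d->c = c, dma_pool_zalloc(), the c->dreq test with BCM2835_DMA_PER_MAP(), vchan_tx_prep(), and the error-path dma_pool_free(). Interrupt-enable and other transfer-info flags are omitted here.

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct bcm2835_desc *d;
        dma_addr_t dev_addr;
        unsigned int i;

        /* Pick the device-side endpoint from the slave config */
        if (direction == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
        } else if (direction == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
        } else {
                return NULL;            /* assumed: slave directions only */
        }

        if (dev_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
                return NULL;            /* assumed: 32-bit bus only */

        d = kzalloc(sizeof(*d), GFP_NOWAIT);    /* allocation flags assumed */
        if (!d)
                return NULL;

        d->c = c;
        d->dir = direction;
        d->frames = buf_len / period_len;       /* assumed */

        d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_NOWAIT);
        if (!d->cb_list) {
                kfree(d);
                return NULL;
        }

        /* Pass 1: one control block per period from the channel pool */
        for (i = 0; i < d->frames; i++) {
                struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];

                cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
                                               &cb_entry->paddr);
                if (!cb_entry->cb)
                        goto error_cb;
        }

        /* Pass 2: fill and chain into a ring; the last frame wraps back
         * to frame 0, which is what makes the transfer cyclic. */
        for (i = 0; i < d->frames; i++) {
                struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;

                if (direction == DMA_DEV_TO_MEM) {
                        control_block->src = dev_addr;
                        control_block->dst = buf_addr + i * period_len;
                } else {
                        control_block->src = buf_addr + i * period_len;
                        control_block->dst = dev_addr;
                }
                control_block->length = period_len;
                control_block->next = d->cb_list[(i + 1) % d->frames].paddr;

                if (c->dreq != 0)       /* route the peripheral DREQ */
                        control_block->info |=
                                BCM2835_DMA_PER_MAP(c->dreq);
        }

        return vchan_tx_prep(&c->vc, &d->vd, flags);

error_cb:
        while (i--)
                dma_pool_free(c->cb_pool, d->cb_list[i].cb,
                              d->cb_list[i].paddr);
        kfree(d->cb_list);
        kfree(d);
        return NULL;
}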
472 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_slave_config() local
482 c->cfg = *cfg; in bcm2835_dma_slave_config()
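The single match shows the config copied wholesale into the channel; any validation before the copy is an assumption.

static int bcm2835_dma_slave_config(struct dma_chan *chan,
                                    struct dma_slave_config *cfg)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

        /* (Assumed) checks on direction, address and bus width would
         * precede the copy; the copy itself is confirmed at line 482. */
        c->cfg = *cfg;

        return 0;
}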
489 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); in bcm2835_dma_terminate_all() local
490 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); in bcm2835_dma_terminate_all()
495 spin_lock_irqsave(&c->vc.lock, flags); in bcm2835_dma_terminate_all()
499 list_del_init(&c->node); in bcm2835_dma_terminate_all()
507 if (c->desc) { in bcm2835_dma_terminate_all()
508 bcm2835_dma_desc_free(&c->desc->vd); in bcm2835_dma_terminate_all()
509 c->desc = NULL; in bcm2835_dma_terminate_all()
510 bcm2835_dma_abort(c->chan_base); in bcm2835_dma_terminate_all()
514 if (!(readl(c->chan_base + BCM2835_DMA_CS) & in bcm2835_dma_terminate_all()
525 vchan_get_all_descriptors(&c->vc, &head); in bcm2835_dma_terminate_all()
526 spin_unlock_irqrestore(&c->vc.lock, flags); in bcm2835_dma_terminate_all()
527 vchan_dma_desc_free_list(&c->vc, &head); in bcm2835_dma_terminate_all()
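Reconstruction of the teardown path. Confirmed by the matches: unlinking c->node, freeing the in-flight descriptor, bcm2835_dma_abort(), polling CS for ACTIVE to clear, and reaping the virtual channel. The timeout bound, cpu_relax(), and the dev_err() on failure are assumptions.

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
        unsigned long flags;
        int timeout = 10000;            /* assumed bound */
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Take the channel off the controller's scheduling list */
        list_del_init(&c->node);

        /* Stop an in-flight transfer and drop its descriptor */
        if (c->desc) {
                bcm2835_dma_desc_free(&c->desc->vd);
                c->desc = NULL;
                bcm2835_dma_abort(c->chan_base);

                /* Poll until the channel actually stops (loop assumed) */
                while (--timeout) {
                        if (!(readl(c->chan_base + BCM2835_DMA_CS) &
                              BCM2835_DMA_ACTIVE))
                                break;
                        cpu_relax();
                }
                if (!timeout)
                        dev_err(d->ddev.dev,
                                "DMA transfer could not be terminated\n");
        }

        /* Reap everything still queued on the virtual channel */
        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}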
534 struct bcm2835_chan *c; in bcm2835_dma_chan_init() local
536 c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL); in bcm2835_dma_chan_init()
537 if (!c) in bcm2835_dma_chan_init()
540 c->vc.desc_free = bcm2835_dma_desc_free; in bcm2835_dma_chan_init()
541 vchan_init(&c->vc, &d->ddev); in bcm2835_dma_chan_init()
542 INIT_LIST_HEAD(&c->node); in bcm2835_dma_chan_init()
544 c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); in bcm2835_dma_chan_init()
545 c->ch = chan_id; in bcm2835_dma_chan_init()
546 c->irq_number = irq; in bcm2835_dma_chan_init()
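Assembled from the matches; only the signature, the -ENOMEM value and the final return are assumed.

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
                                 int irq)
{
        struct bcm2835_chan *c;

        c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;         /* assumed */

        c->vc.desc_free = bcm2835_dma_desc_free;
        vchan_init(&c->vc, &d->ddev);
        INIT_LIST_HEAD(&c->node);

        /* Per-channel register window, index and interrupt line */
        c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
        c->ch = chan_id;
        c->irq_number = irq;

        return 0;
}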
553 struct bcm2835_chan *c, *next; in bcm2835_dma_free() local
555 list_for_each_entry_safe(c, next, &od->ddev.channels, in bcm2835_dma_free()
557 list_del(&c->vc.chan.device_node); in bcm2835_dma_free()
558 tasklet_kill(&c->vc.task); in bcm2835_dma_free()
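Assembled from the matches; the list_for_each_entry_safe() member argument (vc.chan.device_node) follows from the list_del() at line 557.

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
        struct bcm2835_chan *c, *next;

        /* _safe iteration: each channel is unlinked as we walk the list */
        list_for_each_entry_safe(c, next, &od->ddev.channels,
                                 vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);      /* quiesce the vchan tasklet */
        }
}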