Lines matching refs:ctlr (cross-reference hits for the ctlr identifier in TI's CPDMA engine driver, drivers/net/ethernet/ti/davinci_cpdma.c). The leading numbers are the source file's own line numbers; the trailing "member", "argument", "local", and "in ...()" tags give the symbol's role at each hit.
116 struct cpdma_ctlr *ctlr; member
133 #define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) argument
136 #define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) argument
268 struct cpdma_ctlr *ctlr; in cpdma_ctlr_create() local
270 ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL); in cpdma_ctlr_create()
271 if (!ctlr) in cpdma_ctlr_create()
274 ctlr->state = CPDMA_STATE_IDLE; in cpdma_ctlr_create()
275 ctlr->params = *params; in cpdma_ctlr_create()
276 ctlr->dev = params->dev; in cpdma_ctlr_create()
277 spin_lock_init(&ctlr->lock); in cpdma_ctlr_create()
279 ctlr->pool = cpdma_desc_pool_create(ctlr->dev, in cpdma_ctlr_create()
280 ctlr->params.desc_mem_phys, in cpdma_ctlr_create()
281 ctlr->params.desc_hw_addr, in cpdma_ctlr_create()
282 ctlr->params.desc_mem_size, in cpdma_ctlr_create()
283 ctlr->params.desc_align); in cpdma_ctlr_create()
284 if (!ctlr->pool) in cpdma_ctlr_create()
287 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) in cpdma_ctlr_create()
288 ctlr->num_chan = CPDMA_MAX_CHANNELS; in cpdma_ctlr_create()
289 return ctlr; in cpdma_ctlr_create()
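
The create path above copies the caller's parameter block (line 275) and carves a descriptor pool out of the region described by desc_mem_phys/desc_hw_addr/desc_mem_size/desc_align (lines 279-283), clamping the channel count to CPDMA_MAX_CHANNELS. A minimal sketch of how a MAC driver might fill struct cpdma_params and create a controller; the register offsets, pool size, and channel count below are illustrative placeholders, not values for any real SoC:

    #include <linux/device.h>
    #include <linux/sizes.h>
    #include "davinci_cpdma.h"

    /* Hypothetical helper: 'ss' is an already-ioremap()ed subsystem base. */
    static struct cpdma_ctlr *example_cpdma_create(struct device *dev,
                                                   void __iomem *ss)
    {
            struct cpdma_params params = {
                    .dev             = dev,
                    .dmaregs         = ss + 0x100,  /* placeholder offsets */
                    .txhdp           = ss + 0x200,
                    .rxhdp           = ss + 0x220,
                    .txcp            = ss + 0x240,
                    .rxcp            = ss + 0x260,
                    .rxfree          = ss + 0x280,
                    .num_chan        = 8,
                    .has_soft_reset  = true,
                    .min_packet_size = 64,   /* runts padded up, line 685 */
                    .desc_mem_size   = SZ_8K,
                    .desc_align      = 16,
                    .has_ext_regs    = false,
                    /* desc_mem_phys/desc_hw_addr left 0: the pool then uses
                     * DMA-coherent memory instead of on-chip CPPI RAM */
            };

            return cpdma_ctlr_create(&params);  /* NULL on failure */
    }
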
293 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) in cpdma_ctlr_start() argument
298 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_start()
299 if (ctlr->state != CPDMA_STATE_IDLE) { in cpdma_ctlr_start()
300 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_start()
304 if (ctlr->params.has_soft_reset) { in cpdma_ctlr_start()
307 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); in cpdma_ctlr_start()
309 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) in cpdma_ctlr_start()
317 for (i = 0; i < ctlr->num_chan; i++) { in cpdma_ctlr_start()
318 __raw_writel(0, ctlr->params.txhdp + 4 * i); in cpdma_ctlr_start()
319 __raw_writel(0, ctlr->params.rxhdp + 4 * i); in cpdma_ctlr_start()
320 __raw_writel(0, ctlr->params.txcp + 4 * i); in cpdma_ctlr_start()
321 __raw_writel(0, ctlr->params.rxcp + 4 * i); in cpdma_ctlr_start()
324 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_start()
325 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_start()
327 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1); in cpdma_ctlr_start()
328 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1); in cpdma_ctlr_start()
330 ctlr->state = CPDMA_STATE_ACTIVE; in cpdma_ctlr_start()
332 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { in cpdma_ctlr_start()
333 if (ctlr->channels[i]) in cpdma_ctlr_start()
334 cpdma_chan_start(ctlr->channels[i]); in cpdma_ctlr_start()
336 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_start()
341 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) in cpdma_ctlr_stop() argument
346 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_stop()
347 if (ctlr->state == CPDMA_STATE_TEARDOWN) { in cpdma_ctlr_stop()
348 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_stop()
352 ctlr->state = CPDMA_STATE_TEARDOWN; in cpdma_ctlr_stop()
354 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { in cpdma_ctlr_stop()
355 if (ctlr->channels[i]) in cpdma_ctlr_stop()
356 cpdma_chan_stop(ctlr->channels[i]); in cpdma_ctlr_stop()
359 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_stop()
360 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_stop()
362 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0); in cpdma_ctlr_stop()
363 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0); in cpdma_ctlr_stop()
365 ctlr->state = CPDMA_STATE_IDLE; in cpdma_ctlr_stop()
367 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_stop()
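
cpdma_ctlr_start() soft-resets the engine when has_soft_reset is set (lines 304-309), zeroes every head-descriptor and completion pointer (lines 317-321), masks all per-channel interrupts (lines 324-325) before enabling TX and RX (lines 327-328), and finally kicks every channel already created (lines 332-334); cpdma_ctlr_stop() mirrors this in reverse. A driver therefore only brackets interface up/down with the pair. The sketch below (and the later ones) hangs state off a hypothetical struct example_priv; none of these names come from the driver:

    #include <linux/netdevice.h>
    #include "davinci_cpdma.h"

    /* Illustrative private state shared by the sketches that follow. */
    struct example_priv {
            struct net_device  *ndev;
            struct device      *dev;
            struct cpdma_ctlr  *dma;
            struct cpdma_chan  *txch, *rxch;
            struct napi_struct napi;
    };

    static int example_ndo_open(struct net_device *ndev)
    {
            struct example_priv *priv = netdev_priv(ndev);
            int ret;

            ret = cpdma_ctlr_start(priv->dma);  /* rejected unless IDLE, line 299 */
            if (ret < 0)
                    return ret;
            cpdma_ctlr_int_ctrl(priv->dma, true);
            return 0;
    }

    static int example_ndo_stop(struct net_device *ndev)
    {
            struct example_priv *priv = netdev_priv(ndev);

            cpdma_ctlr_int_ctrl(priv->dma, false);
            return cpdma_ctlr_stop(priv->dma);  /* stops every channel, line 354 */
    }
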
372 int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) in cpdma_ctlr_dump() argument
374 struct device *dev = ctlr->dev; in cpdma_ctlr_dump()
378 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_dump()
380 dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]); in cpdma_ctlr_dump()
383 dma_reg_read(ctlr, CPDMA_TXIDVER)); in cpdma_ctlr_dump()
385 dma_reg_read(ctlr, CPDMA_TXCONTROL)); in cpdma_ctlr_dump()
387 dma_reg_read(ctlr, CPDMA_TXTEARDOWN)); in cpdma_ctlr_dump()
389 dma_reg_read(ctlr, CPDMA_RXIDVER)); in cpdma_ctlr_dump()
391 dma_reg_read(ctlr, CPDMA_RXCONTROL)); in cpdma_ctlr_dump()
393 dma_reg_read(ctlr, CPDMA_SOFTRESET)); in cpdma_ctlr_dump()
395 dma_reg_read(ctlr, CPDMA_RXTEARDOWN)); in cpdma_ctlr_dump()
397 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW)); in cpdma_ctlr_dump()
399 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED)); in cpdma_ctlr_dump()
401 dma_reg_read(ctlr, CPDMA_TXINTMASKSET)); in cpdma_ctlr_dump()
403 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR)); in cpdma_ctlr_dump()
405 dma_reg_read(ctlr, CPDMA_MACINVECTOR)); in cpdma_ctlr_dump()
407 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR)); in cpdma_ctlr_dump()
409 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW)); in cpdma_ctlr_dump()
411 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED)); in cpdma_ctlr_dump()
413 dma_reg_read(ctlr, CPDMA_RXINTMASKSET)); in cpdma_ctlr_dump()
415 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR)); in cpdma_ctlr_dump()
417 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW)); in cpdma_ctlr_dump()
419 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED)); in cpdma_ctlr_dump()
421 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET)); in cpdma_ctlr_dump()
423 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR)); in cpdma_ctlr_dump()
425 if (!ctlr->params.has_ext_regs) { in cpdma_ctlr_dump()
427 dma_reg_read(ctlr, CPDMA_DMACONTROL)); in cpdma_ctlr_dump()
429 dma_reg_read(ctlr, CPDMA_DMASTATUS)); in cpdma_ctlr_dump()
431 dma_reg_read(ctlr, CPDMA_RXBUFFOFS)); in cpdma_ctlr_dump()
434 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) in cpdma_ctlr_dump()
435 if (ctlr->channels[i]) in cpdma_ctlr_dump()
436 cpdma_chan_dump(ctlr->channels[i]); in cpdma_ctlr_dump()
438 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_dump()
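
cpdma_ctlr_dump() is diagnostics only: under the controller lock it prints the state string and the full interrupt/status register set, prints the DMACONTROL/DMASTATUS/RXBUFFOFS block only when has_ext_regs is not set (line 425), and then recurses into every live channel (lines 434-436). A hedged sketch of the typical call site, an error path:

    /* Hypothetical diagnostic helper reusing struct example_priv. */
    static void example_report_host_error(struct example_priv *priv)
    {
            dev_err(priv->dev, "CPDMA host error\n");
            cpdma_ctlr_dump(priv->dma);  /* registers plus per-channel state */
    }
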
443 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) in cpdma_ctlr_destroy() argument
448 if (!ctlr) in cpdma_ctlr_destroy()
451 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_destroy()
452 if (ctlr->state != CPDMA_STATE_IDLE) in cpdma_ctlr_destroy()
453 cpdma_ctlr_stop(ctlr); in cpdma_ctlr_destroy()
455 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) in cpdma_ctlr_destroy()
456 cpdma_chan_destroy(ctlr->channels[i]); in cpdma_ctlr_destroy()
458 cpdma_desc_pool_destroy(ctlr->pool); in cpdma_ctlr_destroy()
459 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_destroy()
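
One caveat is visible right in the listing: cpdma_ctlr_destroy() takes ctlr->lock (line 451) and then calls cpdma_ctlr_stop() and cpdma_chan_destroy(), each of which acquires the same lock again (lines 346 and 555), so this version of the teardown path can deadlock against itself; later kernels drop the outer lock. Typical use is a single call from the driver's remove path, sketched here with hypothetical names:

    #include <linux/platform_device.h>
    #include "davinci_cpdma.h"

    /* Hypothetical remove path: cpdma_ctlr_destroy() stops the engine if
     * needed, destroys every channel and frees the descriptor pool; the
     * devm allocations (lines 270, 504) go away with the device. */
    static int example_remove(struct platform_device *pdev)
    {
            struct example_priv *priv = platform_get_drvdata(pdev);

            cpdma_ctlr_destroy(priv->dma);
            return 0;
    }
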
464 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) in cpdma_ctlr_int_ctrl() argument
469 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_int_ctrl()
470 if (ctlr->state != CPDMA_STATE_ACTIVE) { in cpdma_ctlr_int_ctrl()
471 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_int_ctrl()
476 dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR); in cpdma_ctlr_int_ctrl()
478 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { in cpdma_ctlr_int_ctrl()
479 if (ctlr->channels[i]) in cpdma_ctlr_int_ctrl()
480 cpdma_chan_int_ctrl(ctlr->channels[i], enable); in cpdma_ctlr_int_ctrl()
483 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_int_ctrl()
488 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) in cpdma_ctlr_eoi() argument
490 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); in cpdma_ctlr_eoi()
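
cpdma_ctlr_int_ctrl() sets or clears the host-error mask and every channel's interrupt mask while ACTIVE, and cpdma_ctlr_eoi() writes the end-of-interrupt vector (line 490) so the hardware will assert the next interrupt. A hedged NAPI-style poll sketch; cpdma_chan_process() and the CPDMA_EOI_RX value are assumed from davinci_cpdma.h of this era:

    /* Process up to 'budget' completed RX descriptors, then re-arm. */
    static int example_rx_poll(struct napi_struct *napi, int budget)
    {
            struct example_priv *priv =
                    container_of(napi, struct example_priv, napi);
            int done;

            done = cpdma_chan_process(priv->rxch, budget);
            if (done < budget) {
                    napi_complete(napi);
                    cpdma_chan_int_ctrl(priv->rxch, true);  /* unmask, line 940 */
                    cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
            }
            return done;
    }
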
494 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, in cpdma_chan_create() argument
501 if (__chan_linear(chan_num) >= ctlr->num_chan) in cpdma_chan_create()
504 chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL); in cpdma_chan_create()
508 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_chan_create()
509 if (ctlr->channels[chan_num]) { in cpdma_chan_create()
510 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_create()
511 devm_kfree(ctlr->dev, chan); in cpdma_chan_create()
515 chan->ctlr = ctlr; in cpdma_chan_create()
521 chan->hdp = ctlr->params.rxhdp + offset; in cpdma_chan_create()
522 chan->cp = ctlr->params.rxcp + offset; in cpdma_chan_create()
523 chan->rxfree = ctlr->params.rxfree + offset; in cpdma_chan_create()
529 chan->hdp = ctlr->params.txhdp + offset; in cpdma_chan_create()
530 chan->cp = ctlr->params.txcp + offset; in cpdma_chan_create()
540 ctlr->channels[chan_num] = chan; in cpdma_chan_create()
541 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_create()
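
cpdma_chan_create() wires the new channel to the per-direction register sets chosen at lines 521-523 (RX, including rxfree) or 529-530 (TX); chan_num encodes the direction, normally via the rx_chan_num()/tx_chan_num() helpers in davinci_cpdma.h. A sketch creating one channel in each direction; the three-argument form matches this era (later kernels added a fourth argument), the handler bodies are illustrative, and the double error check covers trees that returned NULL on some failures and ERR_PTR on others:

    #include <linux/err.h>
    #include "davinci_cpdma.h"

    /* Completion handlers: 'token' is whatever was handed to
     * cpdma_chan_submit(), typically an skb. */
    static void example_tx_handler(void *token, int len, int status)
    {
            /* free or complete the transmitted buffer here */
    }

    static void example_rx_handler(void *token, int len, int status)
    {
            /* push the filled buffer up the stack, then resubmit one */
    }

    static int example_create_channels(struct example_priv *priv)
    {
            priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
                                           example_tx_handler);
            if (IS_ERR_OR_NULL(priv->txch))
                    return priv->txch ? PTR_ERR(priv->txch) : -ENOMEM;

            priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
                                           example_rx_handler);
            if (IS_ERR_OR_NULL(priv->rxch))
                    return priv->rxch ? PTR_ERR(priv->rxch) : -ENOMEM;
            return 0;
    }
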
548 struct cpdma_ctlr *ctlr; in cpdma_chan_destroy() local
553 ctlr = chan->ctlr; in cpdma_chan_destroy()
555 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_chan_destroy()
558 ctlr->channels[chan->chan_num] = NULL; in cpdma_chan_destroy()
559 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_destroy()
580 struct device *dev = chan->ctlr->dev; in cpdma_chan_dump()
628 struct cpdma_ctlr *ctlr = chan->ctlr; in __cpdma_chan_submit() local
630 struct cpdma_desc_pool *pool = ctlr->pool; in __cpdma_chan_submit()
664 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_submit() local
678 desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); in cpdma_chan_submit()
685 if (len < ctlr->params.min_packet_size) { in cpdma_chan_submit()
686 len = ctlr->params.min_packet_size; in cpdma_chan_submit()
690 buffer = dma_map_single(ctlr->dev, data, len, chan->dir); in cpdma_chan_submit()
691 ret = dma_mapping_error(ctlr->dev, buffer); in cpdma_chan_submit()
693 cpdma_desc_free(ctlr->pool, desc, 1); in cpdma_chan_submit()
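
The submit path allocates a descriptor from the pool (line 678), pads short frames up to min_packet_size (lines 685-686), and DMA-maps the buffer itself (line 690), so callers pass plain kernel virtual addresses and the mapping is torn down again in __cpdma_chan_free() (line 759). A hedged RX-refill sketch; the trailing argument of cpdma_chan_submit() changed across kernel versions (a gfp_t early on, a 'directed' flag later), so treat the prototype as approximate:

    #include <linux/skbuff.h>
    #include "davinci_cpdma.h"

    #define EXAMPLE_RX_BUF_LEN 1536  /* placeholder buffer size */

    /* Queue one empty skb as an RX DMA buffer; the skb doubles as the
     * completion token delivered to the RX handler. */
    static int example_rx_refill(struct example_priv *priv)
    {
            struct sk_buff *skb;
            int ret;

            skb = netdev_alloc_skb(priv->ndev, EXAMPLE_RX_BUF_LEN);
            if (!skb)
                    return -ENOMEM;

            ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
                                    skb_tailroom(skb), 0);
            if (ret < 0)
                    dev_kfree_skb_any(skb);
            return ret;
    }
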
727 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_check_free_tx_desc() local
728 struct cpdma_desc_pool *pool = ctlr->pool; in cpdma_check_free_tx_desc()
749 struct cpdma_ctlr *ctlr = chan->ctlr; in __cpdma_chan_free() local
750 struct cpdma_desc_pool *pool = ctlr->pool; in __cpdma_chan_free()
759 dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); in __cpdma_chan_free()
766 struct cpdma_ctlr *ctlr = chan->ctlr; in __cpdma_chan_process() local
770 struct cpdma_desc_pool *pool = ctlr->pool; in __cpdma_chan_process()
841 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_start() local
842 struct cpdma_desc_pool *pool = ctlr->pool; in cpdma_chan_start()
850 if (ctlr->state != CPDMA_STATE_ACTIVE) { in cpdma_chan_start()
854 dma_reg_write(ctlr, chan->int_set, chan->mask); in cpdma_chan_start()
869 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_stop() local
870 struct cpdma_desc_pool *pool = ctlr->pool; in cpdma_chan_stop()
882 dma_reg_write(ctlr, chan->int_clear, chan->mask); in cpdma_chan_stop()
885 dma_reg_write(ctlr, chan->td, chan_linear(chan)); in cpdma_chan_stop()
940 dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear, in cpdma_chan_int_ctrl()
970 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control) in cpdma_control_get() argument
976 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_control_get()
979 if (!ctlr->params.has_ext_regs) in cpdma_control_get()
983 if (ctlr->state != CPDMA_STATE_ACTIVE) in cpdma_control_get()
994 ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask; in cpdma_control_get()
997 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_control_get()
1001 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) in cpdma_control_set() argument
1008 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_control_set()
1011 if (!ctlr->params.has_ext_regs) in cpdma_control_set()
1015 if (ctlr->state != CPDMA_STATE_ACTIVE) in cpdma_control_set()
1026 val = dma_reg_read(ctlr, info->reg); in cpdma_control_set()
1029 dma_reg_write(ctlr, info->reg, val); in cpdma_control_set()
1033 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_control_set()
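
Both control accessors demand the extended register block (lines 979, 1011) and an ACTIVE controller (lines 983, 1015), and work read-modify-write on a field described by a {reg, shift, mask} table entry (lines 994, 1026-1029). Usage is a single call with an enum cpdma_control value from davinci_cpdma.h; the control name below is assumed from that header:

    /* Hedged sketch: reserve 2 bytes ahead of each RX payload so the IP
     * header lands 4-byte aligned, then read the value back. */
    static int example_set_rx_offset(struct cpdma_ctlr *dma)
    {
            int ret;

            ret = cpdma_control_set(dma, CPDMA_RX_BUFFER_OFFSET, 2);
            if (ret < 0)
                    return ret;  /* fails without ext regs or when not ACTIVE */

            return cpdma_control_get(dma, CPDMA_RX_BUFFER_OFFSET);
    }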