/linux-4.4.14/drivers/dma/ |
D | mmp_pdma.c |
     88  struct dma_async_tx_descriptor async_tx;   member
    135  container_of(tx, struct mmp_pdma_desc_sw, async_tx)
    332  set_desc(chan->phy, desc->async_tx.phys);   in start_pending_queue()
    350  cookie = dma_cookie_assign(&child->async_tx);   in mmp_pdma_tx_submit()
    375  dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);   in mmp_pdma_alloc_descriptor()
    377  desc->async_tx.tx_submit = mmp_pdma_tx_submit;   in mmp_pdma_alloc_descriptor()
    378  desc->async_tx.phys = pdesc;   in mmp_pdma_alloc_descriptor()
    421  dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);   in mmp_pdma_free_desc_list()
    486  prev->desc.ddadr = new->async_tx.phys;   in mmp_pdma_prep_memcpy()
    488  new->async_tx.cookie = 0;   in mmp_pdma_prep_memcpy()
    [all …]
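The mmp_pdma hits trace the usual driver-side life cycle of the embedded descriptor: allocate it from a DMA pool, initialise the generic async_tx part, hook up the driver's tx_submit, and stash the bus address in async_tx.phys so it can later be handed back to dma_pool_free() (line 421). A minimal sketch of that allocation step, assuming hypothetical my_* types and helpers rather than the real mmp_pdma symbols:

/*
 * Sketch only: a dma_pool-backed software descriptor with an embedded
 * struct dma_async_tx_descriptor, in the spirit of
 * mmp_pdma_alloc_descriptor().  All my_* names are invented.
 */
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/string.h>

struct my_desc_sw {
        u32 hw[4];                              /* stand-in for the hardware descriptor */
        struct list_head node;
        struct dma_async_tx_descriptor async_tx; /* the engine-generic part */
};

struct my_chan {
        struct dma_chan chan;
        struct dma_pool *desc_pool;
};

/* placeholder submit hook; a real one assigns a cookie and queues the desc */
static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
{
        return tx->cookie;
}

static struct my_desc_sw *my_alloc_descriptor(struct my_chan *chan)
{
        struct my_desc_sw *desc;
        dma_addr_t pdesc;

        desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (!desc)
                return NULL;

        memset(desc, 0, sizeof(*desc));
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
        desc->async_tx.tx_submit = my_tx_submit;        /* called by dmaengine_submit() */
        desc->async_tx.phys = pdesc;                    /* bus address, reused on free */

        return desc;
}

dmaengine_submit() on the returned descriptor eventually calls the tx_submit hook installed here; the fsldma entry below shows what that hook typically does.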
|
D | iop-adma.c |
     41  container_of(tx, struct iop_adma_desc_slot, async_tx)
     64  struct dma_async_tx_descriptor *tx = &desc->async_tx;   in iop_adma_run_tx_complete_actions()
     95  if (!async_tx_test_ack(&desc->async_tx))   in iop_adma_clean_slot()
    130  iter->async_tx.cookie, iter->idx, busy,   in __iop_adma_slot_cleanup()
    131  iter->async_tx.phys, iop_desc_get_next_desc(iter),   in __iop_adma_slot_cleanup()
    132  async_tx_test_ack(&iter->async_tx));   in __iop_adma_slot_cleanup()
    134  prefetch(&_iter->async_tx);   in __iop_adma_slot_cleanup()
    147  if (iter->async_tx.phys == current_desc) {   in __iop_adma_slot_cleanup()
    226  if (iter->xor_check_result && iter->async_tx.cookie)   in __iop_adma_slot_cleanup()
    289  prefetch(&_iter->async_tx);   in iop_adma_alloc_slots()
    [all …]
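Several of the iop-adma cleanup hits turn on async_tx_test_ack(): a completed slot may only be reclaimed once the client has acknowledged the descriptor, otherwise it has to stay in place so dependent operations and status queries can still reference it. A hedged sketch of that rule, with the hardware bookkeeping left out and all my_* names invented:

/*
 * Sketch of the "reclaim only acknowledged descriptors" rule seen in
 * iop_adma_clean_slot() and fsldma_clean_completed_descriptor().
 * The my_desc layout is hypothetical and kmalloc-backed for simplicity.
 */
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_desc {
        struct list_head node;
        struct dma_async_tx_descriptor async_tx;
};

/* returns 1 if the descriptor was freed, 0 if it has to stay queued */
static int my_clean_slot(struct my_desc *desc)
{
        /*
         * Completed but not yet ACKed by the client: leave it alone so a
         * dependent transaction (or a later ack) can still find it.
         */
        if (!async_tx_test_ack(&desc->async_tx))
                return 0;

        list_del(&desc->node);
        kfree(desc);            /* a pool-backed driver would use dma_pool_free() */
        return 1;
}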
|
D | mv_xor.c |
     45  container_of(tx, struct mv_xor_desc_slot, async_tx)
    223  mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);   in mv_chan_start_new_chain()
    234  BUG_ON(desc->async_tx.cookie < 0);   in mv_desc_run_tx_complete_actions()
    236  if (desc->async_tx.cookie > 0) {   in mv_desc_run_tx_complete_actions()
    237  cookie = desc->async_tx.cookie;   in mv_desc_run_tx_complete_actions()
    242  if (desc->async_tx.callback)   in mv_desc_run_tx_complete_actions()
    243  desc->async_tx.callback(   in mv_desc_run_tx_complete_actions()
    244  desc->async_tx.callback_param);   in mv_desc_run_tx_complete_actions()
    246  dma_descriptor_unmap(&desc->async_tx);   in mv_desc_run_tx_complete_actions()
    250  dma_run_dependencies(&desc->async_tx);   in mv_desc_run_tx_complete_actions()
    [all …]
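The mv_xor hits at lines 234-250 show the canonical completion sequence for one descriptor: sanity-check and record the cookie, fire the client callback (the v4.4 callback still takes only callback_param), unmap the transaction, then release any transactions that depended on it. A sketch of that sequence with a hypothetical my_desc type:

/*
 * Sketch of the generic per-descriptor completion actions, patterned
 * after mv_desc_run_tx_complete_actions().  my_desc is hypothetical.
 */
#include <linux/bug.h>
#include <linux/dmaengine.h>

struct my_desc {
        struct dma_async_tx_descriptor async_tx;
        /* ...driver-private state... */
};

static dma_cookie_t my_run_tx_complete_actions(struct my_desc *desc,
                                               dma_cookie_t cookie)
{
        struct dma_async_tx_descriptor *txd = &desc->async_tx;

        BUG_ON(txd->cookie < 0);

        if (txd->cookie > 0) {
                cookie = txd->cookie;   /* report the newest completed cookie */

                if (txd->callback)
                        txd->callback(txd->callback_param);

                dma_descriptor_unmap(txd);      /* undo mappings taken at prep time */
        }

        /* unblock any transactions that were chained behind this one */
        dma_run_dependencies(txd);

        return cookie;
}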
|
D | fsldma.c |
    398  set_desc_next(chan, &tail->hw, desc->async_tx.phys);   in append_ld_queue()
    430  cookie = dma_cookie_assign(&child->async_tx);   in fsl_dma_tx_submit()
    451  dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);   in fsl_dma_free_descriptor()
    473  dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);   in fsl_dma_alloc_descriptor()
    474  desc->async_tx.tx_submit = fsl_dma_tx_submit;   in fsl_dma_alloc_descriptor()
    475  desc->async_tx.phys = pdesc;   in fsl_dma_alloc_descriptor()
    496  if (async_tx_test_ack(&desc->async_tx))   in fsldma_clean_completed_descriptor()
    512  struct dma_async_tx_descriptor *txd = &desc->async_tx;   in fsldma_run_tx_complete_actions()
    552  if (!async_tx_test_ack(&desc->async_tx)) {   in fsldma_clean_running_descriptor()
    561  dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);   in fsldma_clean_running_descriptor()
    [all …]
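fsl_dma_tx_submit() (the hit at line 430) illustrates the submit half of the contract: under the channel lock every child descriptor of the transaction receives a dmaengine cookie, the chain is appended to the pending list, and the last cookie is returned to the caller. A rough sketch of such a hook; note that dma_cookie_assign() comes from the private drivers/dma/dmaengine.h header, and the my_* structures are invented:

/*
 * Sketch of a tx_submit hook in the style of fsl_dma_tx_submit().
 * Must live under drivers/dma/ so it can use the private cookie helpers.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "dmaengine.h"          /* dma_cookie_assign() */

struct my_desc_sw {
        struct list_head node;
        struct list_head tx_list;       /* children making up this transaction */
        struct dma_async_tx_descriptor async_tx;
};

struct my_chan {
        struct dma_chan common;
        spinlock_t lock;
        struct list_head ld_pending;    /* submitted but not yet running */
};

#define to_my_chan(c)    container_of(c, struct my_chan, common)
#define tx_to_my_desc(t) container_of(t, struct my_desc_sw, async_tx)

static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct my_chan *chan = to_my_chan(tx->chan);
        struct my_desc_sw *desc = tx_to_my_desc(tx);
        struct my_desc_sw *child;
        dma_cookie_t cookie = -EINVAL;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        /* one dmaengine cookie per hardware descriptor in the chain */
        list_for_each_entry(child, &desc->tx_list, node)
                cookie = dma_cookie_assign(&child->async_tx);

        list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

        spin_unlock_irqrestore(&chan->lock, flags);

        return cookie;
}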
|
D | fsl_raid.c |
     83  #define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
    140  dma_cookie_complete(&desc->async_tx);   in fsl_re_desc_done()
    142  callback = desc->async_tx.callback;   in fsl_re_desc_done()
    143  callback_param = desc->async_tx.callback_param;   in fsl_re_desc_done()
    147  dma_descriptor_unmap(&desc->async_tx);   in fsl_re_desc_done()
    157  if (async_tx_test_ack(&desc->async_tx))   in fsl_re_cleanup_descs()
    264  desc->async_tx.tx_submit = fsl_re_tx_submit;   in fsl_re_init_desc()
    265  dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);   in fsl_re_init_desc()
    297  desc->async_tx.flags = flags;   in fsl_re_chan_alloc_desc()
    314  desc->async_tx.flags = flags;   in fsl_re_chan_alloc_desc()
    [all …]
|
D | nbpfaxi.c |
    154  struct dma_async_tx_descriptor async_tx;   member
    606  running = chan->running ? chan->running->async_tx.cookie : -EINVAL;   in nbpf_tx_status()
    617  if (desc->async_tx.cookie == cookie) {   in nbpf_tx_status()
    624  if (desc->async_tx.cookie == cookie) {   in nbpf_tx_status()
    644  struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);   in nbpf_tx_submit()
    692  dma_async_tx_descriptor_init(&desc->async_tx, dchan);   in nbpf_desc_page_alloc()
    693  desc->async_tx.tx_submit = nbpf_tx_submit;   in nbpf_desc_page_alloc()
    734  if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {   in nbpf_scan_acked()
    827  __func__, desc, desc->async_tx.cookie);   in nbpf_chan_idle()
    925  desc->async_tx.flags = flags;   in nbpf_prep_sg()
    [all …]
|
D | fsldma.h |
    105  struct dma_async_tx_descriptor async_tx;   member
    193  #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
|
D | mv_xor.h | 146 struct dma_async_tx_descriptor async_tx; member
|
D | fsl_raid.h | 294 struct dma_async_tx_descriptor async_tx; member
|
D | Kconfig |
    541  bool "Async_tx: Offload support for the async_tx api"
    544  This allows the async_tx api to take advantage of offload engines for
|
D | pxa_dma.c | 137 container_of(tx, struct pxad_desc_sw, async_tx)
|
/linux-4.4.14/drivers/dma/sh/ |
D | shdma-base.c |
     76  container_of(tx, struct shdma_desc, async_tx);   in shdma_tx_submit()
     95  chunk->async_tx.cookie > 0 ||   in shdma_tx_submit()
     96  chunk->async_tx.cookie == -EBUSY ||   in shdma_tx_submit()
    101  chunk->async_tx.callback = callback;   in shdma_tx_submit()
    102  chunk->async_tx.callback_param = tx->callback_param;   in shdma_tx_submit()
    105  chunk->async_tx.callback = NULL;   in shdma_tx_submit()
    111  tx->cookie, &chunk->async_tx, schan->id);   in shdma_tx_submit()
    240  dma_async_tx_descriptor_init(&desc->async_tx,   in shdma_alloc_chan_resources()
    242  desc->async_tx.tx_submit = shdma_tx_submit;   in shdma_alloc_chan_resources()
    339  struct dma_async_tx_descriptor *tx = &desc->async_tx;   in __ld_cleanup()
    [all …]
|
D | rcar-dmac.c |
      75  struct dma_async_tx_descriptor async_tx;   member
      96  #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
     376  else if (desc->async_tx.callback)   in rcar_dmac_chan_start_xfer()
     479  dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);   in rcar_dmac_desc_alloc()
     480  desc->async_tx.tx_submit = rcar_dmac_tx_submit;   in rcar_dmac_desc_alloc()
     534  if (async_tx_test_ack(&desc->async_tx)) {   in rcar_dmac_desc_recycle_acked()
     846  desc->async_tx.flags = dma_flags;   in rcar_dmac_chan_prep_sg()
     847  desc->async_tx.cookie = -EBUSY;   in rcar_dmac_chan_prep_sg()
     939  return &desc->async_tx;   in rcar_dmac_chan_prep_sg()
    1157  if (cookie != desc->async_tx.cookie)   in rcar_dmac_chan_get_residue()
    [all …]
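rcar_dmac_chan_prep_sg() (hits at 846, 847 and 939) shows the prep side of the same contract: the driver records the caller's flags in async_tx.flags, parks the cookie at -EBUSY until tx_submit() assigns a real one, and hands back the embedded dma_async_tx_descriptor rather than its private descriptor type. A minimal sketch of such a prep callback, with the hardware programming elided and all my_* names invented:

/*
 * Sketch of a device_prep_dma_memcpy-style callback that returns the
 * embedded async_tx, in the spirit of rcar_dmac_chan_prep_sg().
 */
#include <linux/dmaengine.h>
#include <linux/slab.h>

struct my_desc {
        struct dma_async_tx_descriptor async_tx;
        /* ...hardware descriptor chain, sizes, addresses... */
};

struct my_chan {
        struct dma_chan chan;
};

#define to_my_chan(c) container_of(c, struct my_chan, chan)

/*
 * Placeholder allocation; real drivers recycle descriptors that were
 * pre-initialised with dma_async_tx_descriptor_init() at alloc time.
 */
static struct my_desc *my_get_descriptor(struct my_chan *chan)
{
        return kzalloc(sizeof(struct my_desc), GFP_NOWAIT);
}

static struct dma_async_tx_descriptor *
my_prep_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
               size_t len, unsigned long flags)
{
        struct my_chan *chan = to_my_chan(dchan);
        struct my_desc *desc;

        desc = my_get_descriptor(chan);
        if (!desc)
                return NULL;

        /* ...program the hardware descriptor for a dst <- src copy of len... */

        desc->async_tx.flags = flags;   /* DMA_PREP_INTERRUPT, DMA_CTRL_ACK, ... */
        desc->async_tx.cookie = -EBUSY; /* becomes a real cookie in tx_submit() */

        return &desc->async_tx;         /* the client only sees the generic type */
}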
|
D | shdma.h | 61 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
|
D | shdmac.c | 294 sdesc->async_tx.cookie, sh_chan->shdma_chan.id, in sh_dmae_start_xfer()
|
/linux-4.4.14/Documentation/crypto/ |
D | async-tx-api.txt |
     26  The async_tx API provides methods for describing a chain of asynchronous
     87  async_tx call will implicitly set the acknowledged state.
    163  See include/linux/async_tx.h for more information on the flags. See the
    171  accommodate assumptions made by applications using the async_tx API:
    222  include/linux/async_tx.h: core header file for the async_tx api
    223  crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
    224  crypto/async_tx/async_memcpy.c: copy offload
    225  crypto/async_tx/async_xor.c: xor and xor zero sum offload
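The async_tx API this file documents sits above dmaengine: a client describes an operation through an async_submit_ctl and gets back a dma_async_tx_descriptor whether the work ran on an offload engine or fell back to software. A hedged usage sketch (the function names, pages and completion here are illustrative, not the document's own example):

/*
 * Sketch only: an offloadable page copy through the async_tx client API.
 * Falls back to a synchronous memcpy when no DMA_MEMCPY channel exists;
 * the callback fires either way.
 */
#include <linux/async_tx.h>
#include <linux/completion.h>

static void my_copy_done(void *param)
{
        complete(param);        /* wake whoever queued the copy */
}

static void my_offload_copy(struct page *dst, struct page *src, size_t len,
                            struct completion *done)
{
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;

        /* no dependency, ACK the descriptor, notify through my_copy_done() */
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, my_copy_done, done, NULL);
        tx = async_memcpy(dst, src, 0, 0, len, &submit);

        /* tx == NULL just means the copy already ran synchronously */
        if (tx)
                async_tx_issue_pending_all();
}

ASYNC_TX_ACK tells the core that nothing further will be chained onto this descriptor, so the channel may recycle it as soon as it completes; drop the flag and pass the returned tx as the depend_tx argument of the next init_async_submit() to build a dependency chain instead.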
|
/linux-4.4.14/drivers/dma/ppc4xx/ |
D | adma.c |
    1480  BUG_ON(desc->async_tx.cookie < 0);   in ppc440spe_adma_run_tx_complete_actions()
    1481  if (desc->async_tx.cookie > 0) {   in ppc440spe_adma_run_tx_complete_actions()
    1482  cookie = desc->async_tx.cookie;   in ppc440spe_adma_run_tx_complete_actions()
    1483  desc->async_tx.cookie = 0;   in ppc440spe_adma_run_tx_complete_actions()
    1488  if (desc->async_tx.callback)   in ppc440spe_adma_run_tx_complete_actions()
    1489  desc->async_tx.callback(   in ppc440spe_adma_run_tx_complete_actions()
    1490  desc->async_tx.callback_param);   in ppc440spe_adma_run_tx_complete_actions()
    1492  dma_descriptor_unmap(&desc->async_tx);   in ppc440spe_adma_run_tx_complete_actions()
    1496  dma_run_dependencies(&desc->async_tx);   in ppc440spe_adma_run_tx_complete_actions()
    1510  if (!async_tx_test_ack(&desc->async_tx))   in ppc440spe_adma_clean_slot()
    [all …]
|
D | adma.h |
     23  container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
    150  struct dma_async_tx_descriptor async_tx;   member
|
/linux-4.4.14/drivers/dma/xilinx/ |
D | xilinx_vdma.c |
    181  struct dma_async_tx_descriptor async_tx;   member
    256  container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
    456  callback = desc->async_tx.callback;   in xilinx_vdma_chan_desc_cleanup()
    457  callback_param = desc->async_tx.callback_param;   in xilinx_vdma_chan_desc_cleanup()
    465  dma_run_dependencies(&desc->async_tx);   in xilinx_vdma_chan_desc_cleanup()
    756  dma_cookie_complete(&desc->async_tx);   in xilinx_vdma_complete_descriptor()
    953  dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);   in xilinx_vdma_dma_prep_interleaved()
    954  desc->async_tx.tx_submit = xilinx_vdma_tx_submit;   in xilinx_vdma_dma_prep_interleaved()
    955  async_tx_ack(&desc->async_tx);   in xilinx_vdma_dma_prep_interleaved()
    993  return &desc->async_tx;   in xilinx_vdma_dma_prep_interleaved()
|
/linux-4.4.14/crypto/async_tx/ |
D | Makefile | 1 obj-$(CONFIG_ASYNC_CORE) += async_tx.o
|
/linux-4.4.14/arch/arm/include/asm/hardware/ |
D | iop_adma.h | 101 struct dma_async_tx_descriptor async_tx; member
|
D | iop3xx-adma.h | 601 (u32) (desc->async_tx.phys + (i << 5)); in iop_desc_init_zero_sum()
|
/linux-4.4.14/include/linux/ |
D | shdma-base.h | 51 struct dma_async_tx_descriptor async_tx; member
|
/linux-4.4.14/Documentation/dmaengine/ |
D | client.txt |
      6  NOTE: For DMA Engine usage in async_tx please see:
    121  Although the async_tx API specifies that completion callback
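client.txt covers the slave side of dmaengine, where a driver owns a channel and drives it directly rather than going through async_tx. A hedged sketch of the prep/submit/issue steps it describes, assuming the channel has already been requested and slave-configured; the names are illustrative:

/*
 * Sketch of the basic slave-DMA flow from client.txt: prep a descriptor,
 * attach a callback, submit it, then kick the engine.  Error handling is
 * trimmed and the names are made up.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static void my_xfer_done(void *param)
{
        /* runs from the driver's completion tasklet */
}

static int my_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int nents)
{
        struct dma_async_tx_descriptor *txd;
        dma_cookie_t cookie;

        txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txd)
                return -ENOMEM;

        txd->callback = my_xfer_done;           /* see the caveats in this file */
        txd->callback_param = NULL;

        cookie = dmaengine_submit(txd);         /* invokes the driver's tx_submit() */
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);          /* nothing starts until this call */
        return 0;
}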
|
/linux-4.4.14/crypto/ |
D | Makefile | 129 obj-$(CONFIG_ASYNC_CORE) += async_tx/
|
D | Kconfig |
      8  # async_tx api: hardware offloaded memory transfer/transform support
     10  source "crypto/async_tx/Kconfig"
|
/linux-4.4.14/ |
D | MAINTAINERS |
    1799  F: crypto/async_tx/
    1802  F: include/linux/async_tx.h
|