Searched refs:edesc (Results 1 - 8 of 8) sorted by relevance

/linux-4.1.27/drivers/dma/
edma.c
111 struct edma_desc *edesc; member in struct:edma_chan
153 struct edma_desc *edesc; edma_execute() local
158 if (!echan->edesc || edma_execute()
159 echan->edesc->pset_nr == echan->edesc->processed) { edma_execute()
163 echan->edesc = NULL; edma_execute()
167 echan->edesc = to_edma_desc(&vdesc->tx); edma_execute()
170 edesc = echan->edesc; edma_execute()
173 left = edesc->pset_nr - edesc->processed; edma_execute()
175 edesc->sg_len = 0; edma_execute()
179 j = i + edesc->processed; edma_execute()
180 edma_write_slot(echan->slot[i], &edesc->pset[j].param); edma_execute()
181 edesc->sg_len += edesc->pset[j].len; edma_execute()
195 edesc->pset[j].param.opt, edma_execute()
196 edesc->pset[j].param.src, edma_execute()
197 edesc->pset[j].param.dst, edma_execute()
198 edesc->pset[j].param.a_b_cnt, edma_execute()
199 edesc->pset[j].param.ccnt, edma_execute()
200 edesc->pset[j].param.src_dst_bidx, edma_execute()
201 edesc->pset[j].param.src_dst_cidx, edma_execute()
202 edesc->pset[j].param.link_bcntrld); edma_execute()
208 edesc->processed += nslots; edma_execute()
215 if (edesc->processed == edesc->pset_nr) { edma_execute()
216 if (edesc->cyclic) edma_execute()
223 if (edesc->processed <= MAX_NR_SG) { edma_execute()
229 echan->ch_num, edesc->processed); edma_execute()
259 * echan->edesc is NULL and exit.) edma_terminate_all()
261 if (echan->edesc) { edma_terminate_all()
262 int cyclic = echan->edesc->cyclic; edma_terminate_all()
268 edma_desc_free(&echan->edesc->vdesc); edma_terminate_all()
270 echan->edesc = NULL; edma_terminate_all()
304 if (!echan->edesc || !echan->edesc->cyclic) edma_dma_pause()
316 if (!echan->edesc->cyclic) edma_dma_resume()
455 struct edma_desc *edesc; edma_prep_slave_sg() local
483 edesc = kzalloc(sizeof(*edesc) + sg_len * edma_prep_slave_sg()
484 sizeof(edesc->pset[0]), GFP_ATOMIC); edma_prep_slave_sg()
485 if (!edesc) { edma_prep_slave_sg()
490 edesc->pset_nr = sg_len; edma_prep_slave_sg()
491 edesc->residue = 0; edma_prep_slave_sg()
492 edesc->direction = direction; edma_prep_slave_sg()
493 edesc->echan = echan; edma_prep_slave_sg()
504 kfree(edesc); edma_prep_slave_sg()
520 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, for_each_sg()
524 kfree(edesc); for_each_sg()
528 edesc->absync = ret; for_each_sg()
529 edesc->residue += sg_dma_len(sg); for_each_sg()
534 edesc->pset[i].param.opt |= TCINTEN; for_each_sg()
538 edesc->pset[i].param.opt |= TCINTEN; for_each_sg()
540 edesc->residue_stat = edesc->residue;
542 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
550 struct edma_desc *edesc; edma_prep_dma_memcpy() local
557 edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC); edma_prep_dma_memcpy()
558 if (!edesc) { edma_prep_dma_memcpy()
563 edesc->pset_nr = 1; edma_prep_dma_memcpy()
565 ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, edma_prep_dma_memcpy()
570 edesc->absync = ret; edma_prep_dma_memcpy()
577 edesc->pset[0].param.opt |= ITCCHEN; edma_prep_dma_memcpy()
578 edesc->pset[0].param.opt |= TCINTEN; edma_prep_dma_memcpy()
580 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); edma_prep_dma_memcpy()
590 struct edma_desc *edesc; edma_prep_dma_cyclic() local
637 edesc = kzalloc(sizeof(*edesc) + nslots * edma_prep_dma_cyclic()
638 sizeof(edesc->pset[0]), GFP_ATOMIC); edma_prep_dma_cyclic()
639 if (!edesc) { edma_prep_dma_cyclic()
644 edesc->cyclic = 1; edma_prep_dma_cyclic()
645 edesc->pset_nr = nslots; edma_prep_dma_cyclic()
646 edesc->residue = edesc->residue_stat = buf_len; edma_prep_dma_cyclic()
647 edesc->direction = direction; edma_prep_dma_cyclic()
648 edesc->echan = echan; edma_prep_dma_cyclic()
660 kfree(edesc); edma_prep_dma_cyclic()
668 memcpy(&edesc->pset[i], &edesc->pset[0], edma_prep_dma_cyclic()
669 sizeof(edesc->pset[0])); edma_prep_dma_cyclic()
673 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, edma_prep_dma_cyclic()
677 kfree(edesc); edma_prep_dma_cyclic()
700 edesc->pset[i].param.opt, edma_prep_dma_cyclic()
701 edesc->pset[i].param.src, edma_prep_dma_cyclic()
702 edesc->pset[i].param.dst, edma_prep_dma_cyclic()
703 edesc->pset[i].param.a_b_cnt, edma_prep_dma_cyclic()
704 edesc->pset[i].param.ccnt, edma_prep_dma_cyclic()
705 edesc->pset[i].param.src_dst_bidx, edma_prep_dma_cyclic()
706 edesc->pset[i].param.src_dst_cidx, edma_prep_dma_cyclic()
707 edesc->pset[i].param.link_bcntrld); edma_prep_dma_cyclic()
709 edesc->absync = ret; edma_prep_dma_cyclic()
715 edesc->pset[i].param.opt |= TCINTEN; edma_prep_dma_cyclic()
721 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); edma_prep_dma_cyclic()
728 struct edma_desc *edesc; edma_callback() local
731 edesc = echan->edesc; edma_callback()
734 if (!edesc || (edesc && !edesc->cyclic)) edma_callback()
741 if (edesc) { edma_callback()
742 if (edesc->cyclic) { edma_callback()
743 vchan_cyclic_callback(&edesc->vdesc); edma_callback()
744 } else if (edesc->processed == edesc->pset_nr) { edma_callback()
746 edesc->residue = 0; edma_callback()
748 vchan_cookie_complete(&edesc->vdesc); edma_callback()
754 edesc->residue -= edesc->sg_len; edma_callback()
755 edesc->residue_stat = edesc->residue; edma_callback()
756 edesc->processed_stat = edesc->processed; edma_callback()
880 if (vchan_issue_pending(&echan->vchan) && !echan->edesc) edma_issue_pending()
885 static u32 edma_residue(struct edma_desc *edesc) edma_residue() argument
887 bool dst = edesc->direction == DMA_DEV_TO_MEM; edma_residue()
888 struct edma_pset *pset = edesc->pset; edma_residue()
896 pos = edma_get_position(edesc->echan->slot[0], dst); edma_residue()
901 * We never update edesc->residue in the cyclic case, so we edma_residue()
905 if (edesc->cyclic) { edma_residue()
907 edesc->residue_stat = edesc->residue - done; edma_residue()
908 return edesc->residue_stat; edma_residue()
915 pset += edesc->processed_stat; edma_residue()
917 for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { edma_residue()
924 return edesc->residue_stat - (pos - pset->addr); edma_residue()
927 edesc->processed_stat++; edma_residue()
928 edesc->residue_stat -= pset->len; edma_residue()
930 return edesc->residue_stat; edma_residue()
948 if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) edma_tx_status()
949 txstate->residue = edma_residue(echan->edesc); edma_tx_status()
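
The edma.c hits above follow the virt-dma descriptor pattern: edma_prep_slave_sg() makes one allocation sized for the descriptor header plus a flexible array of parameter sets, edma_execute() submits them in batches, and edma_callback() completes or recycles the descriptor. Below is a minimal sketch of that single-allocation idiom; the structure is a simplified stand-in, not the real struct edma_desc.

    #include <stdlib.h>

    struct pset_sketch { unsigned int len; };   /* stand-in for edma_pset  */

    struct edma_desc_sketch {
            int pset_nr;                        /* psets in this transfer  */
            int processed;                      /* psets already submitted */
            struct pset_sketch pset[];          /* flexible array member   */
    };

    static struct edma_desc_sketch *alloc_edesc(int sg_len)
    {
            struct edma_desc_sketch *edesc;

            /* One zeroed allocation covers the header and all sg_len psets,
             * mirroring the kzalloc(sizeof(*edesc) + sg_len * sizeof(...))
             * calls in the hits above. */
            edesc = calloc(1, sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]));
            if (!edesc)
                    return NULL;
            edesc->pset_nr = sg_len;
            return edesc;
    }
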
fsl-edma.c
151 struct fsl_edma_desc *edesc; member in struct:fsl_edma_chan
300 fsl_chan->edesc = NULL; fsl_edma_terminate_all()
313 if (fsl_chan->edesc) { fsl_edma_pause()
327 if (fsl_chan->edesc) { fsl_edma_resume()
360 struct fsl_edma_desc *edesc = fsl_chan->edesc; fsl_edma_desc_residue() local
369 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) fsl_edma_desc_residue()
370 len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes) fsl_edma_desc_residue()
371 * le16_to_cpu(edesc->tcd[i].vtcd->biter); fsl_edma_desc_residue()
382 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) { fsl_edma_desc_residue()
383 size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes) fsl_edma_desc_residue()
384 * le16_to_cpu(edesc->tcd[i].vtcd->biter); fsl_edma_desc_residue()
386 dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr); fsl_edma_desc_residue()
388 dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr); fsl_edma_desc_residue()
417 if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie) fsl_edma_tx_status()
647 fsl_chan->edesc = to_fsl_edma_desc(vdesc); fsl_edma_xfer_desc()
648 fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd); fsl_edma_xfer_desc()
674 if (!fsl_chan->edesc->iscyclic) { fsl_edma_tx_handler()
675 list_del(&fsl_chan->edesc->vdesc.node); fsl_edma_tx_handler()
676 vchan_cookie_complete(&fsl_chan->edesc->vdesc); fsl_edma_tx_handler()
677 fsl_chan->edesc = NULL; fsl_edma_tx_handler()
680 vchan_cyclic_callback(&fsl_chan->edesc->vdesc); fsl_edma_tx_handler()
683 if (!fsl_chan->edesc) fsl_edma_tx_handler()
727 if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc) fsl_edma_issue_pending()
781 fsl_chan->edesc = NULL; fsl_edma_free_chan_resources()
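
fsl_edma_desc_residue() above sums nbytes * biter over the descriptor's TCDs to get the total transfer length before subtracting progress. A hedged model of that summation follows; the vtcd structure is a simplified stand-in for the little-endian hardware TCD read through le32_to_cpu()/le16_to_cpu() in the hits.

    #include <stddef.h>
    #include <stdint.h>

    struct vtcd_sketch {
            uint32_t nbytes;                    /* bytes per minor loop    */
            uint16_t biter;                     /* major loop iterations   */
    };

    static size_t fsl_total_len(const struct vtcd_sketch *tcd, int n_tcds)
    {
            size_t len = 0;

            /* Total length = sum of nbytes * biter over every TCD, as in
             * the loop at fsl-edma.c:369 above. */
            for (int i = 0; i < n_tcds; i++)
                    len += (size_t)tcd[i].nbytes * tcd[i].biter;
            return len;
    }
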
/linux-4.1.27/drivers/crypto/caam/
caamhash.c
605 struct ahash_edesc *edesc, ahash_unmap()
608 if (edesc->src_nents) ahash_unmap()
609 dma_unmap_sg_chained(dev, req->src, edesc->src_nents, ahash_unmap()
610 DMA_TO_DEVICE, edesc->chained); ahash_unmap()
611 if (edesc->dst_dma) ahash_unmap()
612 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); ahash_unmap()
614 if (edesc->sec4_sg_bytes) ahash_unmap()
615 dma_unmap_single(dev, edesc->sec4_sg_dma, ahash_unmap()
616 edesc->sec4_sg_bytes, DMA_TO_DEVICE); ahash_unmap()
620 struct ahash_edesc *edesc, ahash_unmap_ctx()
629 ahash_unmap(dev, edesc, req, dst_len); ahash_unmap_ctx()
636 struct ahash_edesc *edesc; ahash_done() local
646 edesc = (struct ahash_edesc *)((char *)desc - ahash_done()
651 ahash_unmap(jrdev, edesc, req, digestsize); ahash_done()
652 kfree(edesc); ahash_done()
671 struct ahash_edesc *edesc; ahash_done_bi() local
681 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_bi()
686 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); ahash_done_bi()
687 kfree(edesc); ahash_done_bi()
706 struct ahash_edesc *edesc; ahash_done_ctx_src() local
716 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_ctx_src()
721 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); ahash_done_ctx_src()
722 kfree(edesc); ahash_done_ctx_src()
741 struct ahash_edesc *edesc; ahash_done_ctx_dst() local
751 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_ctx_dst()
756 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); ahash_done_ctx_dst()
757 kfree(edesc); ahash_done_ctx_dst()
790 struct ahash_edesc *edesc; ahash_update_ctx() local
807 * allocate space for base edesc and hw desc commands, ahash_update_ctx()
810 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + ahash_update_ctx()
812 if (!edesc) { ahash_update_ctx()
818 edesc->src_nents = src_nents; ahash_update_ctx()
819 edesc->chained = chained; ahash_update_ctx()
820 edesc->sec4_sg_bytes = sec4_sg_bytes; ahash_update_ctx()
821 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + ahash_update_ctx()
825 edesc->sec4_sg, DMA_BIDIRECTIONAL); ahash_update_ctx()
830 edesc->sec4_sg + 1, ahash_update_ctx()
836 edesc->sec4_sg + sec4_sg_src_index, ahash_update_ctx()
845 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= ahash_update_ctx()
850 desc = edesc->hw_desc; ahash_update_ctx()
854 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ahash_update_ctx()
857 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ahash_update_ctx()
862 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_update_ctx()
877 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_ctx()
879 kfree(edesc); ahash_update_ctx()
914 struct ahash_edesc *edesc; ahash_final_ctx() local
921 /* allocate space for base edesc and hw desc commands, link tables */ ahash_final_ctx()
922 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + ahash_final_ctx()
924 if (!edesc) { ahash_final_ctx()
930 desc = edesc->hw_desc; ahash_final_ctx()
933 edesc->sec4_sg_bytes = sec4_sg_bytes; ahash_final_ctx()
934 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + ahash_final_ctx()
936 edesc->src_nents = 0; ahash_final_ctx()
939 edesc->sec4_sg, DMA_TO_DEVICE); ahash_final_ctx()
943 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, ahash_final_ctx()
946 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; ahash_final_ctx()
948 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ahash_final_ctx()
950 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ahash_final_ctx()
955 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, ahash_final_ctx()
958 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_ctx()
960 if (dma_mapping_error(jrdev, edesc->dst_dma)) { ahash_final_ctx()
974 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); ahash_final_ctx()
975 kfree(edesc); ahash_final_ctx()
998 struct ahash_edesc *edesc; ahash_finup_ctx() local
1008 /* allocate space for base edesc and hw desc commands, link tables */ ahash_finup_ctx()
1009 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + ahash_finup_ctx()
1011 if (!edesc) { ahash_finup_ctx()
1017 desc = edesc->hw_desc; ahash_finup_ctx()
1020 edesc->src_nents = src_nents; ahash_finup_ctx()
1021 edesc->chained = chained; ahash_finup_ctx()
1022 edesc->sec4_sg_bytes = sec4_sg_bytes; ahash_finup_ctx()
1023 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + ahash_finup_ctx()
1027 edesc->sec4_sg, DMA_TO_DEVICE); ahash_finup_ctx()
1031 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, ahash_finup_ctx()
1035 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + ahash_finup_ctx()
1038 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ahash_finup_ctx()
1040 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ahash_finup_ctx()
1045 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_finup_ctx()
1048 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_ctx()
1050 if (dma_mapping_error(jrdev, edesc->dst_dma)) { ahash_finup_ctx()
1064 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); ahash_finup_ctx()
1065 kfree(edesc); ahash_finup_ctx()
1083 struct ahash_edesc *edesc; ahash_digest() local
1094 /* allocate space for base edesc and hw desc commands, link tables */ ahash_digest()
1095 edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + ahash_digest()
1097 if (!edesc) { ahash_digest()
1101 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + ahash_digest()
1103 edesc->sec4_sg_bytes = sec4_sg_bytes; ahash_digest()
1104 edesc->src_nents = src_nents; ahash_digest()
1105 edesc->chained = chained; ahash_digest()
1108 desc = edesc->hw_desc; ahash_digest()
1112 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); ahash_digest()
1113 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ahash_digest()
1115 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ahash_digest()
1119 src_dma = edesc->sec4_sg_dma; ahash_digest()
1127 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_digest()
1129 if (dma_mapping_error(jrdev, edesc->dst_dma)) { ahash_digest()
1143 ahash_unmap(jrdev, edesc, req, digestsize); ahash_digest()
1144 kfree(edesc); ahash_digest()
1164 struct ahash_edesc *edesc; ahash_final_no_ctx() local
1168 /* allocate space for base edesc and hw desc commands, link tables */ ahash_final_no_ctx()
1169 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, ahash_final_no_ctx()
1171 if (!edesc) { ahash_final_no_ctx()
1176 edesc->sec4_sg_bytes = 0; ahash_final_no_ctx()
1178 desc = edesc->hw_desc; ahash_final_no_ctx()
1189 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_no_ctx()
1191 if (dma_mapping_error(jrdev, edesc->dst_dma)) { ahash_final_no_ctx()
1195 edesc->src_nents = 0; ahash_final_no_ctx()
1206 ahash_unmap(jrdev, edesc, req, digestsize); ahash_final_no_ctx()
1207 kfree(edesc); ahash_final_no_ctx()
1229 struct ahash_edesc *edesc; ahash_update_no_ctx() local
1246 * allocate space for base edesc and hw desc commands, ahash_update_no_ctx()
1249 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + ahash_update_no_ctx()
1251 if (!edesc) { ahash_update_no_ctx()
1257 edesc->src_nents = src_nents; ahash_update_no_ctx()
1258 edesc->chained = chained; ahash_update_no_ctx()
1259 edesc->sec4_sg_bytes = sec4_sg_bytes; ahash_update_no_ctx()
1260 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + ahash_update_no_ctx()
1262 edesc->dst_dma = 0; ahash_update_no_ctx()
1264 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, ahash_update_no_ctx()
1267 edesc->sec4_sg + 1, chained); ahash_update_no_ctx()
1276 desc = edesc->hw_desc; ahash_update_no_ctx()
1280 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ahash_update_no_ctx()
1283 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ahash_update_no_ctx()
1288 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); ahash_update_no_ctx()
1307 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_no_ctx()
1309 kfree(edesc); ahash_update_no_ctx()
1345 struct ahash_edesc *edesc; ahash_finup_no_ctx() local
1355 /* allocate space for base edesc and hw desc commands, link tables */ ahash_finup_no_ctx()
1356 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + ahash_finup_no_ctx()
1358 if (!edesc) { ahash_finup_no_ctx()
1364 desc = edesc->hw_desc; ahash_finup_no_ctx()
1367 edesc->src_nents = src_nents; ahash_finup_no_ctx()
1368 edesc->chained = chained; ahash_finup_no_ctx()
1369 edesc->sec4_sg_bytes = sec4_sg_bytes; ahash_finup_no_ctx()
1370 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + ahash_finup_no_ctx()
1373 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, ahash_finup_no_ctx()
1377 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, ahash_finup_no_ctx()
1380 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ahash_finup_no_ctx()
1382 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ahash_finup_no_ctx()
1387 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + ahash_finup_no_ctx()
1390 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_no_ctx()
1392 if (dma_mapping_error(jrdev, edesc->dst_dma)) { ahash_finup_no_ctx()
1406 ahash_unmap(jrdev, edesc, req, digestsize); ahash_finup_no_ctx()
1407 kfree(edesc); ahash_finup_no_ctx()
1431 struct ahash_edesc *edesc; ahash_update_first() local
1448 * allocate space for base edesc and hw desc commands, ahash_update_first()
1451 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + ahash_update_first()
1453 if (!edesc) { ahash_update_first()
1459 edesc->src_nents = src_nents; ahash_update_first()
1460 edesc->chained = chained; ahash_update_first()
1461 edesc->sec4_sg_bytes = sec4_sg_bytes; ahash_update_first()
1462 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + ahash_update_first()
1464 edesc->dst_dma = 0; ahash_update_first()
1468 edesc->sec4_sg, 0); ahash_update_first()
1469 edesc->sec4_sg_dma = dma_map_single(jrdev, ahash_update_first()
1470 edesc->sec4_sg, ahash_update_first()
1473 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ahash_update_first()
1477 src_dma = edesc->sec4_sg_dma; ahash_update_first()
1489 desc = edesc->hw_desc; ahash_update_first()
1513 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_first()
1515 kfree(edesc); ahash_update_first()
604 ahash_unmap(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len) ahash_unmap() argument
619 ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len, u32 flag) ahash_unmap_ctx() argument
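
Every allocation in the caamhash.c hits uses the same layout: a single kmalloc() holds the base ahash_edesc, the hardware job descriptor (DESC_JOB_IO_LEN bytes), and the sec4 scatter/gather table, with edesc->sec4_sg derived by pointer arithmetic into the same block. A sketch of that layout; DESC_JOB_IO_LEN_SKETCH and the structure are illustrative stand-ins, not the CAAM definitions.

    #include <stdlib.h>

    #define DESC_JOB_IO_LEN_SKETCH 64           /* stand-in for DESC_JOB_IO_LEN */

    struct ahash_edesc_sketch {
            int sec4_sg_bytes;                  /* size of the S/G table   */
            void *sec4_sg;                      /* points into this block  */
            /* hw_desc follows here in the real structure */
    };

    static struct ahash_edesc_sketch *alloc_hash_edesc(int sec4_sg_bytes)
    {
            struct ahash_edesc_sketch *edesc;

            /* Base struct + job descriptor + S/G table in one allocation. */
            edesc = malloc(sizeof(*edesc) + DESC_JOB_IO_LEN_SKETCH + sec4_sg_bytes);
            if (!edesc)
                    return NULL;
            edesc->sec4_sg_bytes = sec4_sg_bytes;
            /* The S/G table lives right after the struct and the job
             * descriptor, exactly as computed at caamhash.c:821 above. */
            edesc->sec4_sg = (char *)edesc + sizeof(*edesc) + DESC_JOB_IO_LEN_SKETCH;
            return edesc;
    }
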
caamalg.c
2154 struct aead_edesc *edesc, aead_unmap()
2160 dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents, aead_unmap()
2161 DMA_TO_DEVICE, edesc->assoc_chained); aead_unmap()
2164 edesc->src_nents, edesc->src_chained, edesc->dst_nents, aead_unmap()
2165 edesc->dst_chained, edesc->iv_dma, ivsize, aead_unmap()
2166 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); aead_unmap()
2170 struct ablkcipher_edesc *edesc, ablkcipher_unmap()
2177 edesc->src_nents, edesc->src_chained, edesc->dst_nents, ablkcipher_unmap()
2178 edesc->dst_chained, edesc->iv_dma, ivsize, ablkcipher_unmap()
2179 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); ablkcipher_unmap()
2186 struct aead_edesc *edesc; aead_encrypt_done() local
2195 edesc = (struct aead_edesc *)((char *)desc - aead_encrypt_done()
2201 aead_unmap(jrdev, edesc, req); aead_encrypt_done()
2209 edesc->src_nents ? 100 : ivsize, 1); aead_encrypt_done()
2212 edesc->src_nents ? 100 : req->cryptlen + aead_encrypt_done()
2216 kfree(edesc); aead_encrypt_done()
2225 struct aead_edesc *edesc; aead_decrypt_done() local
2234 edesc = (struct aead_edesc *)((char *)desc - aead_decrypt_done()
2249 aead_unmap(jrdev, edesc, req); aead_decrypt_done()
2264 if (!err && edesc->sec4_sg_bytes) { aead_decrypt_done()
2265 struct scatterlist *sg = sg_last(req->src, edesc->src_nents); aead_decrypt_done()
2272 kfree(edesc); aead_decrypt_done()
2281 struct ablkcipher_edesc *edesc; ablkcipher_encrypt_done() local
2289 edesc = (struct ablkcipher_edesc *)((char *)desc - ablkcipher_encrypt_done()
2298 edesc->src_nents > 1 ? 100 : ivsize, 1); ablkcipher_encrypt_done()
2301 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_encrypt_done()
2304 ablkcipher_unmap(jrdev, edesc, req); ablkcipher_encrypt_done()
2305 kfree(edesc); ablkcipher_encrypt_done()
2314 struct ablkcipher_edesc *edesc; ablkcipher_decrypt_done() local
2322 edesc = (struct ablkcipher_edesc *)((char *)desc - ablkcipher_decrypt_done()
2333 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_decrypt_done()
2336 ablkcipher_unmap(jrdev, edesc, req); ablkcipher_decrypt_done()
2337 kfree(edesc); ablkcipher_decrypt_done()
2346 struct aead_edesc *edesc, init_aead_job()
2354 u32 *desc = edesc->hw_desc; init_aead_job()
2368 edesc->src_nents ? 100 : ivsize, 1); init_aead_job()
2371 edesc->src_nents ? 100 : req->cryptlen, 1); init_aead_job()
2387 src_dma = edesc->iv_dma; init_aead_job()
2392 src_dma = edesc->sec4_sg_dma; init_aead_job()
2393 sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + init_aead_job()
2394 (edesc->src_nents ? : 1); init_aead_job()
2406 ((edesc->assoc_nents ? : 1) + 1); init_aead_job()
2410 if (!edesc->dst_nents) { init_aead_job()
2413 dst_dma = edesc->sec4_sg_dma + init_aead_job()
2431 struct aead_edesc *edesc, init_aead_giv_job()
2439 u32 *desc = edesc->hw_desc; init_aead_giv_job()
2455 edesc->src_nents > 1 ? 100 : req->cryptlen, 1); init_aead_giv_job()
2471 src_dma = edesc->iv_dma; init_aead_giv_job()
2476 src_dma = edesc->sec4_sg_dma; init_aead_giv_job()
2477 sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents; init_aead_giv_job()
2484 dst_dma = edesc->iv_dma; init_aead_giv_job()
2488 (edesc->assoc_nents + init_aead_giv_job()
2489 (is_gcm ? 1 + edesc->src_nents : 0)); init_aead_giv_job()
2492 dst_dma = edesc->sec4_sg_dma + init_aead_giv_job()
2507 struct ablkcipher_edesc *edesc, init_ablkcipher_job()
2513 u32 *desc = edesc->hw_desc; init_ablkcipher_job()
2524 edesc->src_nents ? 100 : req->nbytes, 1); init_ablkcipher_job()
2531 src_dma = edesc->iv_dma; init_ablkcipher_job()
2534 src_dma = edesc->sec4_sg_dma; init_ablkcipher_job()
2535 sec4_sg_index += edesc->src_nents + 1; init_ablkcipher_job()
2541 if (!edesc->src_nents && iv_contig) { init_ablkcipher_job()
2544 dst_dma = edesc->sec4_sg_dma + init_ablkcipher_job()
2549 if (!edesc->dst_nents) { init_ablkcipher_job()
2552 dst_dma = edesc->sec4_sg_dma + init_ablkcipher_job()
2564 struct ablkcipher_edesc *edesc, init_ablkcipher_giv_job()
2570 u32 *desc = edesc->hw_desc; init_ablkcipher_giv_job()
2581 edesc->src_nents ? 100 : req->nbytes, 1); init_ablkcipher_giv_job()
2587 if (!edesc->src_nents) { init_ablkcipher_giv_job()
2591 src_dma = edesc->sec4_sg_dma; init_ablkcipher_giv_job()
2592 sec4_sg_index += edesc->src_nents; init_ablkcipher_giv_job()
2598 dst_dma = edesc->iv_dma; init_ablkcipher_giv_job()
2601 dst_dma = edesc->sec4_sg_dma + init_ablkcipher_giv_job()
2621 struct aead_edesc *edesc; aead_edesc_alloc() local
2693 /* allocate space for base edesc and hw desc commands, link tables */ aead_edesc_alloc()
2694 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + aead_edesc_alloc()
2696 if (!edesc) { aead_edesc_alloc()
2701 edesc->assoc_nents = assoc_nents; aead_edesc_alloc()
2702 edesc->assoc_chained = assoc_chained; aead_edesc_alloc()
2703 edesc->src_nents = src_nents; aead_edesc_alloc()
2704 edesc->src_chained = src_chained; aead_edesc_alloc()
2705 edesc->dst_nents = dst_nents; aead_edesc_alloc()
2706 edesc->dst_chained = dst_chained; aead_edesc_alloc()
2707 edesc->iv_dma = iv_dma; aead_edesc_alloc()
2708 edesc->sec4_sg_bytes = sec4_sg_bytes; aead_edesc_alloc()
2709 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + aead_edesc_alloc()
2718 edesc->sec4_sg + aead_edesc_alloc()
2723 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, aead_edesc_alloc()
2730 edesc->sec4_sg + aead_edesc_alloc()
2737 edesc->sec4_sg + aead_edesc_alloc()
2743 edesc->sec4_sg + sec4_sg_index, 0); aead_edesc_alloc()
2745 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, aead_edesc_alloc()
2747 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { aead_edesc_alloc()
2752 return edesc; aead_edesc_alloc()
2757 struct aead_edesc *edesc; aead_encrypt() local
2766 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * aead_encrypt()
2768 if (IS_ERR(edesc)) aead_encrypt()
2769 return PTR_ERR(edesc); aead_encrypt()
2772 init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, aead_encrypt()
2776 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, aead_encrypt()
2777 desc_bytes(edesc->hw_desc), 1); aead_encrypt()
2780 desc = edesc->hw_desc; aead_encrypt()
2785 aead_unmap(jrdev, edesc, req); aead_encrypt()
2786 kfree(edesc); aead_encrypt()
2794 struct aead_edesc *edesc; aead_decrypt() local
2803 edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * aead_decrypt()
2805 if (IS_ERR(edesc)) aead_decrypt()
2806 return PTR_ERR(edesc); aead_decrypt()
2816 ctx->sh_desc_dec_dma, edesc, req, all_contig, false); aead_decrypt()
2819 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, aead_decrypt()
2820 desc_bytes(edesc->hw_desc), 1); aead_decrypt()
2823 desc = edesc->hw_desc; aead_decrypt()
2828 aead_unmap(jrdev, edesc, req); aead_decrypt()
2829 kfree(edesc); aead_decrypt()
2849 struct aead_edesc *edesc; aead_giv_edesc_alloc() local
2933 /* allocate space for base edesc and hw desc commands, link tables */ aead_giv_edesc_alloc()
2934 edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + aead_giv_edesc_alloc()
2936 if (!edesc) { aead_giv_edesc_alloc()
2941 edesc->assoc_nents = assoc_nents; aead_giv_edesc_alloc()
2942 edesc->assoc_chained = assoc_chained; aead_giv_edesc_alloc()
2943 edesc->src_nents = src_nents; aead_giv_edesc_alloc()
2944 edesc->src_chained = src_chained; aead_giv_edesc_alloc()
2945 edesc->dst_nents = dst_nents; aead_giv_edesc_alloc()
2946 edesc->dst_chained = dst_chained; aead_giv_edesc_alloc()
2947 edesc->iv_dma = iv_dma; aead_giv_edesc_alloc()
2948 edesc->sec4_sg_bytes = sec4_sg_bytes; aead_giv_edesc_alloc()
2949 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + aead_giv_edesc_alloc()
2957 edesc->sec4_sg + sec4_sg_index, 0); aead_giv_edesc_alloc()
2961 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, aead_giv_edesc_alloc()
2967 edesc->sec4_sg + sec4_sg_index, 0); aead_giv_edesc_alloc()
2972 edesc->sec4_sg + aead_giv_edesc_alloc()
2978 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, aead_giv_edesc_alloc()
2982 edesc->sec4_sg + sec4_sg_index, 0); aead_giv_edesc_alloc()
2986 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, aead_giv_edesc_alloc()
2990 edesc->sec4_sg + sec4_sg_index, 0); aead_giv_edesc_alloc()
2992 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, aead_giv_edesc_alloc()
2994 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { aead_giv_edesc_alloc()
2999 return edesc; aead_giv_edesc_alloc()
3005 struct aead_edesc *edesc; aead_givencrypt() local
3014 edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * aead_givencrypt()
3017 if (IS_ERR(edesc)) aead_givencrypt()
3018 return PTR_ERR(edesc); aead_givencrypt()
3028 ctx->sh_desc_givenc_dma, edesc, req, contig); aead_givencrypt()
3031 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, aead_givencrypt()
3032 desc_bytes(edesc->hw_desc), 1); aead_givencrypt()
3035 desc = edesc->hw_desc; aead_givencrypt()
3040 aead_unmap(jrdev, edesc, req); aead_givencrypt()
3041 kfree(edesc); aead_givencrypt()
3066 struct ablkcipher_edesc *edesc; ablkcipher_edesc_alloc() local
3106 /* allocate space for base edesc and hw desc commands, link tables */ ablkcipher_edesc_alloc()
3107 edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + ablkcipher_edesc_alloc()
3109 if (!edesc) { ablkcipher_edesc_alloc()
3114 edesc->src_nents = src_nents; ablkcipher_edesc_alloc()
3115 edesc->src_chained = src_chained; ablkcipher_edesc_alloc()
3116 edesc->dst_nents = dst_nents; ablkcipher_edesc_alloc()
3117 edesc->dst_chained = dst_chained; ablkcipher_edesc_alloc()
3118 edesc->sec4_sg_bytes = sec4_sg_bytes; ablkcipher_edesc_alloc()
3119 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + ablkcipher_edesc_alloc()
3124 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); ablkcipher_edesc_alloc()
3126 edesc->sec4_sg + 1, 0); ablkcipher_edesc_alloc()
3132 edesc->sec4_sg + sec4_sg_index, 0); ablkcipher_edesc_alloc()
3135 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ablkcipher_edesc_alloc()
3137 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ablkcipher_edesc_alloc()
3142 edesc->iv_dma = iv_dma; ablkcipher_edesc_alloc()
3146 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, ablkcipher_edesc_alloc()
3151 return edesc; ablkcipher_edesc_alloc()
3156 struct ablkcipher_edesc *edesc; ablkcipher_encrypt() local
3165 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * ablkcipher_encrypt()
3167 if (IS_ERR(edesc)) ablkcipher_encrypt()
3168 return PTR_ERR(edesc); ablkcipher_encrypt()
3172 ctx->sh_desc_enc_dma, edesc, req, iv_contig); ablkcipher_encrypt()
3175 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, ablkcipher_encrypt()
3176 desc_bytes(edesc->hw_desc), 1); ablkcipher_encrypt()
3178 desc = edesc->hw_desc; ablkcipher_encrypt()
3184 ablkcipher_unmap(jrdev, edesc, req); ablkcipher_encrypt()
3185 kfree(edesc); ablkcipher_encrypt()
3193 struct ablkcipher_edesc *edesc; ablkcipher_decrypt() local
3202 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * ablkcipher_decrypt()
3204 if (IS_ERR(edesc)) ablkcipher_decrypt()
3205 return PTR_ERR(edesc); ablkcipher_decrypt()
3209 ctx->sh_desc_dec_dma, edesc, req, iv_contig); ablkcipher_decrypt()
3210 desc = edesc->hw_desc; ablkcipher_decrypt()
3213 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, ablkcipher_decrypt()
3214 desc_bytes(edesc->hw_desc), 1); ablkcipher_decrypt()
3221 ablkcipher_unmap(jrdev, edesc, req); ablkcipher_decrypt()
3222 kfree(edesc); ablkcipher_decrypt()
3245 struct ablkcipher_edesc *edesc; ablkcipher_giv_edesc_alloc() local
3285 /* allocate space for base edesc and hw desc commands, link tables */ ablkcipher_giv_edesc_alloc()
3286 edesc = kmalloc(sizeof(*edesc) + desc_bytes + ablkcipher_giv_edesc_alloc()
3288 if (!edesc) { ablkcipher_giv_edesc_alloc()
3293 edesc->src_nents = src_nents; ablkcipher_giv_edesc_alloc()
3294 edesc->src_chained = src_chained; ablkcipher_giv_edesc_alloc()
3295 edesc->dst_nents = dst_nents; ablkcipher_giv_edesc_alloc()
3296 edesc->dst_chained = dst_chained; ablkcipher_giv_edesc_alloc()
3297 edesc->sec4_sg_bytes = sec4_sg_bytes; ablkcipher_giv_edesc_alloc()
3298 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + ablkcipher_giv_edesc_alloc()
3303 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); ablkcipher_giv_edesc_alloc()
3308 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, ablkcipher_giv_edesc_alloc()
3312 edesc->sec4_sg + sec4_sg_index, 0); ablkcipher_giv_edesc_alloc()
3315 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, ablkcipher_giv_edesc_alloc()
3317 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { ablkcipher_giv_edesc_alloc()
3321 edesc->iv_dma = iv_dma; ablkcipher_giv_edesc_alloc()
3326 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, ablkcipher_giv_edesc_alloc()
3331 return edesc; ablkcipher_giv_edesc_alloc()
3337 struct ablkcipher_edesc *edesc; ablkcipher_givencrypt() local
3346 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * ablkcipher_givencrypt()
3348 if (IS_ERR(edesc)) ablkcipher_givencrypt()
3349 return PTR_ERR(edesc); ablkcipher_givencrypt()
3353 edesc, req, iv_contig); ablkcipher_givencrypt()
3357 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, ablkcipher_givencrypt()
3358 desc_bytes(edesc->hw_desc), 1); ablkcipher_givencrypt()
3360 desc = edesc->hw_desc; ablkcipher_givencrypt()
3366 ablkcipher_unmap(jrdev, edesc, req); ablkcipher_givencrypt()
3367 kfree(edesc); ablkcipher_givencrypt()
2153 aead_unmap(struct device *dev, struct aead_edesc *edesc, struct aead_request *req) aead_unmap() argument
2169 ablkcipher_unmap(struct device *dev, struct ablkcipher_edesc *edesc, struct ablkcipher_request *req) ablkcipher_unmap() argument
2345 init_aead_job(u32 *sh_desc, dma_addr_t ptr, struct aead_edesc *edesc, struct aead_request *req, bool all_contig, bool encrypt) init_aead_job() argument
2430 init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, struct aead_edesc *edesc, struct aead_request *req, int contig) init_aead_giv_job() argument
2506 init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, struct ablkcipher_request *req, bool iv_contig) init_ablkcipher_job() argument
2563 init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, struct ablkcipher_request *req, bool iv_contig) init_ablkcipher_giv_job() argument
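
The caamalg.c completion callbacks above recover the extended descriptor from the hardware-descriptor pointer with "(struct aead_edesc *)((char *)desc - ...)", i.e. by subtracting the offset of the embedded hw_desc member. A self-contained model of that recovery; the structure is a simplified stand-in.

    #include <stddef.h>
    #include <stdint.h>

    struct aead_edesc_sketch {
            int src_nents;
            uint32_t hw_desc[16];               /* embedded job descriptor */
    };

    static struct aead_edesc_sketch *edesc_from_desc(uint32_t *desc)
    {
            /* Walk back from the embedded member to its container, the
             * same arithmetic the callbacks perform on completion. */
            return (struct aead_edesc_sketch *)
                    ((char *)desc - offsetof(struct aead_edesc_sketch, hw_desc));
    }
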
/linux-4.1.27/drivers/crypto/
talitos.c
761 struct talitos_edesc *edesc, talitos_sg_unmap()
765 unsigned int src_nents = edesc->src_nents ? : 1; talitos_sg_unmap()
766 unsigned int dst_nents = edesc->dst_nents ? : 1; talitos_sg_unmap()
769 if (edesc->src_chained) talitos_sg_unmap()
775 if (edesc->dst_chained) talitos_sg_unmap()
783 if (edesc->src_chained) talitos_sg_unmap()
790 struct talitos_edesc *edesc, ipsec_esp_unmap()
793 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); ipsec_esp_unmap()
794 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE); ipsec_esp_unmap()
795 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); ipsec_esp_unmap()
796 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); ipsec_esp_unmap()
798 if (edesc->assoc_chained) ipsec_esp_unmap()
803 edesc->assoc_nents ? edesc->assoc_nents - 1 : 1, ipsec_esp_unmap()
806 talitos_sg_unmap(dev, edesc, areq->src, areq->dst); ipsec_esp_unmap()
808 if (edesc->dma_len) ipsec_esp_unmap()
809 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, ipsec_esp_unmap()
823 struct talitos_edesc *edesc; ipsec_esp_encrypt_done() local
827 edesc = container_of(desc, struct talitos_edesc, desc); ipsec_esp_encrypt_done()
829 ipsec_esp_unmap(dev, edesc, areq); ipsec_esp_encrypt_done()
832 if (edesc->dst_nents) { ipsec_esp_encrypt_done()
833 icvdata = &edesc->link_tbl[edesc->src_nents + ipsec_esp_encrypt_done()
834 edesc->dst_nents + 2 + ipsec_esp_encrypt_done()
835 edesc->assoc_nents]; ipsec_esp_encrypt_done()
836 sg = sg_last(areq->dst, edesc->dst_nents); ipsec_esp_encrypt_done()
841 kfree(edesc); ipsec_esp_encrypt_done()
853 struct talitos_edesc *edesc; ipsec_esp_decrypt_swauth_done() local
857 edesc = container_of(desc, struct talitos_edesc, desc); ipsec_esp_decrypt_swauth_done()
859 ipsec_esp_unmap(dev, edesc, req); ipsec_esp_decrypt_swauth_done()
863 if (edesc->dma_len) ipsec_esp_decrypt_swauth_done()
864 icvdata = &edesc->link_tbl[edesc->src_nents + ipsec_esp_decrypt_swauth_done()
865 edesc->dst_nents + 2 + ipsec_esp_decrypt_swauth_done()
866 edesc->assoc_nents]; ipsec_esp_decrypt_swauth_done()
868 icvdata = &edesc->link_tbl[0]; ipsec_esp_decrypt_swauth_done()
870 sg = sg_last(req->dst, edesc->dst_nents ? : 1); ipsec_esp_decrypt_swauth_done()
875 kfree(edesc); ipsec_esp_decrypt_swauth_done()
885 struct talitos_edesc *edesc; ipsec_esp_decrypt_hwauth_done() local
887 edesc = container_of(desc, struct talitos_edesc, desc); ipsec_esp_decrypt_hwauth_done()
889 ipsec_esp_unmap(dev, edesc, req); ipsec_esp_decrypt_hwauth_done()
896 kfree(edesc); ipsec_esp_decrypt_hwauth_done()
940 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, ipsec_esp() argument
948 struct talitos_desc *desc = &edesc->desc; ipsec_esp()
961 if (edesc->assoc_nents) { ipsec_esp()
962 int tbl_off = edesc->src_nents + edesc->dst_nents + 2; ipsec_esp()
963 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; ipsec_esp()
965 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * ipsec_esp()
970 sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1, ipsec_esp()
977 to_talitos_ptr(tbl_ptr, edesc->iv_dma); ipsec_esp()
981 dma_sync_single_for_device(dev, edesc->dma_link_tbl, ipsec_esp()
982 edesc->dma_len, DMA_BIDIRECTIONAL); ipsec_esp()
988 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); ipsec_esp()
993 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma); ipsec_esp()
997 dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); ipsec_esp()
1013 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, ipsec_esp()
1016 edesc->src_chained); ipsec_esp()
1023 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) ipsec_esp()
1027 &edesc->link_tbl[0]); ipsec_esp()
1030 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); ipsec_esp()
1031 dma_sync_single_for_device(dev, edesc->dma_link_tbl, ipsec_esp()
1032 edesc->dma_len, ipsec_esp()
1047 edesc->dst_nents ? : 1, ipsec_esp()
1048 DMA_FROM_DEVICE, edesc->dst_chained); ipsec_esp()
1053 int tbl_off = edesc->src_nents + 1; ipsec_esp()
1054 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; ipsec_esp()
1056 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + ipsec_esp()
1069 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + ipsec_esp()
1070 (tbl_off + edesc->dst_nents + 1 + ipsec_esp()
1071 edesc->assoc_nents) * ipsec_esp()
1074 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, ipsec_esp()
1075 edesc->dma_len, DMA_BIDIRECTIONAL); ipsec_esp()
1084 ipsec_esp_unmap(dev, edesc, areq); ipsec_esp()
1085 kfree(edesc); ipsec_esp()
1126 struct talitos_edesc *edesc; talitos_edesc_alloc() local
1171 * allocate space for base edesc plus the link tables, talitos_edesc_alloc()
1185 edesc = kmalloc(alloc_len, GFP_DMA | flags); talitos_edesc_alloc()
1186 if (!edesc) { talitos_edesc_alloc()
1201 edesc->assoc_nents = assoc_nents; talitos_edesc_alloc()
1202 edesc->src_nents = src_nents; talitos_edesc_alloc()
1203 edesc->dst_nents = dst_nents; talitos_edesc_alloc()
1204 edesc->assoc_chained = assoc_chained; talitos_edesc_alloc()
1205 edesc->src_chained = src_chained; talitos_edesc_alloc()
1206 edesc->dst_chained = dst_chained; talitos_edesc_alloc()
1207 edesc->iv_dma = iv_dma; talitos_edesc_alloc()
1208 edesc->dma_len = dma_len; talitos_edesc_alloc()
1210 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], talitos_edesc_alloc()
1211 edesc->dma_len, talitos_edesc_alloc()
1214 return edesc; talitos_edesc_alloc()
1234 struct talitos_edesc *edesc; aead_encrypt() local
1237 edesc = aead_edesc_alloc(req, req->iv, 0, true); aead_encrypt()
1238 if (IS_ERR(edesc)) aead_encrypt()
1239 return PTR_ERR(edesc); aead_encrypt()
1242 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; aead_encrypt()
1244 return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done); aead_encrypt()
1253 struct talitos_edesc *edesc; aead_decrypt() local
1260 edesc = aead_edesc_alloc(req, req->iv, 1, false); aead_decrypt()
1261 if (IS_ERR(edesc)) aead_decrypt()
1262 return PTR_ERR(edesc); aead_decrypt()
1265 ((!edesc->src_nents && !edesc->dst_nents) || aead_decrypt()
1269 edesc->desc.hdr = ctx->desc_hdr_template | aead_decrypt()
1274 edesc->desc.hdr_lo = 0; aead_decrypt()
1276 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done); aead_decrypt()
1280 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; aead_decrypt()
1283 if (edesc->dma_len) aead_decrypt()
1284 icvdata = &edesc->link_tbl[edesc->src_nents + aead_decrypt()
1285 edesc->dst_nents + 2 + aead_decrypt()
1286 edesc->assoc_nents]; aead_decrypt()
1288 icvdata = &edesc->link_tbl[0]; aead_decrypt()
1290 sg = sg_last(req->src, edesc->src_nents ? : 1); aead_decrypt()
1295 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done); aead_decrypt()
1303 struct talitos_edesc *edesc; aead_givencrypt() local
1306 edesc = aead_edesc_alloc(areq, req->giv, 0, true); aead_givencrypt()
1307 if (IS_ERR(edesc)) aead_givencrypt()
1308 return PTR_ERR(edesc); aead_givencrypt()
1311 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; aead_givencrypt()
1317 return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done); aead_givencrypt()
1332 struct talitos_edesc *edesc, common_nonsnoop_unmap()
1335 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); common_nonsnoop_unmap()
1336 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); common_nonsnoop_unmap()
1337 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); common_nonsnoop_unmap()
1339 talitos_sg_unmap(dev, edesc, areq->src, areq->dst); common_nonsnoop_unmap()
1341 if (edesc->dma_len) common_nonsnoop_unmap()
1342 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, common_nonsnoop_unmap()
1351 struct talitos_edesc *edesc; ablkcipher_done() local
1353 edesc = container_of(desc, struct talitos_edesc, desc); ablkcipher_done()
1355 common_nonsnoop_unmap(dev, edesc, areq); ablkcipher_done()
1357 kfree(edesc); ablkcipher_done()
1362 static int common_nonsnoop(struct talitos_edesc *edesc, common_nonsnoop() argument
1371 struct talitos_desc *desc = &edesc->desc; common_nonsnoop()
1382 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); common_nonsnoop()
1396 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, common_nonsnoop()
1399 edesc->src_chained); common_nonsnoop()
1405 &edesc->link_tbl[0]); common_nonsnoop()
1407 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); common_nonsnoop()
1409 dma_sync_single_for_device(dev, edesc->dma_link_tbl, common_nonsnoop()
1410 edesc->dma_len, common_nonsnoop()
1425 edesc->dst_nents ? : 1, common_nonsnoop()
1426 DMA_FROM_DEVICE, edesc->dst_chained); common_nonsnoop()
1432 &edesc->link_tbl[edesc->src_nents + 1]; common_nonsnoop()
1434 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + common_nonsnoop()
1435 (edesc->src_nents + 1) * common_nonsnoop()
1440 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, common_nonsnoop()
1441 edesc->dma_len, DMA_BIDIRECTIONAL); common_nonsnoop()
1455 common_nonsnoop_unmap(dev, edesc, areq); common_nonsnoop()
1456 kfree(edesc); common_nonsnoop()
1477 struct talitos_edesc *edesc; ablkcipher_encrypt() local
1480 edesc = ablkcipher_edesc_alloc(areq, true); ablkcipher_encrypt()
1481 if (IS_ERR(edesc)) ablkcipher_encrypt()
1482 return PTR_ERR(edesc); ablkcipher_encrypt()
1485 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; ablkcipher_encrypt()
1487 return common_nonsnoop(edesc, areq, ablkcipher_done); ablkcipher_encrypt()
1494 struct talitos_edesc *edesc; ablkcipher_decrypt() local
1497 edesc = ablkcipher_edesc_alloc(areq, false); ablkcipher_decrypt()
1498 if (IS_ERR(edesc)) ablkcipher_decrypt()
1499 return PTR_ERR(edesc); ablkcipher_decrypt()
1501 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; ablkcipher_decrypt()
1503 return common_nonsnoop(edesc, areq, ablkcipher_done); ablkcipher_decrypt()
1507 struct talitos_edesc *edesc, common_nonsnoop_hash_unmap()
1512 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); common_nonsnoop_hash_unmap()
1515 if (edesc->desc.ptr[1].len) common_nonsnoop_hash_unmap()
1516 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], common_nonsnoop_hash_unmap()
1519 if (edesc->desc.ptr[2].len) common_nonsnoop_hash_unmap()
1520 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], common_nonsnoop_hash_unmap()
1523 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); common_nonsnoop_hash_unmap()
1525 if (edesc->dma_len) common_nonsnoop_hash_unmap()
1526 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, common_nonsnoop_hash_unmap()
1536 struct talitos_edesc *edesc = ahash_done() local
1545 common_nonsnoop_hash_unmap(dev, edesc, areq); ahash_done()
1547 kfree(edesc); ahash_done()
1552 static int common_nonsnoop_hash(struct talitos_edesc *edesc, common_nonsnoop_hash() argument
1562 struct talitos_desc *desc = &edesc->desc; common_nonsnoop_hash()
1595 edesc->src_nents ? : 1, common_nonsnoop_hash()
1596 DMA_TO_DEVICE, edesc->src_chained); common_nonsnoop_hash()
1602 &edesc->link_tbl[0]); common_nonsnoop_hash()
1605 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); common_nonsnoop_hash()
1607 edesc->dma_link_tbl, common_nonsnoop_hash()
1608 edesc->dma_len, common_nonsnoop_hash()
1635 common_nonsnoop_hash_unmap(dev, edesc, areq); common_nonsnoop_hash()
1636 kfree(edesc); common_nonsnoop_hash()
1701 struct talitos_edesc *edesc; ahash_process_req() local
1754 edesc = ahash_edesc_alloc(areq, nbytes_to_hash); ahash_process_req()
1755 if (IS_ERR(edesc)) ahash_process_req()
1756 return PTR_ERR(edesc); ahash_process_req()
1758 edesc->desc.hdr = ctx->desc_hdr_template; ahash_process_req()
1762 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; ahash_process_req()
1764 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; ahash_process_req()
1768 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; ahash_process_req()
1774 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; ahash_process_req()
1776 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_process_req()
760 talitos_sg_unmap(struct device *dev, struct talitos_edesc *edesc, struct scatterlist *src, struct scatterlist *dst) talitos_sg_unmap() argument
789 ipsec_esp_unmap(struct device *dev, struct talitos_edesc *edesc, struct aead_request *areq) ipsec_esp_unmap() argument
1331 common_nonsnoop_unmap(struct device *dev, struct talitos_edesc *edesc, struct ablkcipher_request *areq) common_nonsnoop_unmap() argument
1506 common_nonsnoop_hash_unmap(struct device *dev, struct talitos_edesc *edesc, struct ahash_request *areq) common_nonsnoop_hash_unmap() argument
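
Several talitos.c hits use the GNU C expression "edesc->src_nents ? : 1": "a ?: b" yields a when a is nonzero, else b, so a zero nents count (a single contiguous buffer with no scatterlist) maps to one DMA mapping entry. A tiny demonstration; the helper name is hypothetical, and the ?: form is a GNU extension accepted by gcc and clang.

    #include <assert.h>

    static int nents_or_one(int src_nents)
    {
            return src_nents ? : 1;             /* GNU "elvis" extension */
    }

    int main(void)
    {
            assert(nents_or_one(0) == 1);       /* contiguous buffer case */
            assert(nents_or_one(4) == 4);       /* real scatterlist case  */
            return 0;
    }
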
/linux-4.1.27/drivers/net/ethernet/tile/
tilegx.c
1668 /* Determine how many edesc's are needed for TSO.
1692 /* One edesc for header and for each piece of the payload. */ tso_count_edescs()
1849 /* Prepare to egress the headers: set up header edesc. */ tso_egress()
1943 /* Determine how many mpipe edesc's are needed. */ tile_net_tx_tso()
2014 gxio_mpipe_edesc_t edesc = { { 0 } }; tile_net_tx() local
2024 edesc.stack_idx = md->first_buffer_stack; tile_net_tx()
2028 edesc.xfer_size = frags[i].length; tile_net_tx()
2029 edesc.va = va_to_tile_io_addr(frags[i].buf); tile_net_tx()
2030 edescs[i] = edesc; tile_net_tx()
2033 /* Mark the final edesc. */ tile_net_tx()
2036 /* Add checksum info to the initial edesc, if needed. */ tile_net_tx()
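
tile_net_tx() above builds one gxio_mpipe_edesc_t per fragment (stack_idx, xfer_size, va) and then marks the final descriptor to close the packet. A hedged model with a simplified stand-in structure; the real gxio_mpipe_edesc_t fields and its end-of-packet flag may differ from the names used here.

    #include <stdint.h>

    /* Simplified stand-in for gxio_mpipe_edesc_t. */
    struct edesc_sketch {
            unsigned int stack_idx;             /* buffer stack for return */
            unsigned int xfer_size;             /* bytes in this buffer    */
            uint64_t     va;                    /* I/O address of the data */
            unsigned int bound;                 /* 1 on a packet's last edesc */
    };

    struct frag_sketch { void *buf; unsigned int length; };

    static void fill_edescs(struct edesc_sketch *edescs,
                            const struct frag_sketch *frags, int num_frags,
                            unsigned int buffer_stack)
    {
            /* Caller guarantees at least one fragment. */
            for (int i = 0; i < num_frags; i++) {
                    struct edesc_sketch edesc = { 0 };

                    edesc.stack_idx = buffer_stack;
                    edesc.xfer_size = frags[i].length;
                    edesc.va = (uint64_t)(uintptr_t)frags[i].buf;
                    edescs[i] = edesc;
            }
            edescs[num_frags - 1].bound = 1;    /* mark end of packet */
    }
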
/linux-4.1.27/arch/tile/include/gxio/
mpipe.h
1440 * a copy of "edesc->words[0]" on the stack for no obvious reason.
1465 /* Post an edesc to a given slot in an equeue.
1467 * This function copies the supplied edesc into entry "slot mod N" in
1484 * @param edesc The egress descriptor to be posted.
1488 gxio_mpipe_edesc_t edesc, gxio_mpipe_equeue_put_at()
1491 gxio_mpipe_equeue_put_at_aux(equeue, edesc.words, slot); gxio_mpipe_equeue_put_at()
1494 /* Post an edesc to the next slot in an equeue.
1500 * @param edesc The egress descriptor to be posted.
1504 gxio_mpipe_edesc_t edesc) gxio_mpipe_equeue_put()
1510 gxio_mpipe_equeue_put_at(equeue, edesc, slot); gxio_mpipe_equeue_put()
1537 /* Determine if a given edesc has been completed.
1544 * @param completion_slot The completion slot used by the edesc.
1548 * @return True iff the given edesc has been completed.
1487 gxio_mpipe_equeue_put_at(gxio_mpipe_equeue_t *equeue, gxio_mpipe_edesc_t edesc, unsigned long slot) gxio_mpipe_equeue_put_at() argument
1503 gxio_mpipe_equeue_put(gxio_mpipe_equeue_t *equeue, gxio_mpipe_edesc_t edesc) gxio_mpipe_equeue_put() argument
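
The mpipe.h comments above say gxio_mpipe_equeue_put_at() copies the supplied edesc into entry "slot mod N" of an N-entry equeue, so slot numbers simply wrap around the ring. A minimal model of that wrap, assuming N is a power of two so the mod reduces to a mask (a common hardware-ring convention, not confirmed by these hits).

    #include <stdint.h>

    struct ring_sketch {
            uint64_t *entries;                  /* N ring entries          */
            unsigned long mask;                 /* N - 1, N a power of two */
    };

    static void put_at(struct ring_sketch *ring, uint64_t word,
                       unsigned long slot)
    {
            /* "slot mod N": ever-increasing slots wrap around the ring. */
            ring->entries[slot & ring->mask] = word;
    }
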
/linux-4.1.27/drivers/block/
cciss_scsi.c
333 printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",

Completed in 460 milliseconds