Lines matching references to jrdev (identifier search over the CAAM ahash driver, drivers/crypto/caam/caamhash.c)

102 	struct device *jrdev;  member
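
jrdev is the driver's handle to a CAAM job ring: every DMA mapping in this listing is made against it, and every job descriptor is enqueued to it. A hedged condensation of the containing context, showing only the fields that appear in the matches below (the real struct, caam_hash_ctx, carries more state):

	#include <linux/device.h>
	#include <linux/types.h>

	/* Illustrative only: field names are taken from the matches in
	 * this listing; layout and remaining members are omitted. */
	struct caam_hash_ctx_sketch {
		struct device *jrdev;			/* from caam_jr_alloc(), see cra_init below */
		dma_addr_t sh_desc_update_dma;		/* mapped shared descriptors */
		dma_addr_t sh_desc_update_first_dma;
		dma_addr_t sh_desc_fin_dma;
		dma_addr_t sh_desc_finup_dma;
		dma_addr_t sh_desc_digest_dma;
		dma_addr_t key_dma;			/* mapped split key */
	};
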
149 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, in map_seq_out_ptr_ctx() argument
153 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, in map_seq_out_ptr_ctx()
155 if (dma_mapping_error(jrdev, state->ctx_dma)) { in map_seq_out_ptr_ctx()
156 dev_err(jrdev, "unable to map ctx\n"); in map_seq_out_ptr_ctx()
166 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, in map_seq_out_ptr_result() argument
171 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); in map_seq_out_ptr_result()
178 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, in buf_map_to_sec4_sg() argument
184 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); in buf_map_to_sec4_sg()
191 static inline void src_map_to_sec4_sg(struct device *jrdev, in src_map_to_sec4_sg() argument
195 dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE); in src_map_to_sec4_sg()
204 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, in try_buf_map_to_sec4_sg() argument
208 if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) in try_buf_map_to_sec4_sg()
209 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); in try_buf_map_to_sec4_sg()
211 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); in try_buf_map_to_sec4_sg()
219 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, in ctx_map_to_sec4_sg() argument
223 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); in ctx_map_to_sec4_sg()
224 if (dma_mapping_error(jrdev, state->ctx_dma)) { in ctx_map_to_sec4_sg()
225 dev_err(jrdev, "unable to map ctx\n"); in ctx_map_to_sec4_sg()
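
All of the inline helpers above share one defensive idiom: map a buffer against the job ring device, then validate the handle with dma_mapping_error() before the address is written into a descriptor or sec4_sg entry. Only the direction varies: DMA_TO_DEVICE for inputs, DMA_FROM_DEVICE for results, and a caller-supplied flag for the running context. A minimal sketch of the idiom, with an illustrative helper name:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Sketch only: the real helpers additionally program the mapped
	 * address into the job descriptor or the sec4_sg table. */
	static int map_buf_sketch(struct device *jrdev, void *buf, size_t len,
				  enum dma_data_direction dir, dma_addr_t *out)
	{
		*out = dma_map_single(jrdev, buf, len, dir);
		if (dma_mapping_error(jrdev, *out)) {
			dev_err(jrdev, "unable to map buffer\n");
			return -ENOMEM;
		}
		return 0;
	}
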
323 struct device *jrdev = ctx->jrdev; in ahash_set_sh_desc() local
346 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), in ahash_set_sh_desc()
348 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { in ahash_set_sh_desc()
349 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
364 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, in ahash_set_sh_desc()
367 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { in ahash_set_sh_desc()
368 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
383 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), in ahash_set_sh_desc()
385 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { in ahash_set_sh_desc()
386 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
401 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), in ahash_set_sh_desc()
403 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { in ahash_set_sh_desc()
404 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
419 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, in ahash_set_sh_desc()
422 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { in ahash_set_sh_desc()
423 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
439 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, in gen_split_hash_key()
448 struct device *jrdev = ctx->jrdev; in hash_digest_key() local
456 dev_err(jrdev, "unable to allocate key input memory\n"); in hash_digest_key()
462 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, in hash_digest_key()
464 if (dma_mapping_error(jrdev, src_dma)) { in hash_digest_key()
465 dev_err(jrdev, "unable to map key input memory\n"); in hash_digest_key()
469 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, in hash_digest_key()
471 if (dma_mapping_error(jrdev, dst_dma)) { in hash_digest_key()
472 dev_err(jrdev, "unable to map key output memory\n"); in hash_digest_key()
473 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); in hash_digest_key()
498 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); in hash_digest_key()
510 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); in hash_digest_key()
511 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); in hash_digest_key()
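
hash_digest_key() is the one synchronous job in this listing: it maps the key input and output, enqueues the descriptor, blocks on a completion that the callback signals, and then unmaps both buffers unconditionally. A hedged sketch of that flow; jr_sync_result and sync_done are illustrative stand-ins modeled on the driver's split_key_done pattern, and the descriptor is assumed to be already built:

	#include <linux/completion.h>
	#include <linux/dma-mapping.h>
	/* caam_jr_enqueue() and caam_jr_strstatus() come from the CAAM
	 * job-ring and error-reporting code. */

	struct jr_sync_result {
		struct completion completion;
		int err;
	};

	static void sync_done(struct device *jrdev, u32 *desc, u32 err, void *arg)
	{
		struct jr_sync_result *res = arg;

		if (err)
			caam_jr_strstatus(jrdev, err);	/* decode the status word */
		res->err = err;
		complete(&res->completion);
	}

	static int digest_key_sketch(struct device *jrdev, u32 *desc,
				     void *key_in, u32 keylen,
				     void *key_out, int digestsize)
	{
		struct jr_sync_result result = { .err = 0 };
		dma_addr_t src_dma, dst_dma;
		int ret;

		src_dma = dma_map_single(jrdev, key_in, keylen, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, src_dma)) {
			dev_err(jrdev, "unable to map key input memory\n");
			return -ENOMEM;
		}

		dst_dma = dma_map_single(jrdev, key_out, digestsize,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(jrdev, dst_dma)) {
			dev_err(jrdev, "unable to map key output memory\n");
			dma_unmap_single(jrdev, src_dma, keylen, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		init_completion(&result.completion);
		ret = caam_jr_enqueue(jrdev, desc, sync_done, &result);
		if (!ret) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.err;
		}

		dma_unmap_single(jrdev, src_dma, keylen, DMA_TO_DEVICE);
		dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
		return ret;
	}
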
526 struct device *jrdev = ctx->jrdev; in ahash_setkey() local
564 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, in ahash_setkey()
566 if (dma_mapping_error(jrdev, ctx->key_dma)) { in ahash_setkey()
567 dev_err(jrdev, "unable to map key i/o memory\n"); in ahash_setkey()
579 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, in ahash_setkey()
637 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, in ahash_done() argument
648 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done()
654 caam_jr_strstatus(jrdev, err); in ahash_done()
656 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_done()
672 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, in ahash_done_bi() argument
683 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done_bi()
689 caam_jr_strstatus(jrdev, err); in ahash_done_bi()
691 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); in ahash_done_bi()
707 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, in ahash_done_ctx_src() argument
718 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done_ctx_src()
724 caam_jr_strstatus(jrdev, err); in ahash_done_ctx_src()
726 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); in ahash_done_ctx_src()
742 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, in ahash_done_ctx_dst() argument
753 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done_ctx_dst()
759 caam_jr_strstatus(jrdev, err); in ahash_done_ctx_dst()
761 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); in ahash_done_ctx_dst()
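
The four ahash_done*() callbacks share one control flow and differ only in which unmap variant and DMA direction they use: recover the extended descriptor, decode a nonzero status with caam_jr_strstatus(), unmap, free, and complete the crypto request. A hedged sketch of that common shape; struct ahash_edesc and ahash_unmap() are internal to the driver, and the edesc recovery assumes the hardware descriptor is embedded inside the edesc:

	#include <crypto/hash.h>
	#include <linux/slab.h>

	static void ahash_done_sketch(struct device *jrdev, u32 *desc, u32 err,
				      void *context)
	{
		struct ahash_request *req = context;
		int digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		struct ahash_edesc *edesc;

		/* The job descriptor lives inside the edesc, so the edesc can
		 * be recovered from the descriptor pointer (assumed layout). */
		edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);

		if (err)
			caam_jr_strstatus(jrdev, err);	/* decode the status word */

		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);

		req->base.complete(&req->base, err);
	}
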
783 struct device *jrdev = ctx->jrdev; in ahash_update_ctx() local
817 dev_err(jrdev, in ahash_update_ctx()
827 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, in ahash_update_ctx()
832 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, in ahash_update_ctx()
838 src_map_to_sec4_sg(jrdev, req->src, src_nents, in ahash_update_ctx()
856 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_ctx()
859 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_ctx()
860 dev_err(jrdev, "unable to map S/G table\n"); in ahash_update_ctx()
875 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); in ahash_update_ctx()
879 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, in ahash_update_ctx()
905 struct device *jrdev = ctx->jrdev; in ahash_final_ctx() local
927 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_final_ctx()
940 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, in ahash_final_ctx()
945 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, in ahash_final_ctx()
950 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_final_ctx()
952 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_final_ctx()
953 dev_err(jrdev, "unable to map S/G table\n"); in ahash_final_ctx()
960 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_final_ctx()
962 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_final_ctx()
963 dev_err(jrdev, "unable to map dst\n"); in ahash_final_ctx()
972 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); in ahash_final_ctx()
976 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); in ahash_final_ctx()
988 struct device *jrdev = ctx->jrdev; in ahash_finup_ctx() local
1013 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_finup_ctx()
1026 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, in ahash_finup_ctx()
1031 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, in ahash_finup_ctx()
1035 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + in ahash_finup_ctx()
1038 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_finup_ctx()
1040 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_finup_ctx()
1041 dev_err(jrdev, "unable to map S/G table\n"); in ahash_finup_ctx()
1048 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_finup_ctx()
1050 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_finup_ctx()
1051 dev_err(jrdev, "unable to map dst\n"); in ahash_finup_ctx()
1060 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); in ahash_finup_ctx()
1064 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); in ahash_finup_ctx()
1075 struct device *jrdev = ctx->jrdev; in ahash_digest() local
1089 dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); in ahash_digest()
1096 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_digest()
1110 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_digest()
1112 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_digest()
1113 dev_err(jrdev, "unable to map S/G table\n"); in ahash_digest()
1124 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_digest()
1126 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_digest()
1127 dev_err(jrdev, "unable to map dst\n"); in ahash_digest()
1136 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); in ahash_digest()
1140 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
1153 struct device *jrdev = ctx->jrdev; in ahash_final_no_ctx() local
1168 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_final_no_ctx()
1177 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); in ahash_final_no_ctx()
1178 if (dma_mapping_error(jrdev, state->buf_dma)) { in ahash_final_no_ctx()
1179 dev_err(jrdev, "unable to map src\n"); in ahash_final_no_ctx()
1185 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_final_no_ctx()
1187 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_final_no_ctx()
1188 dev_err(jrdev, "unable to map dst\n"); in ahash_final_no_ctx()
1198 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); in ahash_final_no_ctx()
1202 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_final_no_ctx()
1215 struct device *jrdev = ctx->jrdev; in ahash_update_no_ctx() local
1247 dev_err(jrdev, in ahash_update_no_ctx()
1258 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1260 src_map_to_sec4_sg(jrdev, req->src, src_nents, in ahash_update_no_ctx()
1275 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1278 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_no_ctx()
1279 dev_err(jrdev, "unable to map S/G table\n"); in ahash_update_no_ctx()
1285 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); in ahash_update_no_ctx()
1295 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); in ahash_update_no_ctx()
1302 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, in ahash_update_no_ctx()
1329 struct device *jrdev = ctx->jrdev; in ahash_finup_no_ctx() local
1353 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_finup_no_ctx()
1366 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, in ahash_finup_no_ctx()
1370 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1); in ahash_finup_no_ctx()
1372 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_finup_no_ctx()
1374 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_finup_no_ctx()
1375 dev_err(jrdev, "unable to map S/G table\n"); in ahash_finup_no_ctx()
1382 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_finup_no_ctx()
1384 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_finup_no_ctx()
1385 dev_err(jrdev, "unable to map dst\n"); in ahash_finup_no_ctx()
1394 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); in ahash_finup_no_ctx()
1398 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_finup_no_ctx()
1411 struct device *jrdev = ctx->jrdev; in ahash_update_first() local
1433 dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); in ahash_update_first()
1443 dev_err(jrdev, in ahash_update_first()
1457 edesc->sec4_sg_dma = dma_map_single(jrdev, in ahash_update_first()
1461 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_first()
1462 dev_err(jrdev, "unable to map S/G table\n"); in ahash_update_first()
1483 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); in ahash_update_first()
1493 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, in ahash_update_first()
1501 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, in ahash_update_first()
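
Every ahash_*() request path above ends the same way: map the sec4_sg table, map the output (the result buffer or the running context), enqueue with the matching done callback, and, only if the enqueue itself is refused, unwind the mappings and free the edesc; a 0 return from caam_jr_enqueue() becomes -EINPROGRESS toward the crypto API. A hedged sketch of that shared tail, with the edesc and descriptor setup elided and names mirroring the listing:

	/* Sketch only: ahash_edesc, ahash_unmap(), ahash_done() and
	 * map_seq_out_ptr_result() are the driver's own, per the matches. */
	static int ahash_submit_sketch(struct device *jrdev, u32 *desc,
				       struct ahash_edesc *edesc,
				       struct ahash_request *req,
				       int sec4_sg_bytes, int digestsize)
	{
		int ret;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
							digestsize);
		if (dma_mapping_error(jrdev, edesc->dst_dma)) {
			dev_err(jrdev, "unable to map dst\n");
			return -ENOMEM;
		}

		ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
		if (!ret) {
			ret = -EINPROGRESS;	/* job accepted, callback pending */
		} else {
			ahash_unmap(jrdev, edesc, req, digestsize);
			kfree(edesc);
		}

		return ret;
	}
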
1783 ctx->jrdev = caam_jr_alloc(); in caam_hash_cra_init()
1784 if (IS_ERR(ctx->jrdev)) { in caam_hash_cra_init()
1786 return PTR_ERR(ctx->jrdev); in caam_hash_cra_init()
1808 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) in caam_hash_cra_exit()
1809 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, in caam_hash_cra_exit()
1813 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) in caam_hash_cra_exit()
1814 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, in caam_hash_cra_exit()
1818 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) in caam_hash_cra_exit()
1819 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, in caam_hash_cra_exit()
1822 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) in caam_hash_cra_exit()
1823 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, in caam_hash_cra_exit()
1827 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) in caam_hash_cra_exit()
1828 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, in caam_hash_cra_exit()
1831 caam_jr_free(ctx->jrdev); in caam_hash_cra_exit()
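
The jrdev lifecycle brackets everything above: cra_init claims a job ring per transform with caam_jr_alloc(), and cra_exit tears down each shared-descriptor mapping only if it exists and did not fail to map, before releasing the ring. A hedged sketch; the driver sizes each unmap with desc_bytes() on the corresponding descriptor, abbreviated here to a single length:

	#include <linux/err.h>

	static int hash_cra_init_sketch(struct caam_hash_ctx *ctx)
	{
		ctx->jrdev = caam_jr_alloc();
		if (IS_ERR(ctx->jrdev)) {
			pr_err("Job Ring Device allocation for transform failed\n");
			return PTR_ERR(ctx->jrdev);
		}
		return 0;
	}

	static void hash_cra_exit_sketch(struct caam_hash_ctx *ctx, size_t desc_len)
	{
		/* Guarded unmap: only if the handle was set and the mapping
		 * succeeded, mirroring the checks in the listing. */
		if (ctx->sh_desc_update_dma &&
		    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
			dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
					 desc_len, DMA_TO_DEVICE);
		/* ...repeated for update_first, fin, finup and digest... */

		caam_jr_free(ctx->jrdev);
	}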