Lines Matching refs:jrdev

102 	struct device *jrdev;  member
140 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, in map_seq_out_ptr_ctx() argument
144 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, in map_seq_out_ptr_ctx()
146 if (dma_mapping_error(jrdev, state->ctx_dma)) { in map_seq_out_ptr_ctx()
147 dev_err(jrdev, "unable to map ctx\n"); in map_seq_out_ptr_ctx()
157 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, in map_seq_out_ptr_result() argument
162 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); in map_seq_out_ptr_result()
169 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, in buf_map_to_sec4_sg() argument
175 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); in buf_map_to_sec4_sg()
182 static inline void src_map_to_sec4_sg(struct device *jrdev, in src_map_to_sec4_sg() argument
187 dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained); in src_map_to_sec4_sg()
196 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, in try_buf_map_to_sec4_sg() argument
200 if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) in try_buf_map_to_sec4_sg()
201 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); in try_buf_map_to_sec4_sg()
203 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); in try_buf_map_to_sec4_sg()
211 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, in ctx_map_to_sec4_sg() argument
215 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); in ctx_map_to_sec4_sg()
216 if (dma_mapping_error(jrdev, state->ctx_dma)) { in ctx_map_to_sec4_sg()
217 dev_err(jrdev, "unable to map ctx\n"); in ctx_map_to_sec4_sg()
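
All six helpers above share the kernel's streaming-DMA idiom: map with dma_map_single(), then validate the handle with dma_mapping_error() before the hardware ever sees it. A minimal sketch of that idiom follows; sketch_map, buf and len are illustrative names, and the real helpers additionally record the handle in the request state and emit SEC4 scatter/gather entries.

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* map-then-check idiom from the helpers above; buf/len stand in
     * for state->caam_ctx, the hash buffers, etc. */
    static int sketch_map(struct device *jrdev, void *buf, size_t len,
                          enum dma_data_direction dir, dma_addr_t *handle)
    {
            *handle = dma_map_single(jrdev, buf, len, dir);
            if (dma_mapping_error(jrdev, *handle)) {
                    dev_err(jrdev, "unable to map ctx\n");
                    return -ENOMEM;
            }
            return 0;
    }
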
315 struct device *jrdev = ctx->jrdev; in ahash_set_sh_desc() local
338 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), in ahash_set_sh_desc()
340 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { in ahash_set_sh_desc()
341 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
356 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, in ahash_set_sh_desc()
359 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { in ahash_set_sh_desc()
360 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
375 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), in ahash_set_sh_desc()
377 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { in ahash_set_sh_desc()
378 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
393 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), in ahash_set_sh_desc()
395 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { in ahash_set_sh_desc()
396 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
411 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, in ahash_set_sh_desc()
414 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { in ahash_set_sh_desc()
415 dev_err(jrdev, "unable to map shared descriptor\n"); in ahash_set_sh_desc()
431 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, in gen_split_hash_key()
440 struct device *jrdev = ctx->jrdev; in hash_digest_key() local
448 dev_err(jrdev, "unable to allocate key input memory\n"); in hash_digest_key()
454 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, in hash_digest_key()
456 if (dma_mapping_error(jrdev, src_dma)) { in hash_digest_key()
457 dev_err(jrdev, "unable to map key input memory\n"); in hash_digest_key()
461 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, in hash_digest_key()
463 if (dma_mapping_error(jrdev, dst_dma)) { in hash_digest_key()
464 dev_err(jrdev, "unable to map key output memory\n"); in hash_digest_key()
465 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); in hash_digest_key()
490 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); in hash_digest_key()
502 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); in hash_digest_key()
503 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); in hash_digest_key()
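
hash_digest_key() is the one synchronous submitter in this listing: instead of reporting -EINPROGRESS it sleeps on a completion that the split_key_done() callback signals. A hedged sketch of that enqueue-and-wait shape, assuming the driver's split_key_result convention (a struct holding a completion plus an err field the callback fills in):

    struct split_key_result result;
    int ret;

    init_completion(&result.completion);
    ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
    if (!ret) {
            /* job accepted: sleep until the job-ring interrupt runs
             * split_key_done(), which records the status and wakes us */
            wait_for_completion_interruptible(&result.completion);
            ret = result.err;
    }

    /* the one-shot mappings are released whether or not the job ran
     * (see lines 502-503 above) */
    dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
    dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
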
518 struct device *jrdev = ctx->jrdev; in ahash_setkey() local
556 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, in ahash_setkey()
558 if (dma_mapping_error(jrdev, ctx->key_dma)) { in ahash_setkey()
559 dev_err(jrdev, "unable to map key i/o memory\n"); in ahash_setkey()
571 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, in ahash_setkey()
632 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, in ahash_done() argument
643 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done()
649 caam_jr_strstatus(jrdev, err); in ahash_done()
651 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_done()
667 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, in ahash_done_bi() argument
678 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done_bi()
684 caam_jr_strstatus(jrdev, err); in ahash_done_bi()
686 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); in ahash_done_bi()
702 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, in ahash_done_ctx_src() argument
713 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done_ctx_src()
719 caam_jr_strstatus(jrdev, err); in ahash_done_ctx_src()
721 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); in ahash_done_ctx_src()
737 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, in ahash_done_ctx_dst() argument
748 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ahash_done_ctx_dst()
754 caam_jr_strstatus(jrdev, err); in ahash_done_ctx_dst()
756 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); in ahash_done_ctx_dst()
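
The four ahash_done* callbacks differ only in which unmap variant and DMA direction they pair with (lines 651, 686, 721, 756). Their common shape is sketched below; the descriptor-to-edesc recovery and the final kfree are elided, and the completion call follows the crypto API as it stood in this code's era.

    static void ahash_done_sketch(struct device *jrdev, u32 *desc, u32 err,
                                  void *context)
    {
            struct ahash_request *req = context;

            if (err)
                    caam_jr_strstatus(jrdev, err);  /* decode CAAM status word */

            /* ... ahash_unmap() or ahash_unmap_ctx(), with the direction
             * matching how the job mapped the running context ... */

            req->base.complete(&req->base, err);
    }
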
778 struct device *jrdev = ctx->jrdev; in ahash_update_ctx() local
813 dev_err(jrdev, in ahash_update_ctx()
824 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, in ahash_update_ctx()
829 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, in ahash_update_ctx()
835 src_map_to_sec4_sg(jrdev, req->src, src_nents, in ahash_update_ctx()
854 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_ctx()
857 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_ctx()
858 dev_err(jrdev, "unable to map S/G table\n"); in ahash_update_ctx()
873 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); in ahash_update_ctx()
877 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, in ahash_update_ctx()
903 struct device *jrdev = ctx->jrdev; in ahash_final_ctx() local
925 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_final_ctx()
938 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, in ahash_final_ctx()
943 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, in ahash_final_ctx()
948 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_final_ctx()
950 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_final_ctx()
951 dev_err(jrdev, "unable to map S/G table\n"); in ahash_final_ctx()
958 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_final_ctx()
960 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_final_ctx()
961 dev_err(jrdev, "unable to map dst\n"); in ahash_final_ctx()
970 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); in ahash_final_ctx()
974 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); in ahash_final_ctx()
986 struct device *jrdev = ctx->jrdev; in ahash_finup_ctx() local
1012 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_finup_ctx()
1026 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, in ahash_finup_ctx()
1031 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, in ahash_finup_ctx()
1035 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + in ahash_finup_ctx()
1038 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_finup_ctx()
1040 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_finup_ctx()
1041 dev_err(jrdev, "unable to map S/G table\n"); in ahash_finup_ctx()
1048 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_finup_ctx()
1050 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_finup_ctx()
1051 dev_err(jrdev, "unable to map dst\n"); in ahash_finup_ctx()
1060 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); in ahash_finup_ctx()
1064 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); in ahash_finup_ctx()
1075 struct device *jrdev = ctx->jrdev; in ahash_digest() local
1090 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE, in ahash_digest()
1098 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_digest()
1113 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_digest()
1115 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_digest()
1116 dev_err(jrdev, "unable to map S/G table\n"); in ahash_digest()
1127 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_digest()
1129 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_digest()
1130 dev_err(jrdev, "unable to map dst\n"); in ahash_digest()
1139 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); in ahash_digest()
1143 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
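
Every asynchronous request path ends the same way (lines 873, 970, 1060, 1139, ...). Assuming the caam_jr_enqueue() of this era, which returns 0 once the job ring has accepted a descriptor, success is reported to the crypto API as -EINPROGRESS, while failure tears the mappings down on the spot since the callback will never run. Sketched from the tail of ahash_digest():

    ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
    if (!ret) {
            ret = -EINPROGRESS;     /* ahash_done() will unmap and complete */
    } else {
            /* job never started: undo the submit-time mappings here */
            ahash_unmap(jrdev, edesc, req, digestsize);
            kfree(edesc);
    }
    return ret;
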
1156 struct device *jrdev = ctx->jrdev; in ahash_final_no_ctx() local
1172 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_final_no_ctx()
1181 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); in ahash_final_no_ctx()
1182 if (dma_mapping_error(jrdev, state->buf_dma)) { in ahash_final_no_ctx()
1183 dev_err(jrdev, "unable to map src\n"); in ahash_final_no_ctx()
1189 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_final_no_ctx()
1191 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_final_no_ctx()
1192 dev_err(jrdev, "unable to map dst\n"); in ahash_final_no_ctx()
1202 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); in ahash_final_no_ctx()
1206 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_final_no_ctx()
1219 struct device *jrdev = ctx->jrdev; in ahash_update_no_ctx() local
1252 dev_err(jrdev, in ahash_update_no_ctx()
1264 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1266 src_map_to_sec4_sg(jrdev, req->src, src_nents, in ahash_update_no_ctx()
1280 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1283 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_no_ctx()
1284 dev_err(jrdev, "unable to map S/G table\n"); in ahash_update_no_ctx()
1290 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); in ahash_update_no_ctx()
1300 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); in ahash_update_no_ctx()
1307 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, in ahash_update_no_ctx()
1334 struct device *jrdev = ctx->jrdev; in ahash_finup_no_ctx() local
1359 dev_err(jrdev, "could not allocate extended descriptor\n"); in ahash_finup_no_ctx()
1373 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, in ahash_finup_no_ctx()
1377 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, in ahash_finup_no_ctx()
1380 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_finup_no_ctx()
1382 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_finup_no_ctx()
1383 dev_err(jrdev, "unable to map S/G table\n"); in ahash_finup_no_ctx()
1390 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, in ahash_finup_no_ctx()
1392 if (dma_mapping_error(jrdev, edesc->dst_dma)) { in ahash_finup_no_ctx()
1393 dev_err(jrdev, "unable to map dst\n"); in ahash_finup_no_ctx()
1402 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); in ahash_finup_no_ctx()
1406 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_finup_no_ctx()
1419 struct device *jrdev = ctx->jrdev; in ahash_update_first() local
1443 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in ahash_update_first()
1454 dev_err(jrdev, in ahash_update_first()
1469 edesc->sec4_sg_dma = dma_map_single(jrdev, in ahash_update_first()
1473 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_first()
1474 dev_err(jrdev, "unable to map S/G table\n"); in ahash_update_first()
1495 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); in ahash_update_first()
1505 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, in ahash_update_first()
1513 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, in ahash_update_first()
1772 ctx->jrdev = caam_jr_alloc(); in caam_hash_cra_init()
1773 if (IS_ERR(ctx->jrdev)) { in caam_hash_cra_init()
1775 return PTR_ERR(ctx->jrdev); in caam_hash_cra_init()
1797 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) in caam_hash_cra_exit()
1798 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, in caam_hash_cra_exit()
1802 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) in caam_hash_cra_exit()
1803 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, in caam_hash_cra_exit()
1807 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) in caam_hash_cra_exit()
1808 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, in caam_hash_cra_exit()
1811 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) in caam_hash_cra_exit()
1812 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, in caam_hash_cra_exit()
1816 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) in caam_hash_cra_exit()
1817 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, in caam_hash_cra_exit()
1820 caam_jr_free(ctx->jrdev); in caam_hash_cra_exit()
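
caam_hash_cra_init() and caam_hash_cra_exit() bracket the jrdev lifetime: a job ring is claimed per transform, and on exit each shared descriptor whose mapping actually succeeded is unmapped before the ring is released (lines 1797-1820). A condensed sketch, with one descriptor shown and the field names following the driver's conventions:

    static int sketch_cra_init(struct caam_hash_ctx *ctx)
    {
            ctx->jrdev = caam_jr_alloc();   /* claim a job ring for this tfm */
            if (IS_ERR(ctx->jrdev))
                    return PTR_ERR(ctx->jrdev);
            return 0;
    }

    static void sketch_cra_exit(struct caam_hash_ctx *ctx)
    {
            /* unmap only if the mapping was made and is valid */
            if (ctx->sh_desc_update_dma &&
                !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
                    dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
                                     desc_bytes(ctx->sh_desc_update),
                                     DMA_TO_DEVICE);
            /* ... repeat for update_first, fin, digest and finup ... */

            caam_jr_free(ctx->jrdev);
    }
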