Lines Matching refs:jrdev

182 struct device *jrdev; member
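These matches line up with the Freescale CAAM crypto driver (the function names belong to caamalg.c), and the first hit is the jrdev member of the driver's per-transform context. A minimal sketch of that context follows; only jrdev is confirmed by this listing, and every other field is inferred from the ctx-> accesses in the matches below:

	struct caam_ctx {
		struct device *jrdev;			/* job ring claimed in caam_cra_init() */
		u32 sh_desc_enc[DESC_MAX_USED_LEN];	/* shared descriptors built by */
		u32 sh_desc_dec[DESC_MAX_USED_LEN];	/* the *_set_sh_desc() hits */
		u32 sh_desc_givenc[DESC_MAX_USED_LEN];
		dma_addr_t sh_desc_enc_dma;		/* their DMA handles */
		dma_addr_t sh_desc_dec_dma;
		dma_addr_t sh_desc_givenc_dma;
		u8 key[CAAM_MAX_KEY_SIZE];		/* raw or split key material */
		dma_addr_t key_dma;
		unsigned int enckeylen;
		unsigned int split_key_len;
		unsigned int split_key_pad_len;
	};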
263 struct device *jrdev = ctx->jrdev; in aead_null_set_sh_desc() local
340 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, in aead_null_set_sh_desc()
343 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { in aead_null_set_sh_desc()
344 dev_err(jrdev, "unable to map shared descriptor\n"); in aead_null_set_sh_desc()
433 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, in aead_null_set_sh_desc()
436 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { in aead_null_set_sh_desc()
437 dev_err(jrdev, "unable to map shared descriptor\n"); in aead_null_set_sh_desc()
456 struct device *jrdev = ctx->jrdev; in aead_set_sh_desc() local
544 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, in aead_set_sh_desc()
547 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { in aead_set_sh_desc()
548 dev_err(jrdev, "unable to map shared descriptor\n"); in aead_set_sh_desc()
615 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, in aead_set_sh_desc()
618 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { in aead_set_sh_desc()
619 dev_err(jrdev, "unable to map shared descriptor\n"); in aead_set_sh_desc()
712 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, in aead_set_sh_desc()
715 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { in aead_set_sh_desc()
716 dev_err(jrdev, "unable to map shared descriptor\n"); in aead_set_sh_desc()
743 struct device *jrdev = ctx->jrdev; in gcm_set_sh_desc() local
847 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, in gcm_set_sh_desc()
850 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { in gcm_set_sh_desc()
851 dev_err(jrdev, "unable to map shared descriptor\n"); in gcm_set_sh_desc()
943 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, in gcm_set_sh_desc()
946 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { in gcm_set_sh_desc()
947 dev_err(jrdev, "unable to map shared descriptor\n"); in gcm_set_sh_desc()
973 struct device *jrdev = ctx->jrdev; in rfc4106_set_sh_desc() local
1045 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, in rfc4106_set_sh_desc()
1048 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { in rfc4106_set_sh_desc()
1049 dev_err(jrdev, "unable to map shared descriptor\n"); in rfc4106_set_sh_desc()
1122 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, in rfc4106_set_sh_desc()
1125 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { in rfc4106_set_sh_desc()
1126 dev_err(jrdev, "unable to map shared descriptor\n"); in rfc4106_set_sh_desc()
1222 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, in rfc4106_set_sh_desc()
1225 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { in rfc4106_set_sh_desc()
1226 dev_err(jrdev, "unable to map shared descriptor\n"); in rfc4106_set_sh_desc()
1254 struct device *jrdev = ctx->jrdev; in rfc4543_set_sh_desc() local
1367 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, in rfc4543_set_sh_desc()
1370 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { in rfc4543_set_sh_desc()
1371 dev_err(jrdev, "unable to map shared descriptor\n"); in rfc4543_set_sh_desc()
1489 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, in rfc4543_set_sh_desc()
1492 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { in rfc4543_set_sh_desc()
1493 dev_err(jrdev, "unable to map shared descriptor\n"); in rfc4543_set_sh_desc()
1617 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, in rfc4543_set_sh_desc()
1620 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { in rfc4543_set_sh_desc()
1621 dev_err(jrdev, "unable to map shared descriptor\n"); in rfc4543_set_sh_desc()
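Every *_set_sh_desc() hit above repeats one idiom: build a shared descriptor in the context, DMA-map it toward the device, and bail out with -ENOMEM if the mapping failed. A condensed sketch of that pattern; the helper name map_sh_desc is mine (the driver open-codes this at each hit), and desc_bytes() is the driver's own descriptor-length helper:

	/* Map a freshly built shared descriptor and verify the mapping. */
	static int map_sh_desc(struct device *jrdev, u32 *desc, dma_addr_t *dma)
	{
		*dma = dma_map_single(jrdev, desc, desc_bytes(desc),
				      DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, *dma)) {
			dev_err(jrdev, "unable to map shared descriptor\n");
			return -ENOMEM;
		}
		return 0;
	}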
1648 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, in gen_split_aead_key()
1659 struct device *jrdev = ctx->jrdev; in aead_setkey() local
1692 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + in aead_setkey()
1694 if (dma_mapping_error(jrdev, ctx->key_dma)) { in aead_setkey()
1695 dev_err(jrdev, "unable to map key i/o memory\n"); in aead_setkey()
1708 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + in aead_setkey()
1722 struct device *jrdev = ctx->jrdev; in gcm_setkey() local
1731 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, in gcm_setkey()
1733 if (dma_mapping_error(jrdev, ctx->key_dma)) { in gcm_setkey()
1734 dev_err(jrdev, "unable to map key i/o memory\n"); in gcm_setkey()
1741 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, in gcm_setkey()
1752 struct device *jrdev = ctx->jrdev; in rfc4106_setkey() local
1771 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, in rfc4106_setkey()
1773 if (dma_mapping_error(jrdev, ctx->key_dma)) { in rfc4106_setkey()
1774 dev_err(jrdev, "unable to map key i/o memory\n"); in rfc4106_setkey()
1780 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, in rfc4106_setkey()
1791 struct device *jrdev = ctx->jrdev; in rfc4543_setkey() local
1810 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen, in rfc4543_setkey()
1812 if (dma_mapping_error(jrdev, ctx->key_dma)) { in rfc4543_setkey()
1813 dev_err(jrdev, "unable to map key i/o memory\n"); in rfc4543_setkey()
1819 dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, in rfc4543_setkey()
1833 struct device *jrdev = ctx->jrdev; in ablkcipher_setkey() local
1868 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, in ablkcipher_setkey()
1870 if (dma_mapping_error(jrdev, ctx->key_dma)) { in ablkcipher_setkey()
1871 dev_err(jrdev, "unable to map key i/o memory\n"); in ablkcipher_setkey()
1921 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, in ablkcipher_setkey()
1924 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { in ablkcipher_setkey()
1925 dev_err(jrdev, "unable to map shared descriptor\n"); in ablkcipher_setkey()
1983 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, in ablkcipher_setkey()
1986 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { in ablkcipher_setkey()
1987 dev_err(jrdev, "unable to map shared descriptor\n"); in ablkcipher_setkey()
2061 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, in ablkcipher_setkey()
2064 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { in ablkcipher_setkey()
2065 dev_err(jrdev, "unable to map shared descriptor\n"); in ablkcipher_setkey()
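The setkey hits pair each key mapping with an unmap on the failure path. A sketch of that shape, modeled on the gcm_setkey() matches: the map, mapping-error check, and unmap-on-failure come straight from the lines above, while the memcpy into ctx->key and the function name are assumptions:

	static int example_setkey(struct crypto_aead *aead, const u8 *key,
				  unsigned int keylen)
	{
		struct caam_ctx *ctx = crypto_aead_ctx(aead);
		struct device *jrdev = ctx->jrdev;
		int ret;

		memcpy(ctx->key, key, keylen);
		ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, ctx->key_dma)) {
			dev_err(jrdev, "unable to map key i/o memory\n");
			return -ENOMEM;
		}
		ctx->enckeylen = keylen;

		/* Rebuild the shared descriptors for the new key. */
		ret = gcm_set_sh_desc(aead);
		if (ret)
			dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
					 DMA_TO_DEVICE);
		return ret;
	}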
2182 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, in aead_encrypt_done() argument
2192 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in aead_encrypt_done()
2199 caam_jr_strstatus(jrdev, err); in aead_encrypt_done()
2201 aead_unmap(jrdev, edesc, req); in aead_encrypt_done()
2221 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, in aead_decrypt_done() argument
2231 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in aead_decrypt_done()
2247 caam_jr_strstatus(jrdev, err); in aead_decrypt_done()
2249 aead_unmap(jrdev, edesc, req); in aead_decrypt_done()
2277 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, in ablkcipher_encrypt_done() argument
2286 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ablkcipher_encrypt_done()
2293 caam_jr_strstatus(jrdev, err); in ablkcipher_encrypt_done()
2304 ablkcipher_unmap(jrdev, edesc, req); in ablkcipher_encrypt_done()
2310 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, in ablkcipher_decrypt_done() argument
2319 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in ablkcipher_decrypt_done()
2325 caam_jr_strstatus(jrdev, err); in ablkcipher_decrypt_done()
2336 ablkcipher_unmap(jrdev, edesc, req); in ablkcipher_decrypt_done()
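The four *_done() hits are job-ring completion callbacks: log the descriptor status, decode it with caam_jr_strstatus(), unmap the extended descriptor, and complete the request. A sketch under those assumptions; recovering edesc from the descriptor pointer and the final errno mapping are inferred, not visible in the matches:

	static void example_encrypt_done(struct device *jrdev, u32 *desc,
					 u32 err, void *context)
	{
		struct aead_request *req = context;
		struct aead_edesc *edesc;

		dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

		/* The hardware descriptor is embedded in the extended one. */
		edesc = (struct aead_edesc *)((char *)desc -
			offsetof(struct aead_edesc, hw_desc));

		if (err)
			caam_jr_strstatus(jrdev, err);	/* decode CAAM status word */

		aead_unmap(jrdev, edesc, req);		/* undo the request's DMA maps */
		kfree(edesc);

		/* Simplification: the driver maps the status to an errno. */
		aead_request_complete(req, err ? -EIO : 0);
	}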
2617 struct device *jrdev = ctx->jrdev; in aead_edesc_alloc() local
2646 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, in aead_edesc_alloc()
2649 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in aead_edesc_alloc()
2652 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in aead_edesc_alloc()
2654 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, in aead_edesc_alloc()
2658 iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); in aead_edesc_alloc()
2659 if (dma_mapping_error(jrdev, iv_dma)) { in aead_edesc_alloc()
2660 dev_err(jrdev, "unable to map IV\n"); in aead_edesc_alloc()
2697 dev_err(jrdev, "could not allocate extended descriptor\n"); in aead_edesc_alloc()
2745 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in aead_edesc_alloc()
2747 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in aead_edesc_alloc()
2748 dev_err(jrdev, "unable to map S/G table\n"); in aead_edesc_alloc()
2760 struct device *jrdev = ctx->jrdev; in aead_encrypt() local
2781 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); in aead_encrypt()
2785 aead_unmap(jrdev, edesc, req); in aead_encrypt()
2797 struct device *jrdev = ctx->jrdev; in aead_decrypt() local
2824 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); in aead_decrypt()
2828 aead_unmap(jrdev, edesc, req); in aead_decrypt()
2845 struct device *jrdev = ctx->jrdev; in aead_giv_edesc_alloc() local
2865 sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1, in aead_giv_edesc_alloc()
2868 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in aead_giv_edesc_alloc()
2871 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in aead_giv_edesc_alloc()
2873 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, in aead_giv_edesc_alloc()
2877 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); in aead_giv_edesc_alloc()
2878 if (dma_mapping_error(jrdev, iv_dma)) { in aead_giv_edesc_alloc()
2879 dev_err(jrdev, "unable to map IV\n"); in aead_giv_edesc_alloc()
2937 dev_err(jrdev, "could not allocate extended descriptor\n"); in aead_giv_edesc_alloc()
2992 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in aead_giv_edesc_alloc()
2994 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in aead_giv_edesc_alloc()
2995 dev_err(jrdev, "unable to map S/G table\n"); in aead_giv_edesc_alloc()
3008 struct device *jrdev = ctx->jrdev; in aead_givencrypt() local
3036 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); in aead_givencrypt()
3040 aead_unmap(jrdev, edesc, req); in aead_givencrypt()
3061 struct device *jrdev = ctx->jrdev; in ablkcipher_edesc_alloc() local
3080 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in ablkcipher_edesc_alloc()
3083 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in ablkcipher_edesc_alloc()
3085 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, in ablkcipher_edesc_alloc()
3089 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); in ablkcipher_edesc_alloc()
3090 if (dma_mapping_error(jrdev, iv_dma)) { in ablkcipher_edesc_alloc()
3091 dev_err(jrdev, "unable to map IV\n"); in ablkcipher_edesc_alloc()
3110 dev_err(jrdev, "could not allocate extended descriptor\n"); in ablkcipher_edesc_alloc()
3135 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ablkcipher_edesc_alloc()
3137 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ablkcipher_edesc_alloc()
3138 dev_err(jrdev, "unable to map S/G table\n"); in ablkcipher_edesc_alloc()
3159 struct device *jrdev = ctx->jrdev; in ablkcipher_encrypt() local
3179 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); in ablkcipher_encrypt()
3184 ablkcipher_unmap(jrdev, edesc, req); in ablkcipher_encrypt()
3196 struct device *jrdev = ctx->jrdev; in ablkcipher_decrypt() local
3217 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); in ablkcipher_decrypt()
3221 ablkcipher_unmap(jrdev, edesc, req); in ablkcipher_decrypt()
3240 struct device *jrdev = ctx->jrdev; in ablkcipher_giv_edesc_alloc() local
3259 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in ablkcipher_giv_edesc_alloc()
3262 sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, in ablkcipher_giv_edesc_alloc()
3264 sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, in ablkcipher_giv_edesc_alloc()
3272 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); in ablkcipher_giv_edesc_alloc()
3273 if (dma_mapping_error(jrdev, iv_dma)) { in ablkcipher_giv_edesc_alloc()
3274 dev_err(jrdev, "unable to map IV\n"); in ablkcipher_giv_edesc_alloc()
3289 dev_err(jrdev, "could not allocate extended descriptor\n"); in ablkcipher_giv_edesc_alloc()
3315 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ablkcipher_giv_edesc_alloc()
3317 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ablkcipher_giv_edesc_alloc()
3318 dev_err(jrdev, "unable to map S/G table\n"); in ablkcipher_giv_edesc_alloc()
3340 struct device *jrdev = ctx->jrdev; in ablkcipher_givencrypt() local
3361 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); in ablkcipher_givencrypt()
3366 ablkcipher_unmap(jrdev, edesc, req); in ablkcipher_givencrypt()
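Every encrypt/decrypt hit above follows one enqueue flow: allocate and DMA-map an extended descriptor, submit it with caam_jr_enqueue(), return -EINPROGRESS when the job ring accepts it, and unmap plus free on immediate failure. A sketch of that flow; the signatures of aead_edesc_alloc() and init_aead_job() are assumptions:

	static int example_encrypt(struct aead_request *req)
	{
		struct crypto_aead *aead = crypto_aead_reqtfm(req);
		struct caam_ctx *ctx = crypto_aead_ctx(aead);
		struct device *jrdev = ctx->jrdev;
		struct aead_edesc *edesc;
		bool all_contig;
		u32 *desc;
		int ret;

		/* Map assoc/src/dst/IV and build the extended descriptor. */
		edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN, &all_contig);
		if (IS_ERR(edesc))
			return PTR_ERR(edesc);

		/* Job descriptor points at the mapped encrypt shared desc. */
		init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc,
			      req, all_contig, true);

		desc = edesc->hw_desc;
		ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
		if (!ret)
			return -EINPROGRESS;	/* completion runs *_done() */

		aead_unmap(jrdev, edesc, req);	/* immediate failure: clean up */
		kfree(edesc);
		return ret;
	}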
4142 ctx->jrdev = caam_jr_alloc(); in caam_cra_init()
4143 if (IS_ERR(ctx->jrdev)) { in caam_cra_init()
4145 return PTR_ERR(ctx->jrdev); in caam_cra_init()
4161 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) in caam_cra_exit()
4162 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, in caam_cra_exit()
4165 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) in caam_cra_exit()
4166 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, in caam_cra_exit()
4169 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) in caam_cra_exit()
4170 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, in caam_cra_exit()
4174 !dma_mapping_error(ctx->jrdev, ctx->key_dma)) in caam_cra_exit()
4175 dma_unmap_single(ctx->jrdev, ctx->key_dma, in caam_cra_exit()
4179 caam_jr_free(ctx->jrdev); in caam_cra_exit()
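Finally, caam_cra_init() and caam_cra_exit() bracket the jrdev lifetime: each transform claims a job ring with caam_jr_alloc() and releases it with caam_jr_free(), after unmapping whatever descriptors and key were successfully mapped. A sketch of that pairing; the dec/givenc/key teardown repeats the shown check, so it is elided:

	static int caam_cra_init(struct crypto_tfm *tfm)
	{
		struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

		ctx->jrdev = caam_jr_alloc();	/* one job ring per transform */
		if (IS_ERR(ctx->jrdev)) {
			pr_err("Job Ring Device allocation for transform failed\n");
			return PTR_ERR(ctx->jrdev);
		}
		return 0;
	}

	static void caam_cra_exit(struct crypto_tfm *tfm)
	{
		struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

		/* Unmap the encrypt shared descriptor if it was ever mapped. */
		if (ctx->sh_desc_enc_dma &&
		    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
			dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
					 desc_bytes(ctx->sh_desc_enc),
					 DMA_TO_DEVICE);
		/* ...identical check-then-unmap for dec, givenc, and key_dma... */

		caam_jr_free(ctx->jrdev);	/* release the job ring */
	}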