/linux-4.1.27/drivers/crypto/caam/ (cross-reference matches for jrdev)
error.h
    10   void caam_jr_strstatus(struct device *jrdev, u32 status);
key_gen.c
    44   int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,  gen_split_key() argument
    55   dev_err(jrdev, "unable to allocate key input memory\n");  gen_split_key()
    59   dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,  gen_split_key()
    61   if (dma_mapping_error(jrdev, dma_addr_in)) {  gen_split_key()
    62   dev_err(jrdev, "unable to map key input memory\n");  gen_split_key()
    66   dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,  gen_split_key()
    68   if (dma_mapping_error(jrdev, dma_addr_out)) {  gen_split_key()
    69   dev_err(jrdev, "unable to map key output memory\n");  gen_split_key()
    103  ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);  gen_split_key()
    115  dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,  gen_split_key()
    118  dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);  gen_split_key()
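The key_gen.c matches above trace gen_split_key()'s full round trip: map the input and output key buffers for DMA, enqueue a descriptor on the job ring, sleep on a completion that split_key_done() signals, then unmap. Below is a minimal sketch of that pattern, assuming the split_key_done()/struct split_key_result pairing that key_gen.c uses with caam_jr_enqueue(); run_split_key_job(), its parameters, and the -EIO status mapping are illustrative, not the driver's own code.

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include "jr.h"         /* caam_jr_enqueue() (driver-local header) */
#include "error.h"      /* caam_jr_strstatus() */

struct split_key_result {
        struct completion completion;
        int err;
};

/* Completion callback: record the status and wake the enqueuing thread. */
static void split_key_done(struct device *jrdev, u32 *desc, u32 err,
                           void *context)
{
        struct split_key_result *res = context;

        if (err)
                caam_jr_strstatus(jrdev, err);

        res->err = err ? -EIO : 0;      /* simplified status mapping */
        complete(&res->completion);
}

/* Hypothetical wrapper showing the map/enqueue/wait/unmap sequence. */
static int run_split_key_job(struct device *jrdev, u32 *desc, void *key_in,
                             int keylen, u8 *key_out, int split_key_pad_len)
{
        struct split_key_result result;
        dma_addr_t dma_addr_in, dma_addr_out;
        int ret;

        dma_addr_in = dma_map_single(jrdev, key_in, keylen, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, dma_addr_in)) {
                dev_err(jrdev, "unable to map key input memory\n");
                return -ENOMEM;
        }

        dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
                                      DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, dma_addr_out)) {
                dev_err(jrdev, "unable to map key output memory\n");
                dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        result.err = 0;
        init_completion(&result.completion);

        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* job accepted: block until split_key_done() fires */
                wait_for_completion_interruptible(&result.completion);
                ret = result.err;
        }

        dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
                         DMA_FROM_DEVICE);
        dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
        return ret;
}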
caamrng.c
    71   struct device *jrdev;  member in struct:caam_rng_ctx
    81   static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)  rng_unmap_buf() argument
    84   dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,  rng_unmap_buf()
    90   struct device *jrdev = ctx->jrdev;  rng_unmap_ctx() local
    93   dma_unmap_single(jrdev, ctx->sh_desc_dma,  rng_unmap_ctx()
    95   rng_unmap_buf(jrdev, &ctx->bufs[0]);  rng_unmap_ctx()
    96   rng_unmap_buf(jrdev, &ctx->bufs[1]);  rng_unmap_ctx()
    99   static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)  rng_done() argument
    107  caam_jr_strstatus(jrdev, err);  rng_done()
    120  struct device *jrdev = ctx->jrdev;  submit_job() local
    124  dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));  submit_job()
    126  err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);  submit_job()
    160  dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",  caam_read()
    181  dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);  caam_read()
    190  struct device *jrdev = ctx->jrdev;  rng_create_sh_desc() local
    204  ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),  rng_create_sh_desc()
    206  if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {  rng_create_sh_desc()
    207  dev_err(jrdev, "unable to map shared descriptor\n");  rng_create_sh_desc()
    219  struct device *jrdev = ctx->jrdev;  rng_create_job_desc() local
    227  bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);  rng_create_job_desc()
    228  if (dma_mapping_error(jrdev, bd->addr)) {  rng_create_job_desc()
    229  dev_err(jrdev, "unable to map dst\n");  rng_create_job_desc()
    271  static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)  caam_init_rng() argument
    275  ctx->jrdev = jrdev;  caam_init_rng()
    303  caam_jr_free(rng_ctx->jrdev);  caam_rng_exit()
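caamrng.c double-buffers: while readers drain one RN_BUF_SIZE buffer, submit_job() has already queued a refill for the other, and caam_read() flips current_buf when a buffer empties. rng_done() is the asynchronous half of that. A short sketch of the callback shape the matches imply, with illustrative field names (the driver's buf_data bookkeeping differs in detail):

#include <linux/completion.h>
#include <linux/device.h>
#include "error.h"      /* caam_jr_strstatus() */

/* Illustrative per-buffer state; stands in for the driver's buf_data. */
struct rng_buf_sketch {
        struct completion filled;       /* read path sleeps on this */
        bool empty;
};

/* Job-ring completion callback, matching caam_jr_enqueue()'s cbk type. */
static void rng_refill_done(struct device *jrdev, u32 *desc, u32 err,
                            void *context)
{
        struct rng_buf_sketch *bd = context;

        if (err)
                caam_jr_strstatus(jrdev, err);  /* decode the status word */

        bd->empty = false;
        complete(&bd->filled);          /* wake a blocked reader */
}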
caamhash.c
    102   struct device *jrdev;  member in struct:caam_hash_ctx
    140   static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,  map_seq_out_ptr_ctx() argument
    144   state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,  map_seq_out_ptr_ctx()
    146   if (dma_mapping_error(jrdev, state->ctx_dma)) {  map_seq_out_ptr_ctx()
    147   dev_err(jrdev, "unable to map ctx\n");  map_seq_out_ptr_ctx()
    157   static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,  map_seq_out_ptr_result() argument
    162   dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);  map_seq_out_ptr_result()
    169   static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,  buf_map_to_sec4_sg() argument
    175   buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);  buf_map_to_sec4_sg()
    182   static inline void src_map_to_sec4_sg(struct device *jrdev,  src_map_to_sec4_sg() argument
    187   dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);  src_map_to_sec4_sg()
    196   try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,  try_buf_map_to_sec4_sg() argument
    200   if (buf_dma && !dma_mapping_error(jrdev, buf_dma))  try_buf_map_to_sec4_sg()
    201   dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);  try_buf_map_to_sec4_sg()
    203   buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);  try_buf_map_to_sec4_sg()
    211   static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,  ctx_map_to_sec4_sg() argument
    215   state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);  ctx_map_to_sec4_sg()
    216   if (dma_mapping_error(jrdev, state->ctx_dma)) {  ctx_map_to_sec4_sg()
    217   dev_err(jrdev, "unable to map ctx\n");  ctx_map_to_sec4_sg()
    315   struct device *jrdev = ctx->jrdev;  ahash_set_sh_desc() local
    338   ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),  ahash_set_sh_desc()
    340   if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {  ahash_set_sh_desc()
    341   dev_err(jrdev, "unable to map shared descriptor\n");  ahash_set_sh_desc()
    356   ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,  ahash_set_sh_desc()
    359   if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {  ahash_set_sh_desc()
    360   dev_err(jrdev, "unable to map shared descriptor\n");  ahash_set_sh_desc()
    375   ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),  ahash_set_sh_desc()
    377   if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {  ahash_set_sh_desc()
    378   dev_err(jrdev, "unable to map shared descriptor\n");  ahash_set_sh_desc()
    393   ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),  ahash_set_sh_desc()
    395   if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {  ahash_set_sh_desc()
    396   dev_err(jrdev, "unable to map shared descriptor\n");  ahash_set_sh_desc()
    411   ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,  ahash_set_sh_desc()
    414   if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {  ahash_set_sh_desc()
    415   dev_err(jrdev, "unable to map shared descriptor\n");  ahash_set_sh_desc()
    431   return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,  gen_split_hash_key()
    440   struct device *jrdev = ctx->jrdev;  hash_digest_key() local
    448   dev_err(jrdev, "unable to allocate key input memory\n");  hash_digest_key()
    454   src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,  hash_digest_key()
    456   if (dma_mapping_error(jrdev, src_dma)) {  hash_digest_key()
    457   dev_err(jrdev, "unable to map key input memory\n");  hash_digest_key()
    461   dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,  hash_digest_key()
    463   if (dma_mapping_error(jrdev, dst_dma)) {  hash_digest_key()
    464   dev_err(jrdev, "unable to map key output memory\n");  hash_digest_key()
    465   dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);  hash_digest_key()
    490   ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);  hash_digest_key()
    502   dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);  hash_digest_key()
    503   dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);  hash_digest_key()
    518   struct device *jrdev = ctx->jrdev;  ahash_setkey() local
    556   ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,  ahash_setkey()
    558   if (dma_mapping_error(jrdev, ctx->key_dma)) {  ahash_setkey()
    559   dev_err(jrdev, "unable to map key i/o memory\n");  ahash_setkey()
    571   dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,  ahash_setkey()
    632   static void ahash_done(struct device *jrdev, u32 *desc, u32 err,  ahash_done() argument
    643   dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  ahash_done()
    649   caam_jr_strstatus(jrdev, err);  ahash_done()
    651   ahash_unmap(jrdev, edesc, req, digestsize);  ahash_done()
    667   static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,  ahash_done_bi() argument
    678   dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  ahash_done_bi()
    684   caam_jr_strstatus(jrdev, err);  ahash_done_bi()
    686   ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);  ahash_done_bi()
    702   static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,  ahash_done_ctx_src() argument
    713   dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  ahash_done_ctx_src()
    719   caam_jr_strstatus(jrdev, err);  ahash_done_ctx_src()
    721   ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);  ahash_done_ctx_src()
    737   static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,  ahash_done_ctx_dst() argument
    748   dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  ahash_done_ctx_dst()
    754   caam_jr_strstatus(jrdev, err);  ahash_done_ctx_dst()
    756   ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);  ahash_done_ctx_dst()
    778   struct device *jrdev = ctx->jrdev;  ahash_update_ctx() local
    813   dev_err(jrdev,  ahash_update_ctx()
    824   ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,  ahash_update_ctx()
    829   state->buf_dma = try_buf_map_to_sec4_sg(jrdev,  ahash_update_ctx()
    835   src_map_to_sec4_sg(jrdev, req->src, src_nents,  ahash_update_ctx()
    854   edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ahash_update_ctx()
    857   if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ahash_update_ctx()
    858   dev_err(jrdev, "unable to map S/G table\n");  ahash_update_ctx()
    873   ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);  ahash_update_ctx()
    877   ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,  ahash_update_ctx()
    903   struct device *jrdev = ctx->jrdev;  ahash_final_ctx() local
    925   dev_err(jrdev, "could not allocate extended descriptor\n");  ahash_final_ctx()
    938   ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,  ahash_final_ctx()
    943   state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,  ahash_final_ctx()
    948   edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ahash_final_ctx()
    950   if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ahash_final_ctx()
    951   dev_err(jrdev, "unable to map S/G table\n");  ahash_final_ctx()
    958   edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,  ahash_final_ctx()
    960   if (dma_mapping_error(jrdev, edesc->dst_dma)) {  ahash_final_ctx()
    961   dev_err(jrdev, "unable to map dst\n");  ahash_final_ctx()
    970   ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);  ahash_final_ctx()
    974   ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);  ahash_final_ctx()
    986   struct device *jrdev = ctx->jrdev;  ahash_finup_ctx() local
    1012  dev_err(jrdev, "could not allocate extended descriptor\n");  ahash_finup_ctx()
    1026  ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,  ahash_finup_ctx()
    1031  state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,  ahash_finup_ctx()
    1035  src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +  ahash_finup_ctx()
    1038  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ahash_finup_ctx()
    1040  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ahash_finup_ctx()
    1041  dev_err(jrdev, "unable to map S/G table\n");  ahash_finup_ctx()
    1048  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,  ahash_finup_ctx()
    1050  if (dma_mapping_error(jrdev, edesc->dst_dma)) {  ahash_finup_ctx()
    1051  dev_err(jrdev, "unable to map dst\n");  ahash_finup_ctx()
    1060  ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);  ahash_finup_ctx()
    1064  ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);  ahash_finup_ctx()
    1075  struct device *jrdev = ctx->jrdev;  ahash_digest() local
    1090  dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,  ahash_digest()
    1098  dev_err(jrdev, "could not allocate extended descriptor\n");  ahash_digest()
    1113  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ahash_digest()
    1115  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ahash_digest()
    1116  dev_err(jrdev, "unable to map S/G table\n");  ahash_digest()
    1127  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,  ahash_digest()
    1129  if (dma_mapping_error(jrdev, edesc->dst_dma)) {  ahash_digest()
    1130  dev_err(jrdev, "unable to map dst\n");  ahash_digest()
    1139  ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);  ahash_digest()
    1143  ahash_unmap(jrdev, edesc, req, digestsize);  ahash_digest()
    1156  struct device *jrdev = ctx->jrdev;  ahash_final_no_ctx() local
    1172  dev_err(jrdev, "could not allocate extended descriptor\n");  ahash_final_no_ctx()
    1181  state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);  ahash_final_no_ctx()
    1182  if (dma_mapping_error(jrdev, state->buf_dma)) {  ahash_final_no_ctx()
    1183  dev_err(jrdev, "unable to map src\n");  ahash_final_no_ctx()
    1189  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,  ahash_final_no_ctx()
    1191  if (dma_mapping_error(jrdev, edesc->dst_dma)) {  ahash_final_no_ctx()
    1192  dev_err(jrdev, "unable to map dst\n");  ahash_final_no_ctx()
    1202  ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);  ahash_final_no_ctx()
    1206  ahash_unmap(jrdev, edesc, req, digestsize);  ahash_final_no_ctx()
    1219  struct device *jrdev = ctx->jrdev;  ahash_update_no_ctx() local
    1252  dev_err(jrdev,  ahash_update_no_ctx()
    1264  state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,  ahash_update_no_ctx()
    1266  src_map_to_sec4_sg(jrdev, req->src, src_nents,  ahash_update_no_ctx()
    1280  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ahash_update_no_ctx()
    1283  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ahash_update_no_ctx()
    1284  dev_err(jrdev, "unable to map S/G table\n");  ahash_update_no_ctx()
    1290  ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);  ahash_update_no_ctx()
    1300  ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);  ahash_update_no_ctx()
    1307  ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,  ahash_update_no_ctx()
    1334  struct device *jrdev = ctx->jrdev;  ahash_finup_no_ctx() local
    1359  dev_err(jrdev, "could not allocate extended descriptor\n");  ahash_finup_no_ctx()
    1373  state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,  ahash_finup_no_ctx()
    1377  src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,  ahash_finup_no_ctx()
    1380  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ahash_finup_no_ctx()
    1382  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ahash_finup_no_ctx()
    1383  dev_err(jrdev, "unable to map S/G table\n");  ahash_finup_no_ctx()
    1390  edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,  ahash_finup_no_ctx()
    1392  if (dma_mapping_error(jrdev, edesc->dst_dma)) {  ahash_finup_no_ctx()
    1393  dev_err(jrdev, "unable to map dst\n");  ahash_finup_no_ctx()
    1402  ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);  ahash_finup_no_ctx()
    1406  ahash_unmap(jrdev, edesc, req, digestsize);  ahash_finup_no_ctx()
    1419  struct device *jrdev = ctx->jrdev;  ahash_update_first() local
    1443  dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  ahash_update_first()
    1454  dev_err(jrdev,  ahash_update_first()
    1469  edesc->sec4_sg_dma = dma_map_single(jrdev,  ahash_update_first()
    1473  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ahash_update_first()
    1474  dev_err(jrdev, "unable to map S/G table\n");  ahash_update_first()
    1495  ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);  ahash_update_first()
    1505  ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,  ahash_update_first()
    1513  ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,  ahash_update_first()
    1772  ctx->jrdev = caam_jr_alloc();  caam_hash_cra_init()
    1773  if (IS_ERR(ctx->jrdev)) {  caam_hash_cra_init()
    1775  return PTR_ERR(ctx->jrdev);  caam_hash_cra_init()
    1797  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))  caam_hash_cra_exit()
    1798  dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,  caam_hash_cra_exit()
    1802  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))  caam_hash_cra_exit()
    1803  dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,  caam_hash_cra_exit()
    1807  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))  caam_hash_cra_exit()
    1808  dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,  caam_hash_cra_exit()
    1811  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))  caam_hash_cra_exit()
    1812  dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,  caam_hash_cra_exit()
    1816  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))  caam_hash_cra_exit()
    1817  dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,  caam_hash_cra_exit()
    1820  caam_jr_free(ctx->jrdev);  caam_hash_cra_exit()
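One idiom recurs throughout caam_hash_cra_exit() above: every shared-descriptor DMA handle is tested with dma_mapping_error() before being unmapped, so teardown is safe even if caam_hash_cra_init()/ahash_set_sh_desc() only got partway through mapping. A minimal sketch of that guard follows; the one-descriptor context is a made-up stand-in (the driver tracks five descriptors: update, update_first, fin, finup, digest):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include "jr.h"         /* caam_jr_free() (driver-local header) */

/* Hypothetical cut-down context for illustration only. */
struct hash_ctx_sketch {
        struct device *jrdev;
        u32 sh_desc_update[16];
        dma_addr_t sh_desc_update_dma;
};

static void hash_exit_sketch(struct hash_ctx_sketch *ctx)
{
        /* unmap only handles that were actually mapped successfully */
        if (ctx->sh_desc_update_dma &&
            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
                dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
                                 sizeof(ctx->sh_desc_update), DMA_TO_DEVICE);

        caam_jr_free(ctx->jrdev);       /* release the job ring last */
}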
key_gen.h
    15   int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
caamalg.c
    182   struct device *jrdev;  member in struct:caam_ctx
    263   struct device *jrdev = ctx->jrdev;  aead_null_set_sh_desc() local
    340   ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,  aead_null_set_sh_desc()
    343   if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {  aead_null_set_sh_desc()
    344   dev_err(jrdev, "unable to map shared descriptor\n");  aead_null_set_sh_desc()
    433   ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,  aead_null_set_sh_desc()
    436   if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {  aead_null_set_sh_desc()
    437   dev_err(jrdev, "unable to map shared descriptor\n");  aead_null_set_sh_desc()
    456   struct device *jrdev = ctx->jrdev;  aead_set_sh_desc() local
    544   ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,  aead_set_sh_desc()
    547   if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {  aead_set_sh_desc()
    548   dev_err(jrdev, "unable to map shared descriptor\n");  aead_set_sh_desc()
    615   ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,  aead_set_sh_desc()
    618   if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {  aead_set_sh_desc()
    619   dev_err(jrdev, "unable to map shared descriptor\n");  aead_set_sh_desc()
    712   ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,  aead_set_sh_desc()
    715   if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {  aead_set_sh_desc()
    716   dev_err(jrdev, "unable to map shared descriptor\n");  aead_set_sh_desc()
    743   struct device *jrdev = ctx->jrdev;  gcm_set_sh_desc() local
    847   ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,  gcm_set_sh_desc()
    850   if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {  gcm_set_sh_desc()
    851   dev_err(jrdev, "unable to map shared descriptor\n");  gcm_set_sh_desc()
    943   ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,  gcm_set_sh_desc()
    946   if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {  gcm_set_sh_desc()
    947   dev_err(jrdev, "unable to map shared descriptor\n");  gcm_set_sh_desc()
    973   struct device *jrdev = ctx->jrdev;  rfc4106_set_sh_desc() local
    1045  ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,  rfc4106_set_sh_desc()
    1048  if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {  rfc4106_set_sh_desc()
    1049  dev_err(jrdev, "unable to map shared descriptor\n");  rfc4106_set_sh_desc()
    1122  ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,  rfc4106_set_sh_desc()
    1125  if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {  rfc4106_set_sh_desc()
    1126  dev_err(jrdev, "unable to map shared descriptor\n");  rfc4106_set_sh_desc()
    1222  ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,  rfc4106_set_sh_desc()
    1225  if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {  rfc4106_set_sh_desc()
    1226  dev_err(jrdev, "unable to map shared descriptor\n");  rfc4106_set_sh_desc()
    1254  struct device *jrdev = ctx->jrdev;  rfc4543_set_sh_desc() local
    1367  ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,  rfc4543_set_sh_desc()
    1370  if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {  rfc4543_set_sh_desc()
    1371  dev_err(jrdev, "unable to map shared descriptor\n");  rfc4543_set_sh_desc()
    1489  ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,  rfc4543_set_sh_desc()
    1492  if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {  rfc4543_set_sh_desc()
    1493  dev_err(jrdev, "unable to map shared descriptor\n");  rfc4543_set_sh_desc()
    1617  ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,  rfc4543_set_sh_desc()
    1620  if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {  rfc4543_set_sh_desc()
    1621  dev_err(jrdev, "unable to map shared descriptor\n");  rfc4543_set_sh_desc()
    1648  return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,  gen_split_aead_key()
    1659  struct device *jrdev = ctx->jrdev;  aead_setkey() local
    1692  ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +  aead_setkey()
    1694  if (dma_mapping_error(jrdev, ctx->key_dma)) {  aead_setkey()
    1695  dev_err(jrdev, "unable to map key i/o memory\n");  aead_setkey()
    1708  dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +  aead_setkey()
    1722  struct device *jrdev = ctx->jrdev;  gcm_setkey() local
    1731  ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,  gcm_setkey()
    1733  if (dma_mapping_error(jrdev, ctx->key_dma)) {  gcm_setkey()
    1734  dev_err(jrdev, "unable to map key i/o memory\n");  gcm_setkey()
    1741  dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,  gcm_setkey()
    1752  struct device *jrdev = ctx->jrdev;  rfc4106_setkey() local
    1771  ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,  rfc4106_setkey()
    1773  if (dma_mapping_error(jrdev, ctx->key_dma)) {  rfc4106_setkey()
    1774  dev_err(jrdev, "unable to map key i/o memory\n");  rfc4106_setkey()
    1780  dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,  rfc4106_setkey()
    1791  struct device *jrdev = ctx->jrdev;  rfc4543_setkey() local
    1810  ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,  rfc4543_setkey()
    1812  if (dma_mapping_error(jrdev, ctx->key_dma)) {  rfc4543_setkey()
    1813  dev_err(jrdev, "unable to map key i/o memory\n");  rfc4543_setkey()
    1819  dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,  rfc4543_setkey()
    1833  struct device *jrdev = ctx->jrdev;  ablkcipher_setkey() local
    1868  ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,  ablkcipher_setkey()
    1870  if (dma_mapping_error(jrdev, ctx->key_dma)) {  ablkcipher_setkey()
    1871  dev_err(jrdev, "unable to map key i/o memory\n");  ablkcipher_setkey()
    1921  ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,  ablkcipher_setkey()
    1924  if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {  ablkcipher_setkey()
    1925  dev_err(jrdev, "unable to map shared descriptor\n");  ablkcipher_setkey()
    1983  ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,  ablkcipher_setkey()
    1986  if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {  ablkcipher_setkey()
    1987  dev_err(jrdev, "unable to map shared descriptor\n");  ablkcipher_setkey()
    2061  ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,  ablkcipher_setkey()
    2064  if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {  ablkcipher_setkey()
    2065  dev_err(jrdev, "unable to map shared descriptor\n");  ablkcipher_setkey()
    2182  static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,  aead_encrypt_done() argument
    2192  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  aead_encrypt_done()
    2199  caam_jr_strstatus(jrdev, err);  aead_encrypt_done()
    2201  aead_unmap(jrdev, edesc, req);  aead_encrypt_done()
    2221  static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,  aead_decrypt_done() argument
    2231  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  aead_decrypt_done()
    2247  caam_jr_strstatus(jrdev, err);  aead_decrypt_done()
    2249  aead_unmap(jrdev, edesc, req);  aead_decrypt_done()
    2277  static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,  ablkcipher_encrypt_done() argument
    2286  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  ablkcipher_encrypt_done()
    2293  caam_jr_strstatus(jrdev, err);  ablkcipher_encrypt_done()
    2304  ablkcipher_unmap(jrdev, edesc, req);  ablkcipher_encrypt_done()
    2310  static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,  ablkcipher_decrypt_done() argument
    2319  dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);  ablkcipher_decrypt_done()
    2325  caam_jr_strstatus(jrdev, err);  ablkcipher_decrypt_done()
    2336  ablkcipher_unmap(jrdev, edesc, req);  ablkcipher_decrypt_done()
    2617  struct device *jrdev = ctx->jrdev;  aead_edesc_alloc() local
    2646  sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,  aead_edesc_alloc()
    2649  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  aead_edesc_alloc()
    2652  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  aead_edesc_alloc()
    2654  sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,  aead_edesc_alloc()
    2658  iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);  aead_edesc_alloc()
    2659  if (dma_mapping_error(jrdev, iv_dma)) {  aead_edesc_alloc()
    2660  dev_err(jrdev, "unable to map IV\n");  aead_edesc_alloc()
    2697  dev_err(jrdev, "could not allocate extended descriptor\n");  aead_edesc_alloc()
    2745  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  aead_edesc_alloc()
    2747  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  aead_edesc_alloc()
    2748  dev_err(jrdev, "unable to map S/G table\n");  aead_edesc_alloc()
    2760  struct device *jrdev = ctx->jrdev;  aead_encrypt() local
    2781  ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);  aead_encrypt()
    2785  aead_unmap(jrdev, edesc, req);  aead_encrypt()
    2797  struct device *jrdev = ctx->jrdev;  aead_decrypt() local
    2824  ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);  aead_decrypt()
    2828  aead_unmap(jrdev, edesc, req);  aead_decrypt()
    2845  struct device *jrdev = ctx->jrdev;  aead_giv_edesc_alloc() local
    2865  sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,  aead_giv_edesc_alloc()
    2868  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  aead_giv_edesc_alloc()
    2871  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  aead_giv_edesc_alloc()
    2873  sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,  aead_giv_edesc_alloc()
    2877  iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);  aead_giv_edesc_alloc()
    2878  if (dma_mapping_error(jrdev, iv_dma)) {  aead_giv_edesc_alloc()
    2879  dev_err(jrdev, "unable to map IV\n");  aead_giv_edesc_alloc()
    2937  dev_err(jrdev, "could not allocate extended descriptor\n");  aead_giv_edesc_alloc()
    2992  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  aead_giv_edesc_alloc()
    2994  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  aead_giv_edesc_alloc()
    2995  dev_err(jrdev, "unable to map S/G table\n");  aead_giv_edesc_alloc()
    3008  struct device *jrdev = ctx->jrdev;  aead_givencrypt() local
    3036  ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);  aead_givencrypt()
    3040  aead_unmap(jrdev, edesc, req);  aead_givencrypt()
    3061  struct device *jrdev = ctx->jrdev;  ablkcipher_edesc_alloc() local
    3080  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  ablkcipher_edesc_alloc()
    3083  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  ablkcipher_edesc_alloc()
    3085  sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,  ablkcipher_edesc_alloc()
    3089  iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);  ablkcipher_edesc_alloc()
    3090  if (dma_mapping_error(jrdev, iv_dma)) {  ablkcipher_edesc_alloc()
    3091  dev_err(jrdev, "unable to map IV\n");  ablkcipher_edesc_alloc()
    3110  dev_err(jrdev, "could not allocate extended descriptor\n");  ablkcipher_edesc_alloc()
    3135  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ablkcipher_edesc_alloc()
    3137  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ablkcipher_edesc_alloc()
    3138  dev_err(jrdev, "unable to map S/G table\n");  ablkcipher_edesc_alloc()
    3159  struct device *jrdev = ctx->jrdev;  ablkcipher_encrypt() local
    3179  ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);  ablkcipher_encrypt()
    3184  ablkcipher_unmap(jrdev, edesc, req);  ablkcipher_encrypt()
    3196  struct device *jrdev = ctx->jrdev;  ablkcipher_decrypt() local
    3217  ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);  ablkcipher_decrypt()
    3221  ablkcipher_unmap(jrdev, edesc, req);  ablkcipher_decrypt()
    3240  struct device *jrdev = ctx->jrdev;  ablkcipher_giv_edesc_alloc() local
    3259  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  ablkcipher_giv_edesc_alloc()
    3262  sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,  ablkcipher_giv_edesc_alloc()
    3264  sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,  ablkcipher_giv_edesc_alloc()
    3272  iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);  ablkcipher_giv_edesc_alloc()
    3273  if (dma_mapping_error(jrdev, iv_dma)) {  ablkcipher_giv_edesc_alloc()
    3274  dev_err(jrdev, "unable to map IV\n");  ablkcipher_giv_edesc_alloc()
    3289  dev_err(jrdev, "could not allocate extended descriptor\n");  ablkcipher_giv_edesc_alloc()
    3315  edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,  ablkcipher_giv_edesc_alloc()
    3317  if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {  ablkcipher_giv_edesc_alloc()
    3318  dev_err(jrdev, "unable to map S/G table\n");  ablkcipher_giv_edesc_alloc()
    3340  struct device *jrdev = ctx->jrdev;  ablkcipher_givencrypt() local
    3361  ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);  ablkcipher_givencrypt()
    3366  ablkcipher_unmap(jrdev, edesc, req);  ablkcipher_givencrypt()
    4142  ctx->jrdev = caam_jr_alloc();  caam_cra_init()
    4143  if (IS_ERR(ctx->jrdev)) {  caam_cra_init()
    4145  return PTR_ERR(ctx->jrdev);  caam_cra_init()
    4161  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))  caam_cra_exit()
    4162  dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,  caam_cra_exit()
    4165  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))  caam_cra_exit()
    4166  dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,  caam_cra_exit()
    4169  !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))  caam_cra_exit()
    4170  dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,  caam_cra_exit()
    4174  !dma_mapping_error(ctx->jrdev, ctx->key_dma))  caam_cra_exit()
    4175  dma_unmap_single(ctx->jrdev, ctx->key_dma,  caam_cra_exit()
    4179  caam_jr_free(ctx->jrdev);  caam_cra_exit()
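Both caamalg.c (caam_cra_init()/caam_cra_exit() above) and caamhash.c bind one job-ring device to each crypto transform. caam_jr_alloc() returns an ERR_PTR on failure, hence the IS_ERR()/PTR_ERR() handling. A minimal sketch with a hypothetical context type and error message:

#include <linux/device.h>
#include <linux/err.h>
#include "jr.h"         /* caam_jr_alloc(), caam_jr_free() */

/* Hypothetical per-transform context, for illustration only. */
struct caam_ctx_sketch {
        struct device *jrdev;
};

static int cra_init_sketch(struct caam_ctx_sketch *ctx)
{
        ctx->jrdev = caam_jr_alloc();
        if (IS_ERR(ctx->jrdev)) {
                pr_err("job ring allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }
        return 0;
}

static void cra_exit_sketch(struct caam_ctx_sketch *ctx)
{
        /* shared descriptors and keys must be unmapped before this */
        caam_jr_free(ctx->jrdev);
}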
error.c
    120  static void report_ccb_status(struct device *jrdev, const u32 status,  report_ccb_status() argument
    159  dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",  report_ccb_status()
    165  static void report_jump_status(struct device *jrdev, const u32 status,  report_jump_status() argument
    168  dev_err(jrdev, "%08x: %s: %s() not implemented\n",  report_jump_status()
    172  static void report_deco_status(struct device *jrdev, const u32 status,  report_deco_status() argument
    197  dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",  report_deco_status()
    201  static void report_jr_status(struct device *jrdev, const u32 status,  report_jr_status() argument
    204  dev_err(jrdev, "%08x: %s: %s() not implemented\n",  report_jr_status()
    208  static void report_cond_code_status(struct device *jrdev, const u32 status,  report_cond_code_status() argument
    211  dev_err(jrdev, "%08x: %s: %s() not implemented\n",  report_cond_code_status()
    215  void caam_jr_strstatus(struct device *jrdev, u32 status)  caam_jr_strstatus() argument
    218  void (*report_ssed)(struct device *jrdev, const u32 status,  caam_jr_strstatus()
    247  status_src[ssrc].report_ssed(jrdev, status, error);  caam_jr_strstatus()
    249  dev_err(jrdev, "%d: %s\n", ssrc, error);  caam_jr_strstatus()
    251  dev_err(jrdev, "%d: unknown error source\n", ssrc);  caam_jr_strstatus()
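caam_jr_strstatus() above decodes the 32-bit status word by extracting its error-source field and dispatching through a table of report_*_status() handlers. A sketch of that table-dispatch idiom follows; the 28-bit shift is an assumption standing in for the driver's JRSTA_SSRC_SHIFT, and the table type and fallback branches are written from the matches above, not copied from the file:

#include <linux/device.h>

/* Illustrative table entry: one handler plus a fallback string per source. */
struct status_src_sketch {
        void (*report_ssed)(struct device *jrdev, const u32 status,
                            const char *error);
        const char *error;
};

static void strstatus_sketch(struct device *jrdev, u32 status,
                             const struct status_src_sketch *status_src,
                             unsigned int nents)
{
        u32 ssrc = status >> 28;        /* assumed source-field position */

        if (ssrc < nents && status_src[ssrc].report_ssed)
                status_src[ssrc].report_ssed(jrdev, status,
                                             status_src[ssrc].error);
        else if (ssrc < nents && status_src[ssrc].error)
                dev_err(jrdev, "%d: %s\n", ssrc, status_src[ssrc].error);
        else
                dev_err(jrdev, "%d: unknown error source\n", ssrc);
}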
jr.c
    96   struct device *jrdev;  caam_jr_remove() local
    99   jrdev = &pdev->dev;  caam_jr_remove()
    100  jrpriv = dev_get_drvdata(jrdev);  caam_jr_remove()
    106  dev_err(jrdev, "Device is busy\n");  caam_jr_remove()
    116  ret = caam_jr_shutdown(jrdev);  caam_jr_remove()
    118  dev_err(jrdev, "Failed to shut down job ring\n");  caam_jr_remove()
    456  struct device *jrdev;  caam_jr_probe() local
    463  jrdev = &pdev->dev;  caam_jr_probe()
    464  jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),  caam_jr_probe()
    469  dev_set_drvdata(jrdev, jrpriv);  caam_jr_probe()
    479  dev_err(jrdev, "of_iomap() failed\n");  caam_jr_probe()
    487  dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));  caam_jr_probe()
    489  dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));  caam_jr_probe()
    491  dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));  caam_jr_probe()
    497  error = caam_jr_init(jrdev); /* now turn on hardware */  caam_jr_probe()
    503  jrpriv->dev = jrdev;  caam_jr_probe()
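caam_jr_probe() above selects the widest DMA mask the hardware generation supports, falling back through 40-, 36-, and 32-bit masks. A sketch of that selection; the boolean feature predicates are stand-ins for the driver's actual SEC-era/compatible checks, and the return value is checked here even though the matched lines do not show it:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int set_jr_dma_mask(struct device *jrdev, bool has_40bit_bus,
                           bool has_36bit_bus)
{
        int err;

        /* widest mask first, narrower fallbacks for older hardware */
        if (has_40bit_bus)
                err = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
        else if (has_36bit_bus)
                err = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
        else
                err = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));

        if (err)
                dev_err(jrdev, "no usable DMA configuration\n");
        return err;
}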