/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->req.dma);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}
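/*
 * Standard (non-DMA) mode: the CPU feeds the engine through its internal
 * SRAM, one CESA_SA_SRAM_PAYLOAD_SIZE chunk at a time. _std_step() copies
 * the next chunk of input into the SRAM and kicks the engine, while
 * _std_process() copies the result back into the destination scatterlist
 * and returns -EINPROGRESS until the whole request has been handled.
 */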
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	int ret;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma, status);
	else
		ret = mv_cesa_ablkcipher_std_process(ablkreq, status);

	if (ret)
		return ret;

	/* Hand the engine's output IV back to the caller (ivsize is 0 for ECB). */
	memcpy_fromio(ablkreq->info,
		      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
		      crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));

	return 0;
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->size = 0;
	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.prepare = mv_cesa_ablkcipher_prepare,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}
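/*
 * For decryption the engine must be loaded with the last 'key_length'
 * bytes of the expanded encryption key. crypto_aes_expand_key() already
 * places the (untransformed) last round key in key_dec[0..3]; for
 * AES-192/256 the remaining words are copied from the tail of key_enc
 * below, once, at setkey() time.
 */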
static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* des_ekey() returns 0 for weak keys: reject them if requested. */
	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}
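/*
 * DMA mode: build a TDMA descriptor chain covering the whole request.
 * Each loop iteration handles up to one SRAM payload worth of data and
 * queues, in order: the op context (skipped after the first round), the
 * input transfers, a dummy descriptor that triggers the crypto operation,
 * and the output transfers.
 */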
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;
	struct mv_cesa_ablkcipher_dma_iter iter;
	struct mv_cesa_tdma_chain chain;
	bool skip_ctx = false;
	int ret;

	dreq->base.type = CESA_DMA_REQ;
	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	dreq->chain = chain;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}
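/*
 * Standard mode needs no descriptor chain: just record the op template in
 * the per-request context so _std_step() can replay it chunk by chunk.
 */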
static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;

	sreq->base.type = CESA_STD_REQ;
	sreq->op = *op_templ;
	sreq->skip_ctx = false;

	return 0;
}

static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	/* sg_nents_for_len() fails if the scatterlists are too short. */
	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	/* TODO: add a threshold for DMA usage */
	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}

static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret;

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};
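/*
 * CBC variants only differ from their ECB counterparts by the chaining
 * mode bit and by the IV, which is copied from the request into the op
 * context before the common op helper runs.
 */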
static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret;

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "mv-ecb-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
		},
	},
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "mv-cbc-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
		},
	},
};
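/*
 * AES is the only cipher here with several key lengths: pick the
 * encryption or decryption round keys depending on the requested
 * direction, load them into the op context in little-endian form, and
 * encode the key length in the config word.
 */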
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret, i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};
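/*
 * As with DES, CBC mode just adds the chaining-mode bit and the IV on top
 * of the common AES op; the updated IV is copied back to req->info by
 * mv_cesa_ablkcipher_process() once the request completes.
 */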
static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};
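/*
 * Illustrative sketch (not part of this driver): a kernel-side user never
 * calls into this file directly, but reaches these implementations through
 * the generic ablkcipher API. Assuming a key, an IV and mapped src/dst
 * scatterlists are already set up, the flow looks roughly like:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_complete_cb, my_data);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * Since these algs are CRYPTO_ALG_ASYNC, encrypt()/decrypt() may return
 * -EINPROGRESS and complete later through the callback, after which the
 * request and tfm are released with ablkcipher_request_free() and
 * crypto_free_ablkcipher().
 */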