root/drivers/crypto/marvell/hash.c


DEFINITIONS

This source file includes the following definitions.
  1. mv_cesa_ahash_req_iter_init
  2. mv_cesa_ahash_req_iter_next_op
  3. mv_cesa_ahash_dma_alloc_cache
  4. mv_cesa_ahash_dma_free_cache
  5. mv_cesa_ahash_dma_alloc_padding
  6. mv_cesa_ahash_dma_free_padding
  7. mv_cesa_ahash_dma_last_cleanup
  8. mv_cesa_ahash_dma_cleanup
  9. mv_cesa_ahash_cleanup
  10. mv_cesa_ahash_last_cleanup
  11. mv_cesa_ahash_pad_len
  12. mv_cesa_ahash_pad_req
  13. mv_cesa_ahash_std_step
  14. mv_cesa_ahash_std_process
  15. mv_cesa_ahash_dma_prepare
  16. mv_cesa_ahash_std_prepare
  17. mv_cesa_ahash_dma_step
  18. mv_cesa_ahash_step
  19. mv_cesa_ahash_process
  20. mv_cesa_ahash_complete
  21. mv_cesa_ahash_prepare
  22. mv_cesa_ahash_req_cleanup
  23. mv_cesa_ahash_init
  24. mv_cesa_ahash_cra_init
  25. mv_cesa_ahash_cache_req
  26. mv_cesa_dma_add_frag
  27. mv_cesa_ahash_dma_add_cache
  28. mv_cesa_ahash_dma_last_req
  29. mv_cesa_ahash_dma_req_init
  30. mv_cesa_ahash_req_init
  31. mv_cesa_ahash_queue_req
  32. mv_cesa_ahash_update
  33. mv_cesa_ahash_final
  34. mv_cesa_ahash_finup
  35. mv_cesa_ahash_export
  36. mv_cesa_ahash_import
  37. mv_cesa_md5_init
  38. mv_cesa_md5_export
  39. mv_cesa_md5_import
  40. mv_cesa_md5_digest
  41. mv_cesa_sha1_init
  42. mv_cesa_sha1_export
  43. mv_cesa_sha1_import
  44. mv_cesa_sha1_digest
  45. mv_cesa_sha256_init
  46. mv_cesa_sha256_digest
  47. mv_cesa_sha256_export
  48. mv_cesa_sha256_import
  49. mv_cesa_hmac_ahash_complete
  50. mv_cesa_ahmac_iv_state_init
  51. mv_cesa_ahmac_pad_init
  52. mv_cesa_ahmac_setkey
  53. mv_cesa_ahmac_cra_init
  54. mv_cesa_ahmac_md5_init
  55. mv_cesa_ahmac_md5_setkey
  56. mv_cesa_ahmac_md5_digest
  57. mv_cesa_ahmac_sha1_init
  58. mv_cesa_ahmac_sha1_setkey
  59. mv_cesa_ahmac_sha1_digest
  60. mv_cesa_ahmac_sha256_setkey
  61. mv_cesa_ahmac_sha256_init
  62. mv_cesa_ahmac_sha256_digest

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

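/*
 * Iterator used when building a TDMA chain for a hash request: 'base'
 * tracks how much of the request payload remains to be processed and
 * 'src' walks the source scatterlist.
 */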
struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}
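
/*
 * Worked example: for creq->len == 120 bytes, index = 120 % 64 = 56,
 * so mv_cesa_ahash_pad_len() returns 64 + 56 - 56 = 64 and
 * mv_cesa_ahash_pad_req() below builds a 72-byte trailer:
 *
 *   buf[0]      = 0x80               mandatory pad marker
 *   buf[1..63]  = 0x00               zero padding
 *   buf[64..71] = 120 << 3 = 960     message length in bits, stored
 *                                    LE for MD5 and BE for SHA
 *
 * 120 + 72 = 192 bytes, i.e. exactly three 64-byte hash blocks.
 */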

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}

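/*
 * Process one step of a request in "standard" (CPU-driven) mode: copy
 * the cached bytes and as much new payload as fits into the engine
 * SRAM, restore the intermediate digest on the first step, patch the
 * fragment mode and lengths in the operation descriptor, then kick
 * the engine. When the trailer does not fit next to the final
 * fragment, the last partial block is copied back into the cache and
 * padded on a later step.
 */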
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = creq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;
        unsigned int digsize;
        int i;

        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

        if (!sreq->offset) {
                digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
                for (i = 0; i < digsize / 4; i++)
                        writel_relaxed(creq->state[i],
                                       engine->regs + CESA_IVDIG(i));
        }

        if (creq->cache_ptr)
                memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                            creq->cache, creq->cache_ptr);

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
                                                   engine->sram +
                                                   CESA_SA_DATA_SRAM_OFFSET +
                                                   creq->cache_ptr,
                                                   len - creq->cache_ptr,
                                                   sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                memcpy_fromio(creq->cache,
                                              engine->sram +
                                              CESA_SA_DATA_SRAM_OFFSET + len,
                                              new_cache_ptr);
                        } else {
                                len += mv_cesa_ahash_pad_req(creq,
                                                engine->sram + len +
                                                CESA_SA_DATA_SRAM_OFFSET);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        BUG_ON(readl(engine->regs + CESA_SA_CMD) &
               CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        sreq->offset = 0;
}

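/*
 * If this request is not the first fragment of a hash (e.g. it
 * follows an import or a previous partial update), the TDMA chain
 * carries the CESA_TDMA_SET_STATE flag and the saved intermediate
 * digest must be loaded into the IVDIG registers before the chain is
 * launched.
 */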
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *base = &creq->base;

        /* We must explicitly set the digest state. */
        if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
                struct mv_cesa_engine *engine = base->engine;
                int i;

                /* Set the hash state in the IVDIG regs. */
                for (i = 0; i < ARRAY_SIZE(creq->state); i++)
                        writel_relaxed(creq->state[i], engine->regs +
                                       CESA_IVDIG(i));
        }

        mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                return mv_cesa_dma_process(&creq->base, status);

        return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int digsize;
        int i;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
            (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
             CESA_TDMA_RESULT) {
                __le32 *data = NULL;

                /*
                 * The result is already in the correct endianness when
                 * the SA is used.
                 */
                data = creq->base.chain.last->op->ctx.hash.hash;
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = cpu_to_le32(data[i]);

                memcpy(ahashreq->result, data, digsize);
        } else {
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = readl_relaxed(engine->regs +
                                                       CESA_IVDIG(i));
                if (creq->last_req) {
                        /*
                         * The hardware's MD5 digest is in little
                         * endian format, but the SHA digests are in
                         * big endian format.
                         */
                        if (creq->algo_le) {
                                __le32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_le32(creq->state[i]);
                        } else {
                                __be32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_be32(creq->state[i]);
                        }
                }
        }

        atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .cleanup = mv_cesa_ahash_req_cleanup,
        .complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
                               struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

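/*
 * If the whole request still fits in one hash block (and it is not
 * the final request), simply accumulate the data in creq->cache: the
 * engine is only involved once a full block is available.
 */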
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        bool cached = false;

        if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
            !creq->last_req) {
                cached = true;

                if (!req->nbytes)
                        return cached;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return cached;
}

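/*
 * Add an operation descriptor covering 'frag_len' bytes of payload to
 * the TDMA chain, followed by a dummy descriptor that launches the
 * operation. Once the first fragment has been emitted, the template
 * is switched to "mid fragment" mode for the following calls.
 */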
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                                CESA_SA_DESC_CFG_NOT_FRAG :
                                                CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                ret = mv_cesa_dma_add_result_op(chain,
                                                CESA_SA_CFG_SRAM_OFFSET,
                                                CESA_SA_DATA_SRAM_OFFSET,
                                                CESA_TDMA_SRC_IN_SRAM, flags);
                if (ret)
                        return ERR_PTR(-ENOMEM);
                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                CESA_SA_DATA_SRAM_OFFSET +
                                                frag_len,
                                                ahashdreq->padding_dma,
                                                len, CESA_TDMA_DST_IN_SRAM,
                                                flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl,
                                          frag_len + len, flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}

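/*
 * Build the whole TDMA chain for one request:
 *
 *   1. copy the cached left-over bytes into SRAM;
 *   2. add data transfers for each full SRAM block of new payload,
 *      interleaved with operation and launch descriptors;
 *   3. emit the final operation (including padding when this is the
 *      last request) and, if needed, a dummy "end" descriptor.
 */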
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        bool set_state = false;
        int ret;
        u32 type;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
                set_state = true;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&basereq->chain,
                                                  &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }

        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation. Queue up the final
         * operation, which depends on whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        /*
         * If results are copied via DMA, this means that this
         * request can be directly processed by the engine,
         * without partial updates. So we can chain it at the
         * DMA level with other requests.
         */
        type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

        if (op && type != CESA_TDMA_RESULT) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

        if (set_state) {
                /*
                 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
                 * let the step logic know that the IVDIG registers should be
                 * explicitly set before launching a TDMA chain.
                 */
                basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
        }

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG\n");
                return creq->src_nents;
        }

        *cached = mv_cesa_ahash_cache_req(req);

        if (*cached)
                return 0;

        if (cesa_dev->caps->has_tdma)
                return mv_cesa_ahash_dma_req_init(req);
        else
                return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_engine *engine;
        bool cached = false;
        int ret;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        engine = mv_cesa_select_engine(req->nbytes);
        mv_cesa_ahash_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->len += req->nbytes;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        return mv_cesa_ahash_queue_req(req);
}

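/*
 * Export/import of a partial hash state: the exported state is the
 * intermediate digest, the total number of bytes hashed so far and
 * the cached partial block. On import, do_div(len, blocksize) yields
 * the number of bytes belonging to an incomplete block; e.g. for
 * len == 200 and blocksize == 64, cache_ptr ends up as 8.
 */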
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

        mv_cesa_ahash_init(req, &tmpl, true);

        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};
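
/*
 * A minimal sketch of how a kernel user might drive one of the ahash
 * algorithms registered above (error handling trimmed; the "data" and
 * "len" variables are illustrative only, and an asynchronous
 * completion would normally be awaited rather than ignored):
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      struct scatterlist sg;
 *      u8 digest[SHA1_DIGEST_SIZE];
 *
 *      sg_init_one(&sg, data, len);
 *      ahash_request_set_callback(req, 0, NULL, NULL);
 *      ahash_request_set_crypt(req, &sg, digest, len);
 *      crypto_ahash_digest(req);       (may return -EINPROGRESS)
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 */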

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

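/*
 * Helper used to run the underlying ahash synchronously while
 * computing HMAC pads: the completion callback records the final
 * status and wakes up the caller waiting on 'completion'.
 */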
struct mv_cesa_ahash_result {
        struct completion completion;
        int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
                                        int error)
{
        struct mv_cesa_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mv_cesa_hmac_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           mv_cesa_hmac_ahash_complete,
                                           &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(req);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Set the memory region to 0 to avoid any leak. */
                kzfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

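/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). Instead of
 * hashing the pads on every request, setkey runs the underlying hash
 * over the ipad and opad blocks once (via
 * mv_cesa_ahmac_iv_state_init()) and saves the two intermediate
 * states, which are later loaded as the IVs of the inner and outer
 * hashes.
 */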
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kcalloc(2, blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
        }

        opad = ipad + blocksize;

        ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
        kfree(ipad);
free_req:
        ahash_request_free(req);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
                                    unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct md5_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
                ctx->iv[i] = be32_to_cpu(istate.hash[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

        return 0;
}
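
/*
 * Note: in all three HMAC setkey implementations the intermediate
 * state words are converted with be32_to_cpu() before being stored in
 * ctx->iv, and the outer-hash state always starts at a fixed offset
 * of 8 words (the largest supported digest size in 32-bit words); the
 * engine presumably expects the combined inner/outer IV block in this
 * fixed layout.
 */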

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
        .init = mv_cesa_ahmac_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_md5_digest,
        .setkey = mv_cesa_ahmac_md5_setkey,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "hmac(md5)",
                        .cra_driver_name = "mv-hmac-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha1_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
        .init = mv_cesa_ahmac_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha1_digest,
        .setkey = mv_cesa_ahmac_sha1_setkey,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
                                       unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha256_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
        .init = mv_cesa_ahmac_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha256_digest,
        .setkey = mv_cesa_ahmac_sha256_setkey,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "hmac(sha256)",
                        .cra_driver_name = "mv-hmac-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};
