root/drivers/crypto/ccree/cc_aead.c

DEFINITIONS

This source file includes the following definitions:
  1. valid_assoclen
  2. cc_aead_exit
  3. cc_get_aead_hash_len
  4. cc_aead_init
  5. cc_aead_complete
  6. xcbc_setkey
  7. hmac_setkey
  8. validate_keys_sizes
  9. cc_get_plain_hmac_key
  10. cc_aead_setkey
  11. cc_des3_aead_setkey
  12. cc_rfc4309_ccm_setkey
  13. cc_aead_setauthsize
  14. cc_rfc4309_ccm_setauthsize
  15. cc_ccm_setauthsize
  16. cc_set_assoc_desc
  17. cc_proc_authen_desc
  18. cc_proc_cipher_desc
  19. cc_proc_digest_desc
  20. cc_set_cipher_desc
  21. cc_proc_cipher
  22. cc_set_hmac_desc
  23. cc_set_xcbc_desc
  24. cc_proc_header_desc
  25. cc_proc_scheme_desc
  26. cc_mlli_to_sram
  27. cc_get_data_flow
  28. cc_hmac_authenc
  29. cc_xcbc_authenc
  30. validate_data_size
  31. format_ccm_a0
  32. set_msg_len
  33. cc_ccm
  34. config_ccm_adata
  35. cc_proc_rfc4309_ccm
  36. cc_set_ghash_desc
  37. cc_set_gctr_desc
  38. cc_proc_gcm_result
  39. cc_gcm
  40. config_gcm_context
  41. cc_proc_rfc4_gcm
  42. cc_proc_aead
  43. cc_aead_encrypt
  44. cc_rfc4309_ccm_encrypt
  45. cc_aead_decrypt
  46. cc_rfc4309_ccm_decrypt
  47. cc_rfc4106_gcm_setkey
  48. cc_rfc4543_gcm_setkey
  49. cc_gcm_setauthsize
  50. cc_rfc4106_gcm_setauthsize
  51. cc_rfc4543_gcm_setauthsize
  52. cc_rfc4106_gcm_encrypt
  53. cc_rfc4543_gcm_encrypt
  54. cc_rfc4106_gcm_decrypt
  55. cc_rfc4543_gcm_decrypt
  56. cc_create_aead_alg
  57. cc_aead_free
  58. cc_aead_alloc

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead   template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

struct cc_aead_handle {
        cc_sram_addr_t sram_workspace_addr;
        struct list_head aead_list;
};

struct cc_hmac_s {
        u8 *padded_authkey;
        u8 *ipad_opad; /* IPAD, OPAD */
        dma_addr_t padded_authkey_dma_addr;
        dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
        u8 *xcbc_keys; /* K1, K2, K3 */
        dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
        struct cc_drvdata *drvdata;
        u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
        u8 *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct cc_hmac_s hmac;
                struct cc_xcbc_s xcbc;
        } auth_state;
        unsigned int enc_keylen;
        unsigned int auth_keylen;
        unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
        unsigned int hash_len;
        enum drv_cipher_mode cipher_mode;
        enum cc_flow_mode flow_mode;
        enum drv_hash_mode auth_mode;
};

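/*
 * Returns true only for the 16- and 20-byte assoc. data lengths accepted
 * by the RFC 4106/4309/4543 request paths that use this helper.
 */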
static inline bool valid_assoclen(struct aead_request *req)
{
        return ((req->assoclen == 16) || (req->assoclen == 20));
}

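/* Release the DMA-coherent key buffers allocated in cc_aead_init() */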
static void cc_aead_exit(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
                crypto_tfm_alg_name(&tfm->base));

        /* Unmap enckey buffer */
        if (ctx->enckey) {
                dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
                                  ctx->enckey_dma_addr);
                dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
                        &ctx->enckey_dma_addr);
                ctx->enckey_dma_addr = 0;
                ctx->enckey = NULL;
        }

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

                if (xcbc->xcbc_keys) {
                        dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
                                          xcbc->xcbc_keys,
                                          xcbc->xcbc_keys_dma_addr);
                }
                dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
                        &xcbc->xcbc_keys_dma_addr);
                xcbc->xcbc_keys_dma_addr = 0;
                xcbc->xcbc_keys = NULL;
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

                if (hmac->ipad_opad) {
                        dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
                                          hmac->ipad_opad,
                                          hmac->ipad_opad_dma_addr);
                        dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
                                &hmac->ipad_opad_dma_addr);
                        hmac->ipad_opad_dma_addr = 0;
                        hmac->ipad_opad = NULL;
                }
                if (hmac->padded_authkey) {
                        dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
                                          hmac->padded_authkey,
                                          hmac->padded_authkey_dma_addr);
                        dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
                                &hmac->padded_authkey_dma_addr);
                        hmac->padded_authkey_dma_addr = 0;
                        hmac->padded_authkey = NULL;
                }
        }
}

static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return cc_get_default_hash_len(ctx->drvdata);
}

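/*
 * One-time tfm setup: copy the algorithm's modes into the context and
 * allocate the DMA-coherent buffers for the encryption key and, depending
 * on the auth. mode, the XCBC subkeys or the HMAC ipad/opad digests.
 */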
static int cc_aead_init(struct crypto_aead *tfm)
{
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_alg *cc_alg =
                        container_of(alg, struct cc_crypto_alg, aead_alg);
        struct device *dev = drvdata_to_dev(cc_alg->drvdata);

        dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
                crypto_tfm_alg_name(&tfm->base));

        /* Initialize modes in instance */
        ctx->cipher_mode = cc_alg->cipher_mode;
        ctx->flow_mode = cc_alg->flow_mode;
        ctx->auth_mode = cc_alg->auth_mode;
        ctx->drvdata = cc_alg->drvdata;
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

        /* Allocate key buffer, cache line aligned */
        ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
                                         &ctx->enckey_dma_addr, GFP_KERNEL);
        if (!ctx->enckey) {
                dev_err(dev, "Failed allocating key buffer\n");
                goto init_failed;
        }
        dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
                ctx->enckey);

        /* Set default authlen value */

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
                const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

                /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
                /* (and temporary for user key - up to 256b) */
                xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
                                                     &xcbc->xcbc_keys_dma_addr,
                                                     GFP_KERNEL);
                if (!xcbc->xcbc_keys) {
                        dev_err(dev, "Failed allocating buffer for XCBC keys\n");
                        goto init_failed;
                }
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
                const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
                dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

                /* Allocate dma-coherent buffer for IPAD + OPAD */
                hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
                                                     &hmac->ipad_opad_dma_addr,
                                                     GFP_KERNEL);

                if (!hmac->ipad_opad) {
                        dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
                        goto init_failed;
                }

                dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
                        hmac->ipad_opad);

                hmac->padded_authkey = dma_alloc_coherent(dev,
                                                          MAX_HMAC_BLOCK_SIZE,
                                                          pkey_dma,
                                                          GFP_KERNEL);

                if (!hmac->padded_authkey) {
                        dev_err(dev, "failed to allocate padded_authkey\n");
                        goto init_failed;
                }
        } else {
                ctx->auth_state.hmac.ipad_opad = NULL;
                ctx->auth_state.hmac.padded_authkey = NULL;
        }
        ctx->hash_len = cc_get_aead_hash_len(tfm);

        return 0;

init_failed:
        cc_aead_exit(tfm);
        return -ENOMEM;
}

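/*
 * Completion callback: unmap the request and, on decryption, compare the
 * computed MAC against the received ICV, zeroing the decrypted payload on
 * mismatch. On encryption with a fragmented ICV, copy the MAC from
 * mac_buf out to the scattered destination.
 */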
static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
        struct aead_request *areq = (struct aead_request *)cc_req;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        /* BACKLOG notification */
        if (err == -EINPROGRESS)
                goto done;

        cc_unmap_aead_request(dev, areq);

        /* Restore ordinary iv pointer */
        areq->iv = areq_ctx->backup_iv;

        if (err)
                goto done;

        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
                if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
                           ctx->authsize) != 0) {
                        dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
                                ctx->authsize, ctx->cipher_mode);
                        /* In case of payload authentication failure, MUST NOT
                         * reveal the decrypted message --> zero its memory.
                         */
                        sg_zero_buffer(areq->dst, sg_nents(areq->dst),
                                       areq->cryptlen, areq->assoclen);
                        err = -EBADMSG;
                }
        /*ENCRYPT*/
        } else if (areq_ctx->is_icv_fragmented) {
                u32 skip = areq->cryptlen + areq_ctx->dst_offset;

                cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
                                   skip, (skip + ctx->authsize),
                                   CC_SG_FROM_BUF);
        }
done:
        aead_request_complete(areq, err);
}

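/*
 * Build the four-descriptor sequence that derives the XCBC-MAC subkeys
 * K1, K2 and K3 by encrypting the constants 0x01.., 0x02.. and 0x03..
 * with the user key. Returns the number of descriptors used.
 */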
static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        /* Load the AES key */
        hw_desc_init(&desc[0]);
        /* The source/user key uses the same buffer as the output keys,
         * because after this key loading it is not needed anymore.
         */
        set_din_type(&desc[0], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
                     NS_BIT);
        set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
        set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
        set_key_size_aes(&desc[0], ctx->auth_keylen);
        set_flow_mode(&desc[0], S_DIN_to_AES);
        set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

        hw_desc_init(&desc[1]);
        set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[1], DIN_AES_DOUT);
        set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                      AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[2]);
        set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[2], DIN_AES_DOUT);
        set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                         + AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[3]);
        set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[3], DIN_AES_DOUT);
        set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                          + 2 * AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        return 4;
}

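/*
 * Build the descriptor sequence that derives the HMAC ipad and opad
 * intermediate digests from the padded authentication key and stores
 * them in the ipad_opad DMA buffer. Returns the number of descriptors.
 */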
static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        unsigned int digest_ofs = 0;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

        unsigned int idx = 0;
        int i;

        /* calc derived HMAC key */
        for (i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_sram(&desc[idx],
                             cc_larval_digest_addr(ctx->drvdata,
                                                   ctx->auth_mode),
                             digest_size);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length*/
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_const(&desc[idx], 0, ctx->hash_len);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             hmac->padded_authkey_dma_addr,
                             SHA256_BLOCK_SIZE, NS_BIT);
                set_cipher_mode(&desc[idx], hash_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the digest */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_dout_dlli(&desc[idx],
                              (hmac->ipad_opad_dma_addr + digest_ofs),
                              digest_size, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                idx++;

                digest_ofs += digest_size;
        }

        return idx;
}

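/* Sanity-check the cipher and authentication key sizes for the set modes */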
static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
                ctx->enc_keylen, ctx->auth_keylen);

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                break;
        case DRV_HASH_XCBC_MAC:
                if (ctx->auth_keylen != AES_KEYSIZE_128 &&
                    ctx->auth_keylen != AES_KEYSIZE_192 &&
                    ctx->auth_keylen != AES_KEYSIZE_256)
                        return -ENOTSUPP;
                break;
        case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
                if (ctx->auth_keylen > 0)
                        return -EINVAL;
                break;
        default:
                dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
                return -EINVAL;
        }
        /* Check cipher key size */
        if (ctx->flow_mode == S_DIN_to_DES) {
                if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
                        dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        } else { /* Default assumed to be AES ciphers */
                if (ctx->enc_keylen != AES_KEYSIZE_128 &&
                    ctx->enc_keylen != AES_KEYSIZE_192 &&
                    ctx->enc_keylen != AES_KEYSIZE_256) {
                        dev_err(dev, "Invalid cipher(AES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        }

        return 0; /* All key-size checks passed */
}

/* This function prepares the user key so it can be passed to the HMAC
 * processing (copied to an internal buffer, or hashed if the key is
 * longer than the block size).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
                                 unsigned int keylen)
{
        dma_addr_t key_dma_addr = 0;
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
        struct cc_crypto_req cc_req = {};
        unsigned int blocksize;
        unsigned int digestsize;
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
        u8 *key = NULL;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;

        switch (ctx->auth_mode) { /* auth_key required and >0 */
        case DRV_HASH_SHA1:
                blocksize = SHA1_BLOCK_SIZE;
                digestsize = SHA1_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA1;
                break;
        case DRV_HASH_SHA256:
        default:
                blocksize = SHA256_BLOCK_SIZE;
                digestsize = SHA256_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA256;
        }

        if (keylen != 0) {

                key = kmemdup(authkey, keylen, GFP_KERNEL);
                if (!key)
                        return -ENOMEM;

                key_dma_addr = dma_map_single(dev, (void *)key, keylen,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        kzfree(key);
                        return -ENOMEM;
                }
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_sram(&desc[idx], larval_addr, digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length*/
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_const(&desc[idx], 0, ctx->hash_len);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     key_dma_addr, keylen, NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
                                      digestsize), (blocksize - digestsize),
                                      NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
                                     keylen, NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (padded_authkey_dma_addr +
                                               keylen),
                                              (blocksize - keylen), NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, (blocksize - keylen));
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
        if (rc)
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);

        if (key_dma_addr)
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

        kzfree(key);

        return rc;
}

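/*
 * Common setkey: split an authenc() key blob into cipher and auth. keys,
 * stash them in the context and run the HW sequence that derives the
 * per-mode authentication key material.
 */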
static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_req cc_req = {};
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        unsigned int seq_len = 0;
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        const u8 *enckey, *authkey;
        int rc;

        dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
                ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

        /* STAT_PHASE_0: Init and sanity checks */

        if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
                struct crypto_authenc_keys keys;

                rc = crypto_authenc_extractkeys(&keys, key, keylen);
                if (rc)
                        goto badkey;
                enckey = keys.enckey;
                authkey = keys.authkey;
                ctx->enc_keylen = keys.enckeylen;
                ctx->auth_keylen = keys.authkeylen;

                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        /* the nonce is stored in the last bytes of the key */
                        rc = -EINVAL;
                        if (ctx->enc_keylen <
                            (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
                                goto badkey;
                        /* Copy nonce from last 4 bytes in CTR key to
                         * first 4 bytes in CTR IV
                         */
                        memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
                               CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
                        /* Set CTR key size */
                        ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
                }
        } else { /* non-authenc - has just one key */
                enckey = key;
                authkey = NULL;
                ctx->enc_keylen = keylen;
                ctx->auth_keylen = 0;
        }

        rc = validate_keys_sizes(ctx);
        if (rc)
                goto badkey;

        /* STAT_PHASE_1: Copy key to ctx */

        /* Get key material */
        memcpy(ctx->enckey, enckey, ctx->enc_keylen);
        if (ctx->enc_keylen == 24)
                memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
                       ctx->auth_keylen);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
                rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
                if (rc)
                        goto badkey;
        }

        /* STAT_PHASE_2: Create sequence */

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                seq_len = hmac_setkey(desc, ctx);
                break;
        case DRV_HASH_XCBC_MAC:
                seq_len = xcbc_setkey(desc, ctx);
                break;
        case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
                break; /* No auth. key setup */
        default:
                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
                rc = -ENOTSUPP;
                goto badkey;
        }

        /* STAT_PHASE_3: Submit sequence to HW */

        if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
                rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        goto setkey_error;
                }
        }

        /* Update STAT_PHASE_3 */
        return rc;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
        return rc;
}

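/* 3DES variant: reject invalid 3DES keys before delegating to cc_aead_setkey() */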
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (unlikely(err))
                return err;

        err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
              cc_aead_setkey(aead, key, keylen);

        memzero_explicit(&keys, sizeof(keys));
        return err;
}

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->ctr_nonce, key + keylen, 3);

        return cc_aead_setkey(tfm, key, keylen);
}

static int cc_aead_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        /* Unsupported auth. sizes */
        if (authsize == 0 ||
            authsize > crypto_aead_maxauthsize(authenc)) {
                return -ENOTSUPP;
        }

        ctx->authsize = authsize;
        dev_dbg(dev, "authlen=%d\n", ctx->authsize);

        return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
                              unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

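/* Add a descriptor feeding the associated data (DLLI or MLLI) into @flow_mode */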
static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
                              struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
        unsigned int idx = *seq_size;
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (assoc_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "ASSOC buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
                             areq_ctx->assoclen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "ASSOC buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
                             areq_ctx->assoc.mlli_nents, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "Invalid ASSOC buffer type\n");
        }

        *seq_size = (++idx);
}

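/*
 * Add a descriptor feeding the cipher-/plain-text into the hash engine
 * for the authentication pass; the source depends on the direction and
 * on whether the data is described by a DLLI or an MLLI table.
 */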
static void cc_proc_authen_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size, int direct)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        unsigned int idx = *seq_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
        {
                struct scatterlist *cipher =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_sgl : areq_ctx->src_sgl;

                unsigned int offset =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_offset : areq_ctx->src_offset;
                dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(cipher) + offset),
                             areq_ctx->cryptlen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_MLLI:
        {
                /* DOUBLE-PASS flow (the default):
                 * assoc. + iv + data are compacted in one table.
                 * If assoclen is ZERO, only the IV is processed.
                 */
                cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
                u32 mlli_nents = areq_ctx->assoc.mlli_nents;

                if (areq_ctx->is_single_pass) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                                mlli_addr = areq_ctx->dst.sram_addr;
                                mlli_nents = areq_ctx->dst.mlli_nents;
                        } else {
                                mlli_addr = areq_ctx->src.sram_addr;
                                mlli_nents = areq_ctx->src.mlli_nents;
                        }
                }

                dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
                             NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

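/* Add the data-processing descriptor moving cryptlen bytes from src to dst */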
static void cc_proc_cipher_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if (areq_ctx->cryptlen == 0)
                return; /*null processing*/

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(areq_ctx->src_sgl) +
                              areq_ctx->src_offset), areq_ctx->cryptlen,
                              NS_BIT);
                set_dout_dlli(&desc[idx],
                              (sg_dma_address(areq_ctx->dst_sgl) +
                               areq_ctx->dst_offset),
                              areq_ctx->cryptlen, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
                             areq_ctx->src.mlli_nents, NS_BIT);
                set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
                              areq_ctx->dst.mlli_nents, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

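/*
 * Add the final descriptor that reads the computed MAC out of the hash
 * (or AES-MAC) engine: straight into the ICV on encryption, or into
 * mac_buf for comparison on decryption. Sets the completion indication.
 */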
static void cc_proc_digest_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        int direct = req_ctx->gen_ctx.op_type;

        /* Get final ICV result */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                hw_desc_init(&desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
                              NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_aes_not_hash_mode(&desc[idx]);
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                } else {
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        } else { /*Decrypt*/
                /* Get ICV out from hardware */
                hw_desc_init(&desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
                              ctx->authsize, NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                set_cipher_config0(&desc[idx],
                                   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                        set_aes_not_hash_mode(&desc[idx]);
                } else {
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        }

        *seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
                               struct cc_hw_desc desc[],
                               unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = req_ctx->hw_iv_size;
        unsigned int idx = *seq_size;
        int direct = req_ctx->gen_ctx.op_type;

        /* Setup cipher state */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
                     hw_iv_size, NS_BIT);
        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        else
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        /* Setup enc. key */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        if (ctx->flow_mode == S_DIN_to_AES) {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
                              ctx->enc_keylen), NS_BIT);
                set_key_size_aes(&desc[idx], ctx->enc_keylen);
        } else {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ctx->enc_keylen, NS_BIT);
                set_key_size_des(&desc[idx], ctx->enc_keylen);
        }
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        *seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
                           unsigned int *seq_size, unsigned int data_flow_mode)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int idx = *seq_size;

        if (req_ctx->cryptlen == 0)
                return; /*null processing*/

        cc_set_cipher_desc(req, desc, &idx);
        cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* We must wait for DMA to write all the cipher data */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;
        }

        *seq_size = idx;
}

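/* Load the HMAC ipad digest and initial digest length into the hash HW */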
static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        /* Loading hash ipad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
                     NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        *seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int idx = *seq_size;

        /* Loading MAC state */
        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K1 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                     AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K2 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K3 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        *seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;

        /* Hash associated data */
        if (areq_ctx->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

        /* Hash IV */
        *seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      ctx->hash_len);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_cipher_do(&desc[idx], DO_PAD);
        idx++;

        /* Get final ICV result */
        hw_desc_init(&desc[idx]);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      digest_size);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        set_cipher_mode(&desc[idx], hash_mode);
        idx++;

        /* Loading hash opad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
                     digest_size, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        /* Perform HASH update */
        hw_desc_init(&desc[idx]);
        set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
                     digest_size);
        set_flow_mode(&desc[idx], DIN_HASH);
        idx++;

        *seq_size = idx;
}

static void cc_mlli_to_sram(struct aead_request *req,
                            struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
            req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
            !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
                dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
                        (unsigned int)ctx->drvdata->mlli_sram_addr,
                        req_ctx->mlli_params.mlli_len);
                /* Copy MLLI table host-to-sram */
                hw_desc_init(&desc[*seq_size]);
                set_din_type(&desc[*seq_size], DMA_DLLI,
                             req_ctx->mlli_params.mlli_dma_addr,
                             req_ctx->mlli_params.mlli_len, NS_BIT);
                set_dout_sram(&desc[*seq_size],
                              ctx->drvdata->mlli_sram_addr,
                              req_ctx->mlli_params.mlli_len);
                set_flow_mode(&desc[*seq_size], BYPASS);
                (*seq_size)++;
        }
}

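/*
 * Map the setup flow (AES/DES) and direction to a data flow mode:
 * single-pass flows combine cipher and hash in one pass, while
 * double-pass flows run the cipher alone (DIN_*_DOUT) and hash separately.
 */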
static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
                                          enum cc_flow_mode setup_flow_mode,
                                          bool is_single_pass)
{
        enum cc_flow_mode data_flow_mode;

        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_to_HASH_and_DOUT : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_to_HASH_and_DOUT : DIN_DES_DOUT;
        } else { /* Decrypt */
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_and_HASH : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_and_HASH : DIN_DES_DOUT;
        }

        return data_flow_mode;
}

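/* Build the full HMAC-authenc descriptor sequence (single- or double-pass) */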
1220 static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1221                             unsigned int *seq_size)
1222 {
1223         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1224         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1225         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1226         int direct = req_ctx->gen_ctx.op_type;
1227         unsigned int data_flow_mode =
1228                 cc_get_data_flow(direct, ctx->flow_mode,
1229                                  req_ctx->is_single_pass);
1230 
1231         if (req_ctx->is_single_pass) {
1232                 /*
1233                  * Single-pass flow
1234                  */
1235                 cc_set_hmac_desc(req, desc, seq_size);
1236                 cc_set_cipher_desc(req, desc, seq_size);
1237                 cc_proc_header_desc(req, desc, seq_size);
1238                 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1239                 cc_proc_scheme_desc(req, desc, seq_size);
1240                 cc_proc_digest_desc(req, desc, seq_size);
1241                 return;
1242         }
1243 
1244         /*
1245          * Double-pass flow:
1246          * fallback for modes that cannot run single-pass,
1247          * i.e. when the associated data size is not a word multiple.
1248          */
1249         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1250                 /* encrypt first.. */
1251                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1252                 /* authenc after..*/
1253                 cc_set_hmac_desc(req, desc, seq_size);
1254                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1255                 cc_proc_scheme_desc(req, desc, seq_size);
1256                 cc_proc_digest_desc(req, desc, seq_size);
1257 
1258         } else { /*DECRYPT*/
1259                 /* authenc first..*/
1260                 cc_set_hmac_desc(req, desc, seq_size);
1261                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1262                 cc_proc_scheme_desc(req, desc, seq_size);
1263                 /* decrypt after.. */
1264                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1265                 /* Read the digest result (setting the completion bit);
1266                  * this must come after the cipher operation.
1267                  */
1268                 cc_proc_digest_desc(req, desc, seq_size);
1269         }
1270 }
1271 
1272 static void
1273 cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1274                 unsigned int *seq_size)
1275 {
1276         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1277         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1278         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1279         int direct = req_ctx->gen_ctx.op_type;
1280         unsigned int data_flow_mode =
1281                 cc_get_data_flow(direct, ctx->flow_mode,
1282                                  req_ctx->is_single_pass);
1283 
1284         if (req_ctx->is_single_pass) {
1285                 /*
1286                  * Single-pass flow
1287                  */
1288                 cc_set_xcbc_desc(req, desc, seq_size);
1289                 cc_set_cipher_desc(req, desc, seq_size);
1290                 cc_proc_header_desc(req, desc, seq_size);
1291                 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1292                 cc_proc_digest_desc(req, desc, seq_size);
1293                 return;
1294         }
1295 
1296         /*
1297          * Double-pass flow:
1298          * fallback for modes that cannot run single-pass,
1299          * i.e. when the associated data size is not a word multiple.
1300          */
1301         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1302                 /* encrypt first.. */
1303                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1304                 /* authenc after.. */
1305                 cc_set_xcbc_desc(req, desc, seq_size);
1306                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1307                 cc_proc_digest_desc(req, desc, seq_size);
1308         } else { /*DECRYPT*/
1309                 /* authenc first.. */
1310                 cc_set_xcbc_desc(req, desc, seq_size);
1311                 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1312                 /* decrypt after..*/
1313                 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1314                 /* Read the digest result (setting the completion bit);
1315                  * this must come after the cipher operation.
1316                  */
1317                 cc_proc_digest_desc(req, desc, seq_size);
1318         }
1319 }
1320 
1321 static int validate_data_size(struct cc_aead_ctx *ctx,
1322                               enum drv_crypto_direction direct,
1323                               struct aead_request *req)
1324 {
1325         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1326         struct device *dev = drvdata_to_dev(ctx->drvdata);
1327         unsigned int assoclen = areq_ctx->assoclen;
1328         unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1329                         (req->cryptlen - ctx->authsize) : req->cryptlen;
1330 
1331         if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1332             req->cryptlen < ctx->authsize)
1333                 goto data_size_err;
1334 
1335         areq_ctx->is_single_pass = true; /* default to the fast (single-pass) flow */
1336 
1337         switch (ctx->flow_mode) {
1338         case S_DIN_to_AES:
1339                 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1340                     !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1341                         goto data_size_err;
1342                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1343                         break;
1344                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1345                         if (areq_ctx->plaintext_authenticate_only)
1346                                 areq_ctx->is_single_pass = false;
1347                         break;
1348                 }
1349 
1350                 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1351                         areq_ctx->is_single_pass = false;
1352 
1353                 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1354                     !IS_ALIGNED(cipherlen, sizeof(u32)))
1355                         areq_ctx->is_single_pass = false;
1356 
1357                 break;
1358         case S_DIN_to_DES:
1359                 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1360                         goto data_size_err;
1361                 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1362                         areq_ctx->is_single_pass = false;
1363                 break;
1364         default:
1365                 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1366                 goto data_size_err;
1367         }
1368 
1369         return 0;
1370 
1371 data_size_err:
1372         return -EINVAL;
1373 }
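
     /*
      * For illustration, the rules enforced above: a decrypt request must
      * carry at least authsize bytes, and CBC (AES) or DES payloads that
      * are not block aligned are hard errors. The request falls back to
      * the double-pass flow when the associated data length is not a
      * 4-byte multiple, when a CTR payload is not a 4-byte multiple, for
      * RFC 4543 (GCTR with plaintext_authenticate_only set), or when DES
      * associated data is not DES-block aligned. CCM always stays on the
      * single-pass path here.
      */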
1374 
1375 static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1376 {
1377         unsigned int len = 0;
1378 
1379         if (header_size == 0)
1380                 return 0;
1381 
1382         if (header_size < ((1UL << 16) - (1UL << 8))) {
1383                 len = 2;
1384 
1385                 pa0_buff[0] = (header_size >> 8) & 0xFF;
1386                 pa0_buff[1] = header_size & 0xFF;
1387         } else {
1388                 len = 6;
1389 
1390                 pa0_buff[0] = 0xFF;
1391                 pa0_buff[1] = 0xFE;
1392                 pa0_buff[2] = (header_size >> 24) & 0xFF;
1393                 pa0_buff[3] = (header_size >> 16) & 0xFF;
1394                 pa0_buff[4] = (header_size >> 8) & 0xFF;
1395                 pa0_buff[5] = header_size & 0xFF;
1396         }
1397 
1398         return len;
1399 }
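
     /*
      * A worked example of the A0 encoding above, per RFC 3610: an
      * associated data length of 0x20 (below 0xFF00) is encoded as the
      * two bytes { 0x00, 0x20 }; a length of 0x00011000 is encoded as
      * the six bytes { 0xFF, 0xFE, 0x00, 0x01, 0x10, 0x00 }.
      */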
1400 
1401 static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1402 {
1403         __be32 data;
1404 
1405         memset(block, 0, csize);
1406         block += csize;
1407 
1408         if (csize >= 4)
1409                 csize = 4;
1410         else if (msglen > (1 << (8 * csize)))
1411                 return -EOVERFLOW;
1412 
1413         data = cpu_to_be32(msglen);
1414         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1415 
1416         return 0;
1417 }
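
     /*
      * A worked example for set_msg_len(): with csize = 3 and
      * msglen = 0x012345, the three trailing bytes of the length field
      * become { 0x01, 0x23, 0x45 }; with csize >= 4 the full 32-bit
      * big-endian length is written, e.g. msglen = 0x0A0B0C0D and
      * csize = 4 yields { 0x0A, 0x0B, 0x0C, 0x0D }. For csize < 4, a
      * message longer than 2^(8*csize) bytes is rejected with
      * -EOVERFLOW.
      */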
1418 
1419 static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1420                   unsigned int *seq_size)
1421 {
1422         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1423         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1424         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1425         unsigned int idx = *seq_size;
1426         unsigned int cipher_flow_mode;
1427         dma_addr_t mac_result;
1428 
1429         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1430                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1431                 mac_result = req_ctx->mac_buf_dma_addr;
1432         } else { /* Encrypt */
1433                 cipher_flow_mode = AES_and_HASH;
1434                 mac_result = req_ctx->icv_dma_addr;
1435         }
1436 
1437         /* load key */
1438         hw_desc_init(&desc[idx]);
1439         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1440         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1441                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1442                       ctx->enc_keylen), NS_BIT);
1443         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1444         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1445         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1446         set_flow_mode(&desc[idx], S_DIN_to_AES);
1447         idx++;
1448 
1449         /* load ctr state */
1450         hw_desc_init(&desc[idx]);
1451         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1452         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1453         set_din_type(&desc[idx], DMA_DLLI,
1454                      req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1455         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1456         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1457         set_flow_mode(&desc[idx], S_DIN_to_AES);
1458         idx++;
1459 
1460         /* load MAC key */
1461         hw_desc_init(&desc[idx]);
1462         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1463         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1464                      ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1465                       ctx->enc_keylen), NS_BIT);
1466         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1467         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1468         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1469         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1470         set_aes_not_hash_mode(&desc[idx]);
1471         idx++;
1472 
1473         /* load MAC state */
1474         hw_desc_init(&desc[idx]);
1475         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1476         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1477         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1478                      AES_BLOCK_SIZE, NS_BIT);
1479         set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1480         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1481         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1482         set_aes_not_hash_mode(&desc[idx]);
1483         idx++;
1484 
1485         /* process assoc data */
1486         if (req_ctx->assoclen > 0) {
1487                 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1488         } else {
1489                 hw_desc_init(&desc[idx]);
1490                 set_din_type(&desc[idx], DMA_DLLI,
1491                              sg_dma_address(&req_ctx->ccm_adata_sg),
1492                              AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1493                 set_flow_mode(&desc[idx], DIN_HASH);
1494                 idx++;
1495         }
1496 
1497         /* process the cipher */
1498         if (req_ctx->cryptlen)
1499                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1500 
1501         /* Read temporal MAC */
1502         hw_desc_init(&desc[idx]);
1503         set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1504         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1505                       NS_BIT, 0);
1506         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1507         set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1508         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1509         set_aes_not_hash_mode(&desc[idx]);
1510         idx++;
1511 
1512         /* load AES-CTR state (for the last MAC calculation) */
1513         hw_desc_init(&desc[idx]);
1514         set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1515         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1516         set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1517                      AES_BLOCK_SIZE, NS_BIT);
1518         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1519         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1520         set_flow_mode(&desc[idx], S_DIN_to_AES);
1521         idx++;
1522 
1523         hw_desc_init(&desc[idx]);
1524         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1525         set_dout_no_dma(&desc[idx], 0, 0, 1);
1526         idx++;
1527 
1528         /* encrypt the "T" value and store MAC in mac_state */
1529         hw_desc_init(&desc[idx]);
1530         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1531                      ctx->authsize, NS_BIT);
1532         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1533         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1534         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1535         idx++;
1536 
1537         *seq_size = idx;
1538         return 0;
1539 }
1540 
1541 static int config_ccm_adata(struct aead_request *req)
1542 {
1543         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1544         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1545         struct device *dev = drvdata_to_dev(ctx->drvdata);
1546         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1548         unsigned int lp = req->iv[0];
1549         /* Note: the code assumes that req->iv[0] already contains the
1550          * value of L' (i.e. L - 1) from RFC 3610.
1551          */
1552         unsigned int l = lp + 1;  /* This is L of RFC 3610. */
1553         unsigned int m = ctx->authsize;  /* This is M (the tag size) of RFC 3610. */
1554         u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1555         u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1556         u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1557         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1558                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1559                                 req->cryptlen :
1560                                 (req->cryptlen - ctx->authsize);
1561         int rc;
1562 
1563         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1564         memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1565 
1566         /* taken from crypto/ccm.c */
1567         /* 2 <= L <= 8, so 1 <= L' <= 7. */
1568         if (l < 2 || l > 8) {
1569                 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1570                 return -EINVAL;
1571         }
1572         memcpy(b0, req->iv, AES_BLOCK_SIZE);
1573 
1574         /* format control info per RFC 3610 and
1575          * NIST Special Publication 800-38C
1576          */
1577         *b0 |= (8 * ((m - 2) / 2));
1578         if (req_ctx->assoclen > 0)
1579                 *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1580 
1581         rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m) into the last L bytes. */
1582         if (rc) {
1583                 dev_err(dev, "message len overflow detected\n");
1584                 return rc;
1585         }
1586          /* END of "taken from crypto/ccm.c" */
1587 
1588         /* l(a) - size of associated data. */
1589         req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1590 
1591         memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1592         req->iv[15] = 1;
1593 
1594         memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1595         ctr_count_0[15] = 0;
1596 
1597         return 0;
1598 }
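
     /*
      * A worked example of the B0 flags byte built above: for an
      * RFC 4309 request req->iv[0] = L' = 3, so with a tag size M = 8
      * and associated data present the flags byte becomes
      * 0x40 | (8 * ((8 - 2) / 2)) | 0x03 = 0x40 | 0x18 | 0x03 = 0x5B,
      * matching the RFC 3610 formula 64*Adata + 8*M' + L'.
      */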
1599 
1600 static void cc_proc_rfc4309_ccm(struct aead_request *req)
1601 {
1602         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1603         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1604         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1605 
1606         /* L' */
1607         memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1608         /* For RFC 4309, always use 4 bytes for message length
1609          * (at most 2^32-1 bytes).
1610          */
1611         areq_ctx->ctr_iv[0] = 3;
1612 
1613         /* In RFC 4309 there is an 11-byte nonce+IV part
1614          * that we build here.
1615          */
1616         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1617                CCM_BLOCK_NONCE_SIZE);
1618         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1619                CCM_BLOCK_IV_SIZE);
1620         req->iv = areq_ctx->ctr_iv;
1621         areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
1622 }
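
     /*
      * The resulting RFC 4309 counter block layout is:
      *
      *   byte  0      L' = 3 (4-byte length field)
      *   bytes 1-3    implicit nonce (salt) taken from the key
      *   bytes 4-11   explicit IV from the request
      *   bytes 12-15  block counter
      *
      * i.e. the 11-byte CCM nonce is salt || explicit IV.
      */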
1623 
1624 static void cc_set_ghash_desc(struct aead_request *req,
1625                               struct cc_hw_desc desc[], unsigned int *seq_size)
1626 {
1627         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1628         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1629         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1630         unsigned int idx = *seq_size;
1631 
1632         /* load key to AES */
1633         hw_desc_init(&desc[idx]);
1634         set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1635         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1636         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1637                      ctx->enc_keylen, NS_BIT);
1638         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1639         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1640         set_flow_mode(&desc[idx], S_DIN_to_AES);
1641         idx++;
1642 
1643         /* process one zero block to generate hkey */
1644         hw_desc_init(&desc[idx]);
1645         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1646         set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1647                       NS_BIT, 0);
1648         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1649         idx++;
1650 
1651         /* Memory Barrier */
1652         hw_desc_init(&desc[idx]);
1653         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1654         set_dout_no_dma(&desc[idx], 0, 0, 1);
1655         idx++;
1656 
1657         /* Load GHASH subkey */
1658         hw_desc_init(&desc[idx]);
1659         set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1660                      AES_BLOCK_SIZE, NS_BIT);
1661         set_dout_no_dma(&desc[idx], 0, 0, 1);
1662         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1663         set_aes_not_hash_mode(&desc[idx]);
1664         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1665         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1666         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1667         idx++;
1668 
1669         /* Configure the hash engine to work with GHASH.
1670          * Since it was not possible to extend the HASH submodes to add
1671          * GHASH, the following command is necessary in order to select
1672          * GHASH (according to the HW designers).
1673          */
1674         hw_desc_init(&desc[idx]);
1675         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1676         set_dout_no_dma(&desc[idx], 0, 0, 1);
1677         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1678         set_aes_not_hash_mode(&desc[idx]);
1679         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1680         set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1681         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1682         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1683         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1684         idx++;
1685 
1686         /* Load the GHASH initial STATE (which is 0); as for any hash,
1687          * there is an initial state.
1688          */
1689         hw_desc_init(&desc[idx]);
1690         set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1691         set_dout_no_dma(&desc[idx], 0, 0, 1);
1692         set_flow_mode(&desc[idx], S_DIN_to_HASH);
1693         set_aes_not_hash_mode(&desc[idx]);
1694         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1695         set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1696         set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1697         idx++;
1698 
1699         *seq_size = idx;
1700 }
1701 
1702 static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1703                              unsigned int *seq_size)
1704 {
1705         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1706         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1707         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1708         unsigned int idx = *seq_size;
1709 
1710         /* load key to AES */
1711         hw_desc_init(&desc[idx]);
1712         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1713         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1714         set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1715                      ctx->enc_keylen, NS_BIT);
1716         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1717         set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1718         set_flow_mode(&desc[idx], S_DIN_to_AES);
1719         idx++;
1720 
1721         if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1722                 /* load AES/CTR initial CTR value incremented by 2 */
1723                 hw_desc_init(&desc[idx]);
1724                 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1725                 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1726                 set_din_type(&desc[idx], DMA_DLLI,
1727                              req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1728                              NS_BIT);
1729                 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1730                 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1731                 set_flow_mode(&desc[idx], S_DIN_to_AES);
1732                 idx++;
1733         }
1734 
1735         *seq_size = idx;
1736 }
1737 
1738 static void cc_proc_gcm_result(struct aead_request *req,
1739                                struct cc_hw_desc desc[],
1740                                unsigned int *seq_size)
1741 {
1742         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1743         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1744         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1745         dma_addr_t mac_result;
1746         unsigned int idx = *seq_size;
1747 
1748         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1749                 mac_result = req_ctx->mac_buf_dma_addr;
1750         } else { /* Encrypt */
1751                 mac_result = req_ctx->icv_dma_addr;
1752         }
1753 
1754         /* process(ghash) gcm_block_len */
1755         hw_desc_init(&desc[idx]);
1756         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1757                      AES_BLOCK_SIZE, NS_BIT);
1758         set_flow_mode(&desc[idx], DIN_HASH);
1759         idx++;
1760 
1761         /* Store GHASH state after GHASH(associated data + ciphertext + len block) */
1762         hw_desc_init(&desc[idx]);
1763         set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1764         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1765         set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1766                       NS_BIT, 0);
1767         set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1768         set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1769         set_aes_not_hash_mode(&desc[idx]);
1770 
1771         idx++;
1772 
1773         /* load AES/CTR initial CTR value incremented by 1 */
1774         hw_desc_init(&desc[idx]);
1775         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1776         set_key_size_aes(&desc[idx], ctx->enc_keylen);
1777         set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1778                      AES_BLOCK_SIZE, NS_BIT);
1779         set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1780         set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1781         set_flow_mode(&desc[idx], S_DIN_to_AES);
1782         idx++;
1783 
1784         /* Memory Barrier */
1785         hw_desc_init(&desc[idx]);
1786         set_din_no_dma(&desc[idx], 0, 0xfffff0);
1787         set_dout_no_dma(&desc[idx], 0, 0, 1);
1788         idx++;
1789 
1790         /* process GCTR on stored GHASH and store MAC in mac_state */
1791         hw_desc_init(&desc[idx]);
1792         set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1793         set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1794                      AES_BLOCK_SIZE, NS_BIT);
1795         set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1796         set_queue_last_ind(ctx->drvdata, &desc[idx]);
1797         set_flow_mode(&desc[idx], DIN_AES_DOUT);
1798         idx++;
1799 
1800         *seq_size = idx;
1801 }
1802 
1803 static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1804                   unsigned int *seq_size)
1805 {
1806         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1807         unsigned int cipher_flow_mode;
1808 
1809         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1810                 cipher_flow_mode = AES_and_HASH;
1811         } else { /* Encrypt */
1812                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1813         }
1814 
1815         // In RFC 4543 there is no data to encrypt; just copy data from src to dst.
1816         if (req_ctx->plaintext_authenticate_only) {
1817                 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1818                 cc_set_ghash_desc(req, desc, seq_size);
1819                 /* process(ghash) assoc data */
1820                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1821                 cc_set_gctr_desc(req, desc, seq_size);
1822                 cc_proc_gcm_result(req, desc, seq_size);
1823                 return 0;
1824         }
1825 
1826         // For GCM and RFC 4106.
1827         cc_set_ghash_desc(req, desc, seq_size);
1828         /* process(ghash) assoc data */
1829         if (req_ctx->assoclen > 0)
1830                 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1831         cc_set_gctr_desc(req, desc, seq_size);
1832         /* process(gctr+ghash) */
1833         if (req_ctx->cryptlen)
1834                 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1835         cc_proc_gcm_result(req, desc, seq_size);
1836 
1837         return 0;
1838 }
1839 
1840 static int config_gcm_context(struct aead_request *req)
1841 {
1842         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1843         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1844         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1845         struct device *dev = drvdata_to_dev(ctx->drvdata);
1846 
1847         unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1848                                  DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1849                                 req->cryptlen :
1850                                 (req->cryptlen - ctx->authsize);
1851         __be32 counter = cpu_to_be32(2);
1852 
1853         dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1854                 __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1855 
1856         memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1857 
1858         memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1859 
1860         memcpy(req->iv + 12, &counter, 4);
1861         memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1862 
1863         counter = cpu_to_be32(1);
1864         memcpy(req->iv + 12, &counter, 4);
1865         memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1866 
1867         if (!req_ctx->plaintext_authenticate_only) {
1868                 __be64 temp64;
1869 
1870                 temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1871                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1872                 temp64 = cpu_to_be64(cryptlen * 8);
1873                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1874         } else {
1875                 /* RFC 4543: all data (AAD, IV, plaintext) is treated as
1876                  * additional authenticated data, i.e. nothing is encrypted.
1877                  */
1878                 __be64 temp64;
1879 
1880                 temp64 = cpu_to_be64((req_ctx->assoclen +
1881                                       GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1882                 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1883                 temp64 = 0;
1884                 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1885         }
1886 
1887         return 0;
1888 }
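
     /*
      * For illustration: with the 12-byte GCM IV, the counter blocks
      * built above are gcm_iv_inc1 = IV || 0x00000001 (used to encrypt
      * the final GHASH value into the tag) and gcm_iv_inc2 =
      * IV || 0x00000002 (initial counter for the payload).
      * gcm_len_block holds the SP 800-38D length block: the 64-bit
      * big-endian bit-lengths of the AAD and of the ciphertext; for
      * RFC 4543 everything (AAD + IV + plaintext) is counted as AAD and
      * the ciphertext length is zero.
      */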
1889 
1890 static void cc_proc_rfc4_gcm(struct aead_request *req)
1891 {
1892         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1893         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1894         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1895 
1896         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1897                ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1898         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1899                GCM_BLOCK_RFC4_IV_SIZE);
1900         req->iv = areq_ctx->ctr_iv;
1901         areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1902 }
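
     /*
      * For RFC 4106/4543 the 12-byte GCM IV assembled above is the
      * 4-byte salt (stored in ctx->ctr_nonce at setkey time) followed
      * by the 8-byte explicit IV from the request. The explicit IV
      * bytes are subtracted from assoclen because the AEAD layout for
      * these templates counts them as part of the associated data.
      */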
1903 
1904 static int cc_proc_aead(struct aead_request *req,
1905                         enum drv_crypto_direction direct)
1906 {
1907         int rc = 0;
1908         int seq_len = 0;
1909         struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1910         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1911         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1912         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1913         struct device *dev = drvdata_to_dev(ctx->drvdata);
1914         struct cc_crypto_req cc_req = {};
1915 
1916         dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1917                 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1918                 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1919                 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1920 
1921         /* STAT_PHASE_0: Init and sanity checks */
1922 
1923         /* Check data length according to mode */
1924         if (validate_data_size(ctx, direct, req)) {
1925                 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1926                         req->cryptlen, areq_ctx->assoclen);
1927                 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1928                 return -EINVAL;
1929         }
1930 
1931         /* Setup request structure */
1932         cc_req.user_cb = (void *)cc_aead_complete;
1933         cc_req.user_arg = (void *)req;
1934 
1935         /* Setup request context */
1936         areq_ctx->gen_ctx.op_type = direct;
1937         areq_ctx->req_authsize = ctx->authsize;
1938         areq_ctx->cipher_mode = ctx->cipher_mode;
1939 
1940         /* STAT_PHASE_1: Map buffers */
1941 
1942         if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1943                 /* Build CTR IV - Copy nonce from last 4 bytes in
1944                  * CTR key to first 4 bytes in CTR IV
1945                  */
1946                 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1947                        CTR_RFC3686_NONCE_SIZE);
1948                 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1949                        CTR_RFC3686_IV_SIZE);
1950                 /* Initialize counter portion of counter block */
1951                 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1952                             CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1953 
1954                 /* Replace with counter iv */
1955                 req->iv = areq_ctx->ctr_iv;
1956                 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1957         } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1958                    (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1959                 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1960                 if (areq_ctx->ctr_iv != req->iv) {
1961                         memcpy(areq_ctx->ctr_iv, req->iv,
1962                                crypto_aead_ivsize(tfm));
1963                         req->iv = areq_ctx->ctr_iv;
1964                 }
1965         } else {
1966                 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1967         }
1968 
1969         if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1970                 rc = config_ccm_adata(req);
1971                 if (rc) {
1972                         dev_dbg(dev, "config_ccm_adata() failed with %d\n",
1973                                 rc);
1974                         goto exit;
1975                 }
1976         } else {
1977                 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1978         }
1979 
1980         if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1981                 rc = config_gcm_context(req);
1982                 if (rc) {
1983                         dev_dbg(dev, "config_gcm_context() failed with %d\n",
1984                                 rc);
1985                         goto exit;
1986                 }
1987         }
1988 
1989         rc = cc_map_aead_request(ctx->drvdata, req);
1990         if (rc) {
1991                 dev_err(dev, "map_request() failed\n");
1992                 goto exit;
1993         }
1994 
1995         /* STAT_PHASE_2: Create sequence */
1996 
1997         /* Load MLLI tables to SRAM if necessary */
1998         cc_mlli_to_sram(req, desc, &seq_len);
1999 
2000         /*TODO: move seq len by reference */
2001         switch (ctx->auth_mode) {
2002         case DRV_HASH_SHA1:
2003         case DRV_HASH_SHA256:
2004                 cc_hmac_authenc(req, desc, &seq_len);
2005                 break;
2006         case DRV_HASH_XCBC_MAC:
2007                 cc_xcbc_authenc(req, desc, &seq_len);
2008                 break;
2009         case DRV_HASH_NULL:
2010                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2011                         cc_ccm(req, desc, &seq_len);
2012                 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2013                         cc_gcm(req, desc, &seq_len);
2014                 break;
2015         default:
2016                 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2017                 cc_unmap_aead_request(dev, req);
2018                 rc = -ENOTSUPP;
2019                 goto exit;
2020         }
2021 
2022         /* STAT_PHASE_3: Lock HW and push sequence */
2023 
2024         rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2025 
2026         if (rc != -EINPROGRESS && rc != -EBUSY) {
2027                 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2028                 cc_unmap_aead_request(dev, req);
2029         }
2030 
2031 exit:
2032         return rc;
2033 }
2034 
2035 static int cc_aead_encrypt(struct aead_request *req)
2036 {
2037         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2038         int rc;
2039 
2040         memset(areq_ctx, 0, sizeof(*areq_ctx));
2041 
2042         /* No generated IV required */
2043         areq_ctx->backup_iv = req->iv;
2044         areq_ctx->assoclen = req->assoclen;
2045         areq_ctx->is_gcm4543 = false;
2046 
2047         areq_ctx->plaintext_authenticate_only = false;
2048 
2049         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2050         if (rc != -EINPROGRESS && rc != -EBUSY)
2051                 req->iv = areq_ctx->backup_iv;
2052 
2053         return rc;
2054 }
2055 
2056 static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2057 {
2058         /* Very similar to cc_aead_encrypt() above. */
2059 
2060         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2061         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2062         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2063         struct device *dev = drvdata_to_dev(ctx->drvdata);
2064         int rc = -EINVAL;
2065 
2066         if (!valid_assoclen(req)) {
2067                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2068                 goto out;
2069         }
2070 
2071         memset(areq_ctx, 0, sizeof(*areq_ctx));
2072 
2073         /* No generated IV required */
2074         areq_ctx->backup_iv = req->iv;
2075         areq_ctx->assoclen = req->assoclen;
2076         areq_ctx->is_gcm4543 = true;
2077 
2078         cc_proc_rfc4309_ccm(req);
2079 
2080         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2081         if (rc != -EINPROGRESS && rc != -EBUSY)
2082                 req->iv = areq_ctx->backup_iv;
2083 out:
2084         return rc;
2085 }
2086 
2087 static int cc_aead_decrypt(struct aead_request *req)
2088 {
2089         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2090         int rc;
2091 
2092         memset(areq_ctx, 0, sizeof(*areq_ctx));
2093 
2094         /* No generated IV required */
2095         areq_ctx->backup_iv = req->iv;
2096         areq_ctx->assoclen = req->assoclen;
2097         areq_ctx->is_gcm4543 = false;
2098 
2099         areq_ctx->plaintext_authenticate_only = false;
2100 
2101         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2102         if (rc != -EINPROGRESS && rc != -EBUSY)
2103                 req->iv = areq_ctx->backup_iv;
2104 
2105         return rc;
2106 }
2107 
2108 static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2109 {
2110         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2111         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2112         struct device *dev = drvdata_to_dev(ctx->drvdata);
2113         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2114         int rc = -EINVAL;
2115 
2116         if (!valid_assoclen(req)) {
2117                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2118                 goto out;
2119         }
2120 
2121         memset(areq_ctx, 0, sizeof(*areq_ctx));
2122 
2123         /* No generated IV required */
2124         areq_ctx->backup_iv = req->iv;
2125         areq_ctx->assoclen = req->assoclen;
2126 
2127         areq_ctx->is_gcm4543 = true;
2128         cc_proc_rfc4309_ccm(req);
2129 
2130         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2131         if (rc != -EINPROGRESS && rc != -EBUSY)
2132                 req->iv = areq_ctx->backup_iv;
2133 
2134 out:
2135         return rc;
2136 }
2137 
2138 static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2139                                  unsigned int keylen)
2140 {
2141         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2142         struct device *dev = drvdata_to_dev(ctx->drvdata);
2143 
2144         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2145 
2146         if (keylen < 4)
2147                 return -EINVAL;
2148 
2149         keylen -= 4;
2150         memcpy(ctx->ctr_nonce, key + keylen, 4);
2151 
2152         return cc_aead_setkey(tfm, key, keylen);
2153 }
2154 
2155 static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2156                                  unsigned int keylen)
2157 {
2158         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2159         struct device *dev = drvdata_to_dev(ctx->drvdata);
2160 
2161         dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2162 
2163         if (keylen < 4)
2164                 return -EINVAL;
2165 
2166         keylen -= 4;
2167         memcpy(ctx->ctr_nonce, key + keylen, 4);
2168 
2169         return cc_aead_setkey(tfm, key, keylen);
2170 }
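
     /*
      * For both RFC 4106 and RFC 4543 the key blob carries the 4-byte
      * salt after the AES key, so e.g. a 20-byte blob is a 16-byte
      * AES-128 key followed by the salt that is stashed in
      * ctx->ctr_nonce above (28 and 36 bytes likewise map to AES-192
      * and AES-256 keys plus salt).
      */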
2171 
2172 static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2173                               unsigned int authsize)
2174 {
2175         switch (authsize) {
2176         case 4:
2177         case 8:
2178         case 12:
2179         case 13:
2180         case 14:
2181         case 15:
2182         case 16:
2183                 break;
2184         default:
2185                 return -EINVAL;
2186         }
2187 
2188         return cc_aead_setauthsize(authenc, authsize);
2189 }
2190 
2191 static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2192                                       unsigned int authsize)
2193 {
2194         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2195         struct device *dev = drvdata_to_dev(ctx->drvdata);
2196 
2197         dev_dbg(dev, "authsize %d\n", authsize);
2198 
2199         switch (authsize) {
2200         case 8:
2201         case 12:
2202         case 16:
2203                 break;
2204         default:
2205                 return -EINVAL;
2206         }
2207 
2208         return cc_aead_setauthsize(authenc, authsize);
2209 }
2210 
2211 static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2212                                       unsigned int authsize)
2213 {
2214         struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2215         struct device *dev = drvdata_to_dev(ctx->drvdata);
2216 
2217         dev_dbg(dev, "authsize %d\n", authsize);
2218 
2219         if (authsize != 16)
2220                 return -EINVAL;
2221 
2222         return cc_aead_setauthsize(authenc, authsize);
2223 }
2224 
2225 static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2226 {
2227         /* Very similar to cc_aead_encrypt() above. */
2228 
2229         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2230         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2231         struct device *dev = drvdata_to_dev(ctx->drvdata);
2232         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2233         int rc = -EINVAL;
2234 
2235         if (!valid_assoclen(req)) {
2236                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2237                 goto out;
2238         }
2239 
2240         memset(areq_ctx, 0, sizeof(*areq_ctx));
2241 
2242         /* No generated IV required */
2243         areq_ctx->backup_iv = req->iv;
2244         areq_ctx->assoclen = req->assoclen;
2245         areq_ctx->plaintext_authenticate_only = false;
2246 
2247         cc_proc_rfc4_gcm(req);
2248         areq_ctx->is_gcm4543 = true;
2249 
2250         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2251         if (rc != -EINPROGRESS && rc != -EBUSY)
2252                 req->iv = areq_ctx->backup_iv;
2253 out:
2254         return rc;
2255 }
2256 
2257 static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2258 {
2259         /* Very similar to cc_aead_encrypt() above. */
2260         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2261         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2262         struct device *dev = drvdata_to_dev(ctx->drvdata);
2263         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2264         int rc = -EINVAL;
2265 
2266         if (!valid_assoclen(req)) {
2267                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2268                 goto out;
2269         }
2270 
2271         memset(areq_ctx, 0, sizeof(*areq_ctx));
2272 
2273         // Plaintext is not encrypted with RFC 4543; it is only authenticated.
2274         areq_ctx->plaintext_authenticate_only = true;
2275 
2276         /* No generated IV required */
2277         areq_ctx->backup_iv = req->iv;
2278         areq_ctx->assoclen = req->assoclen;
2279 
2280         cc_proc_rfc4_gcm(req);
2281         areq_ctx->is_gcm4543 = true;
2282 
2283         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2284         if (rc != -EINPROGRESS && rc != -EBUSY)
2285                 req->iv = areq_ctx->backup_iv;
2286 out:
2287         return rc;
2288 }
2289 
2290 static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2291 {
2292         /* Very similar to cc_aead_decrypt() above. */
2293 
2294         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2295         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2296         struct device *dev = drvdata_to_dev(ctx->drvdata);
2297         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2298         int rc = -EINVAL;
2299 
2300         if (!valid_assoclen(req)) {
2301                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2302                 goto out;
2303         }
2304 
2305         memset(areq_ctx, 0, sizeof(*areq_ctx));
2306 
2307         /* No generated IV required */
2308         areq_ctx->backup_iv = req->iv;
2309         areq_ctx->assoclen = req->assoclen;
2310         areq_ctx->plaintext_authenticate_only = false;
2311 
2312         cc_proc_rfc4_gcm(req);
2313         areq_ctx->is_gcm4543 = true;
2314 
2315         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2316         if (rc != -EINPROGRESS && rc != -EBUSY)
2317                 req->iv = areq_ctx->backup_iv;
2318 out:
2319         return rc;
2320 }
2321 
2322 static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2323 {
2324         /* Very similar to cc_aead_decrypt() above. */
2325         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2326         struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2327         struct device *dev = drvdata_to_dev(ctx->drvdata);
2328         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2329         int rc = -EINVAL;
2330 
2331         if (!valid_assoclen(req)) {
2332                 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2333                 goto out;
2334         }
2335 
2336         memset(areq_ctx, 0, sizeof(*areq_ctx));
2337 
2338         // Data is not decrypted with RFC 4543; it is only authenticated.
2339         areq_ctx->plaintext_authenticate_only = true;
2340 
2341         /* No generated IV required */
2342         areq_ctx->backup_iv = req->iv;
2343         areq_ctx->assoclen = req->assoclen;
2344 
2345         cc_proc_rfc4_gcm(req);
2346         areq_ctx->is_gcm4543 = true;
2347 
2348         rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2349         if (rc != -EINPROGRESS && rc != -EBUSY)
2350                 req->iv = areq_ctx->backup_iv;
2351 out:
2352         return rc;
2353 }
2354 
2355 /* aead alg */
2356 static struct cc_alg_template aead_algs[] = {
2357         {
2358                 .name = "authenc(hmac(sha1),cbc(aes))",
2359                 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2360                 .blocksize = AES_BLOCK_SIZE,
2361                 .template_aead = {
2362                         .setkey = cc_aead_setkey,
2363                         .setauthsize = cc_aead_setauthsize,
2364                         .encrypt = cc_aead_encrypt,
2365                         .decrypt = cc_aead_decrypt,
2366                         .init = cc_aead_init,
2367                         .exit = cc_aead_exit,
2368                         .ivsize = AES_BLOCK_SIZE,
2369                         .maxauthsize = SHA1_DIGEST_SIZE,
2370                 },
2371                 .cipher_mode = DRV_CIPHER_CBC,
2372                 .flow_mode = S_DIN_to_AES,
2373                 .auth_mode = DRV_HASH_SHA1,
2374                 .min_hw_rev = CC_HW_REV_630,
2375                 .std_body = CC_STD_NIST,
2376         },
2377         {
2378                 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2379                 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2380                 .blocksize = DES3_EDE_BLOCK_SIZE,
2381                 .template_aead = {
2382                         .setkey = cc_des3_aead_setkey,
2383                         .setauthsize = cc_aead_setauthsize,
2384                         .encrypt = cc_aead_encrypt,
2385                         .decrypt = cc_aead_decrypt,
2386                         .init = cc_aead_init,
2387                         .exit = cc_aead_exit,
2388                         .ivsize = DES3_EDE_BLOCK_SIZE,
2389                         .maxauthsize = SHA1_DIGEST_SIZE,
2390                 },
2391                 .cipher_mode = DRV_CIPHER_CBC,
2392                 .flow_mode = S_DIN_to_DES,
2393                 .auth_mode = DRV_HASH_SHA1,
2394                 .min_hw_rev = CC_HW_REV_630,
2395                 .std_body = CC_STD_NIST,
2396         },
2397         {
2398                 .name = "authenc(hmac(sha256),cbc(aes))",
2399                 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2400                 .blocksize = AES_BLOCK_SIZE,
2401                 .template_aead = {
2402                         .setkey = cc_aead_setkey,
2403                         .setauthsize = cc_aead_setauthsize,
2404                         .encrypt = cc_aead_encrypt,
2405                         .decrypt = cc_aead_decrypt,
2406                         .init = cc_aead_init,
2407                         .exit = cc_aead_exit,
2408                         .ivsize = AES_BLOCK_SIZE,
2409                         .maxauthsize = SHA256_DIGEST_SIZE,
2410                 },
2411                 .cipher_mode = DRV_CIPHER_CBC,
2412                 .flow_mode = S_DIN_to_AES,
2413                 .auth_mode = DRV_HASH_SHA256,
2414                 .min_hw_rev = CC_HW_REV_630,
2415                 .std_body = CC_STD_NIST,
2416         },
2417         {
2418                 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2419                 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2420                 .blocksize = DES3_EDE_BLOCK_SIZE,
2421                 .template_aead = {
2422                         .setkey = cc_des3_aead_setkey,
2423                         .setauthsize = cc_aead_setauthsize,
2424                         .encrypt = cc_aead_encrypt,
2425                         .decrypt = cc_aead_decrypt,
2426                         .init = cc_aead_init,
2427                         .exit = cc_aead_exit,
2428                         .ivsize = DES3_EDE_BLOCK_SIZE,
2429                         .maxauthsize = SHA256_DIGEST_SIZE,
2430                 },
2431                 .cipher_mode = DRV_CIPHER_CBC,
2432                 .flow_mode = S_DIN_to_DES,
2433                 .auth_mode = DRV_HASH_SHA256,
2434                 .min_hw_rev = CC_HW_REV_630,
2435                 .std_body = CC_STD_NIST,
2436         },
2437         {
2438                 .name = "authenc(xcbc(aes),cbc(aes))",
2439                 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2440                 .blocksize = AES_BLOCK_SIZE,
2441                 .template_aead = {
2442                         .setkey = cc_aead_setkey,
2443                         .setauthsize = cc_aead_setauthsize,
2444                         .encrypt = cc_aead_encrypt,
2445                         .decrypt = cc_aead_decrypt,
2446                         .init = cc_aead_init,
2447                         .exit = cc_aead_exit,
2448                         .ivsize = AES_BLOCK_SIZE,
2449                         .maxauthsize = AES_BLOCK_SIZE,
2450                 },
2451                 .cipher_mode = DRV_CIPHER_CBC,
2452                 .flow_mode = S_DIN_to_AES,
2453                 .auth_mode = DRV_HASH_XCBC_MAC,
2454                 .min_hw_rev = CC_HW_REV_630,
2455                 .std_body = CC_STD_NIST,
2456         },
2457         {
2458                 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2459                 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2460                 .blocksize = 1,
2461                 .template_aead = {
2462                         .setkey = cc_aead_setkey,
2463                         .setauthsize = cc_aead_setauthsize,
2464                         .encrypt = cc_aead_encrypt,
2465                         .decrypt = cc_aead_decrypt,
2466                         .init = cc_aead_init,
2467                         .exit = cc_aead_exit,
2468                         .ivsize = CTR_RFC3686_IV_SIZE,
2469                         .maxauthsize = SHA1_DIGEST_SIZE,
2470                 },
2471                 .cipher_mode = DRV_CIPHER_CTR,
2472                 .flow_mode = S_DIN_to_AES,
2473                 .auth_mode = DRV_HASH_SHA1,
2474                 .min_hw_rev = CC_HW_REV_630,
2475                 .std_body = CC_STD_NIST,
2476         },
2477         {
2478                 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2479                 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2480                 .blocksize = 1,
2481                 .template_aead = {
2482                         .setkey = cc_aead_setkey,
2483                         .setauthsize = cc_aead_setauthsize,
2484                         .encrypt = cc_aead_encrypt,
2485                         .decrypt = cc_aead_decrypt,
2486                         .init = cc_aead_init,
2487                         .exit = cc_aead_exit,
2488                         .ivsize = CTR_RFC3686_IV_SIZE,
2489                         .maxauthsize = SHA256_DIGEST_SIZE,
2490                 },
2491                 .cipher_mode = DRV_CIPHER_CTR,
2492                 .flow_mode = S_DIN_to_AES,
2493                 .auth_mode = DRV_HASH_SHA256,
2494                 .min_hw_rev = CC_HW_REV_630,
2495                 .std_body = CC_STD_NIST,
2496         },
2497         {
2498                 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2499                 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2500                 .blocksize = 1,
2501                 .template_aead = {
2502                         .setkey = cc_aead_setkey,
2503                         .setauthsize = cc_aead_setauthsize,
2504                         .encrypt = cc_aead_encrypt,
2505                         .decrypt = cc_aead_decrypt,
2506                         .init = cc_aead_init,
2507                         .exit = cc_aead_exit,
2508                         .ivsize = CTR_RFC3686_IV_SIZE,
2509                         .maxauthsize = AES_BLOCK_SIZE,
2510                 },
2511                 .cipher_mode = DRV_CIPHER_CTR,
2512                 .flow_mode = S_DIN_to_AES,
2513                 .auth_mode = DRV_HASH_XCBC_MAC,
2514                 .min_hw_rev = CC_HW_REV_630,
2515                 .std_body = CC_STD_NIST,
2516         },
2517         {
2518                 .name = "ccm(aes)",
2519                 .driver_name = "ccm-aes-ccree",
2520                 .blocksize = 1,
2521                 .template_aead = {
2522                         .setkey = cc_aead_setkey,
2523                         .setauthsize = cc_ccm_setauthsize,
2524                         .encrypt = cc_aead_encrypt,
2525                         .decrypt = cc_aead_decrypt,
2526                         .init = cc_aead_init,
2527                         .exit = cc_aead_exit,
2528                         .ivsize = AES_BLOCK_SIZE,
2529                         .maxauthsize = AES_BLOCK_SIZE,
2530                 },
2531                 .cipher_mode = DRV_CIPHER_CCM,
2532                 .flow_mode = S_DIN_to_AES,
2533                 .auth_mode = DRV_HASH_NULL,
2534                 .min_hw_rev = CC_HW_REV_630,
2535                 .std_body = CC_STD_NIST,
2536         },
2537         {
2538                 .name = "rfc4309(ccm(aes))",
2539                 .driver_name = "rfc4309-ccm-aes-ccree",
2540                 .blocksize = 1,
2541                 .template_aead = {
2542                         .setkey = cc_rfc4309_ccm_setkey,
2543                         .setauthsize = cc_rfc4309_ccm_setauthsize,
2544                         .encrypt = cc_rfc4309_ccm_encrypt,
2545                         .decrypt = cc_rfc4309_ccm_decrypt,
2546                         .init = cc_aead_init,
2547                         .exit = cc_aead_exit,
2548                         .ivsize = CCM_BLOCK_IV_SIZE,
2549                         .maxauthsize = AES_BLOCK_SIZE,
2550                 },
2551                 .cipher_mode = DRV_CIPHER_CCM,
2552                 .flow_mode = S_DIN_to_AES,
2553                 .auth_mode = DRV_HASH_NULL,
2554                 .min_hw_rev = CC_HW_REV_630,
2555                 .std_body = CC_STD_NIST,
2556         },
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_gcm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
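	/*
	 * AES-GCM for IPsec ESP (RFC 4106): setkey() carries a 4-byte salt
	 * and each request supplies an 8-byte IV.
	 */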
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4106_gcm_setkey,
			.setauthsize = cc_rfc4106_gcm_setauthsize,
			.encrypt = cc_rfc4106_gcm_encrypt,
			.decrypt = cc_rfc4106_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
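	/*
	 * RFC 4543 GMAC: authentication-only GCM; the payload is passed
	 * through unencrypted and only the tag is produced or checked.
	 */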
	{
		.name = "rfc4543(gcm(aes))",
		.driver_name = "rfc4543-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4543_gcm_setkey,
			.setauthsize = cc_rfc4543_gcm_setauthsize,
			.encrypt = cc_rfc4543_gcm_encrypt,
			.decrypt = cc_rfc4543_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};

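/*
 * Wrap one aead_algs[] template in a cc_crypto_alg ready for
 * crypto_register_aead(): copy the aead_alg, fill in the names,
 * priority, context size and ASYNC | KERN_DRIVER_ONLY flags, and
 * record the cipher/flow/auth modes for use by the request path.
 */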
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
	struct cc_crypto_alg *t_alg;
	struct aead_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &tmpl->template_aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 tmpl->driver_name);
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CC_CRA_PRIO;

	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->init = cc_aead_init;
	alg->exit = cc_aead_exit;

	t_alg->aead_alg = *alg;

	t_alg->cipher_mode = tmpl->cipher_mode;
	t_alg->flow_mode = tmpl->flow_mode;
	t_alg->auth_mode = tmpl->auth_mode;

	return t_alg;
}

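/*
 * Unregister every AEAD alg registered by cc_aead_alloc() and release
 * the handle. Also used on the cc_aead_alloc() error path, so it
 * tolerates a partially populated (or absent) handle.
 */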
int cc_aead_free(struct cc_drvdata *drvdata)
{
	struct cc_crypto_alg *t_alg, *n;
	struct cc_aead_handle *aead_handle = drvdata->aead_handle;

	if (aead_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
					 entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
		}
		kfree(aead_handle);
		drvdata->aead_handle = NULL;
	}

	return 0;
}

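/*
 * Reserve the SRAM workspace used by the AEAD flows and register each
 * aead_algs[] entry supported by this device's hardware revision and
 * enabled standards bodies (NIST and/or OSCCA).
 */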
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
	struct cc_aead_handle *aead_handle;
	struct cc_crypto_alg *t_alg;
	int rc = -ENOMEM;
	int alg;
	struct device *dev = drvdata_to_dev(drvdata);

	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
	if (!aead_handle) {
		rc = -ENOMEM;
		goto fail0;
	}

	INIT_LIST_HEAD(&aead_handle->aead_list);
	drvdata->aead_handle = aead_handle;

	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
							 MAX_HMAC_DIGEST_SIZE);

	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail1;
	}

	/* Linux crypto */
	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & aead_algs[alg].std_body))
			continue;

		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				aead_algs[alg].driver_name);
			goto fail1;
		}
		t_alg->drvdata = drvdata;
		rc = crypto_register_aead(&t_alg->aead_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				t_alg->aead_alg.base.cra_driver_name);
			goto fail2;
		}

		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
		dev_dbg(dev, "Registered %s\n",
			t_alg->aead_alg.base.cra_driver_name);
	}

	return 0;

fail2:
	kfree(t_alg);
fail1:
	cc_aead_free(drvdata);
fail0:
	return rc;
}
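
/*
 * Illustrative sketch only, not part of the driver: once cc_aead_alloc()
 * has run (the ccree probe path calls it after the request manager and
 * SRAM pool are set up, and cc_aead_free() undoes it on remove), kernel
 * users reach these transforms through the generic AEAD API and the
 * crypto core picks the ccree implementation by cra_priority. The guard
 * macro below is hypothetical and never defined; the key/IV/tag sizes
 * and single in-place buffer are assumptions for the example, not
 * driver requirements.
 */
#ifdef CC_AEAD_USAGE_EXAMPLE
static int cc_aead_usage_example(void)
{
	u8 key[16] = { 0 };	/* AES-128; setkey() copies it, stack is ok */
	struct crypto_aead *tfm;
	struct aead_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *iv = NULL, *buf = NULL;
	int rc;

	/* "gcm(aes)" resolves to gcm-aes-ccree when this driver wins */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* IV and data may be DMA-mapped by the driver: no stack memory */
	iv = kzalloc(12, GFP_KERNEL);	/* gcm(aes) ivsize above */
	buf = kzalloc(64, GFP_KERNEL);	/* 48B plaintext + 16B tag */
	req = aead_request_alloc(tfm, GFP_KERNEL);
	rc = -ENOMEM;
	if (!iv || !buf || !req)
		goto out;

	rc = crypto_aead_setkey(tfm, key, sizeof(key));
	if (rc)
		goto out;
	rc = crypto_aead_setauthsize(tfm, 16);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, 64);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);			/* no AAD */
	aead_request_set_crypt(req, &sg, &sg, 48, iv);	/* in place */

	/* ccree completes asynchronously; wait for the callback */
	rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
out:
	aead_request_free(req);
	kfree(buf);
	kfree(iv);
	crypto_free_aead(tfm);
	return rc;
}
#endif /* CC_AEAD_USAGE_EXAMPLE */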
