root/drivers/crypto/nx/nx-aes-ccm.c

DEFINITIONS

This source file includes the following definitions:
  1. ccm_aes_nx_set_key
  2. ccm4309_aes_nx_set_key
  3. ccm_aes_nx_setauthsize
  4. ccm4309_aes_nx_setauthsize
  5. set_msg_len
  6. crypto_ccm_check_iv
  7. generate_b0
  8. generate_pat
  9. ccm_nx_decrypt
  10. ccm_nx_encrypt
  11. ccm4309_aes_nx_encrypt
  12. ccm_aes_nx_encrypt
  13. ccm4309_aes_nx_decrypt
  14. ccm_aes_nx_decrypt

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * AES CCM routines supporting the Power 7+ Nest Accelerators driver
   4  *
   5  * Copyright (C) 2012 International Business Machines Inc.
   6  *
   7  * Author: Kent Yoder <yoder1@us.ibm.com>
   8  */
   9 
  10 #include <crypto/internal/aead.h>
  11 #include <crypto/aes.h>
  12 #include <crypto/algapi.h>
  13 #include <crypto/scatterwalk.h>
  14 #include <linux/module.h>
  15 #include <linux/types.h>
  16 #include <linux/crypto.h>
  17 #include <asm/vio.h>
  18 
  19 #include "nx_csbcpb.h"
  20 #include "nx.h"
  21 
  22 
  23 static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
  24                               const u8           *in_key,
  25                               unsigned int        key_len)
  26 {
  27         struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
  28         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
  29         struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
  30 
  31         nx_ctx_init(nx_ctx, HCOP_FC_AES);
  32 
  33         switch (key_len) {
  34         case AES_KEYSIZE_128:
  35                 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
  36                 NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
  37                 nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
  38                 break;
  39         default:
  40                 return -EINVAL;
  41         }
  42 
  43         csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
  44         memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);
  45 
  46         csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
  47         memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);
  48 
  49         return 0;
  50 
  51 }
  52 
  53 static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
  54                                   const u8           *in_key,
  55                                   unsigned int        key_len)
  56 {
  57         struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
  58 
  59         if (key_len < 3)
  60                 return -EINVAL;
  61 
  62         key_len -= 3;
  63 
  64         memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);
  65 
  66         return ccm_aes_nx_set_key(tfm, in_key, key_len);
  67 }
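/* Editor's note (not part of the original driver): the rfc4309 key blob
 * passed to the setkey above is the AES key followed by a 3-byte nonce
 * (salt), per RFC 4309.  For AES-128 that means key_len == 19:
 *
 *     bytes  0..15   AES-128 key   -> ccm_aes_nx_set_key()
 *     bytes 16..18   nonce (salt)  -> nx_ctx->priv.ccm.nonce
 */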
  68 
  69 static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
  70                                   unsigned int authsize)
  71 {
  72         switch (authsize) {
  73         case 4:
  74         case 6:
  75         case 8:
  76         case 10:
  77         case 12:
  78         case 14:
  79         case 16:
  80                 break;
  81         default:
  82                 return -EINVAL;
  83         }
  84 
  85         return 0;
  86 }
  87 
  88 static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
  89                                       unsigned int authsize)
  90 {
  91         switch (authsize) {
  92         case 8:
  93         case 12:
  94         case 16:
  95                 break;
  96         default:
  97                 return -EINVAL;
  98         }
  99 
 100         return 0;
 101 }
 102 
 103 /* taken from crypto/ccm.c */
 104 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
 105 {
 106         __be32 data;
 107 
 108         memset(block, 0, csize);
 109         block += csize;
 110 
 111         if (csize >= 4)
 112                 csize = 4;
 113         else if (msglen > (unsigned int)(1 << (8 * csize)))
 114                 return -EOVERFLOW;
 115 
 116         data = cpu_to_be32(msglen);
 117         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
 118 
 119         return 0;
 120 }
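/* Worked example (editor's illustration): with csize = 3 (i.e. L = 3) and
 * msglen = 0x012345, set_msg_len() first zeroes the 3-byte field and then
 * copies in the low three bytes of the big-endian length, leaving
 * 01 23 45 at the end of B0.  For csize >= 4 the full 32-bit big-endian
 * value is stored in the last four bytes of the field; no overflow check
 * is needed there because msglen itself is only 32 bits wide.
 */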
 121 
 122 /* taken from crypto/ccm.c */
 123 static inline int crypto_ccm_check_iv(const u8 *iv)
 124 {
 125         /* 2 <= L <= 8, so 1 <= L' <= 7. */
 126         if (1 > iv[0] || iv[0] > 7)
 127                 return -EINVAL;
 128 
 129         return 0;
 130 }
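/* Editor's note (not part of the original driver): the 16-byte CCM IV is
 * laid out as
 *
 *     iv[0]            flags byte holding L' = L - 1 (size of length field)
 *     iv[1 .. 15-L]    nonce
 *     iv[16-L .. 15]   counter, zeroed by generate_pat() before use
 *
 * so iv[0] = 7 selects an 8-byte length field with a 7-byte nonce, while
 * the rfc4309 entry points below fix iv[0] = 3 (4-byte length field,
 * 11-byte nonce).
 */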
 131 
 132 /* based on code from crypto/ccm.c */
 133 static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
 134                        unsigned int cryptlen, u8 *b0)
 135 {
 136         unsigned int l, lp, m = authsize;
 137         int rc;
 138 
 139         memcpy(b0, iv, 16);
 140 
 141         lp = b0[0];
 142         l = lp + 1;
 143 
 144         /* set m, bits 3-5 */
 145         *b0 |= (8 * ((m - 2) / 2));
 146 
 147         /* set adata, bit 6, if associated data is used */
 148         if (assoclen)
 149                 *b0 |= 64;
 150 
 151         rc = set_msg_len(b0 + 16 - l, cryptlen, l);
 152 
 153         return rc;
 154 }
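/* Editor's note (not part of the original driver): after generate_b0() the
 * flags byte b0[0] follows RFC 3610 / NIST SP 800-38C:
 *
 *     bit  6       Adata, set when associated data is present
 *     bits 3..5    (M - 2) / 2, where M is the tag length (authsize)
 *     bits 0..2    L - 1, already present from iv[0]
 *
 * and b0[16-L .. 15] holds the message length written by set_msg_len().
 * For example, authsize = 8 with a non-zero assoclen ORs 0x58 into the
 * flags byte.
 */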
 155 
 156 static int generate_pat(u8                   *iv,
 157                         struct aead_request  *req,
 158                         struct nx_crypto_ctx *nx_ctx,
 159                         unsigned int          authsize,
 160                         unsigned int          nbytes,
 161                         unsigned int          assoclen,
 162                         u8                   *out)
 163 {
 164         struct nx_sg *nx_insg = nx_ctx->in_sg;
 165         struct nx_sg *nx_outsg = nx_ctx->out_sg;
 166         unsigned int iauth_len = 0;
 167         u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
 168         int rc;
 169         unsigned int max_sg_len;
 170 
 171         /* zero the ctr value */
 172         memset(iv + 15 - iv[0], 0, iv[0] + 1);
 173 
 174         /* Page 78 of nx_wb.pdf notes:
 175          * RFC 3610 allows the AAD to be up to 2^64 - 1 bytes in
 176          * length. If a full message is used, the AES CCA implementation
 177          * restricts the maximum AAD length to 2^32 - 1 bytes.
 178          * If partial messages are used, the implementation supports
 179          * a maximum AAD length of 2^64 - 1 bytes.
 180          *
 181          * However, in the crypto API's aead_request structure,
 182          * assoclen is an unsigned int, so it cannot hold a length
 183          * value greater than 2^32 - 1.
 184          * The AAD is therefore further constrained by this and can
 185          * never exceed 2^32 - 1 bytes.
 186          */
 187 
 188         if (!assoclen) {
 189                 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
 190         } else if (assoclen <= 14) {
 191                 /* if associated data is 14 bytes or less, we do 1 CCM
 192                  * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
 193                  * which is fed in through the source buffers here */
 194                 b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
 195                 b1 = nx_ctx->priv.ccm.iauth_tag;
 196                 iauth_len = assoclen;
 197         } else if (assoclen <= 65280) {
 198                 /* if associated data is less than (2^16 - 2^8), we construct
 199                  * B1 differently and feed in the associated data to a CCA
 200                  * operation */
 201                 b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
 202                 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
 203                 iauth_len = 14;
 204         } else {
 205                 b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
 206                 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
 207                 iauth_len = 10;
 208         }
 209 
 210         /* generate B0 */
 211         rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
 212         if (rc)
 213                 return rc;
 214 
 215         /* generate B1:
 216          * add control info for associated data
 217          * RFC 3610 and NIST Special Publication 800-38C
 218          */
 219         if (b1) {
 220                 memset(b1, 0, 16);
 221                 if (assoclen <= 65280) {
 222                         *(u16 *)b1 = assoclen;
 223                         scatterwalk_map_and_copy(b1 + 2, req->src, 0,
 224                                          iauth_len, SCATTERWALK_FROM_SG);
 225                 } else {
 226                         *(u16 *)b1 = (u16)(0xfffe);
 227                         *(u32 *)&b1[2] = assoclen;
 228                         scatterwalk_map_and_copy(b1 + 6, req->src, 0,
 229                                          iauth_len, SCATTERWALK_FROM_SG);
 230                 }
 231         }
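        /* Editor's note (not part of the original driver): per RFC 3610 the
         * first AAD block encodes the AAD length and then as much AAD as
         * fits, zero padded to one AES block.  For an 8-byte AAD:
         *
         *     b1 = 00 08 | a0 .. a7 | 00 00 00 00 00 00
         *
         * For AAD longer than 65280 bytes the length is encoded as ff fe
         * followed by a 4-byte value, leaving room for only the first 10
         * AAD bytes in B1 (iauth_len above); the remaining AAD is fed to
         * the CCA operation in the loop further down.
         */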
 232 
 233         /* now copy any remaining AAD to scatterlist and call nx... */
 234         if (!assoclen) {
 235                 return rc;
 236         } else if (assoclen <= 14) {
 237                 unsigned int len = 16;
 238 
 239                 nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
 240 
 241                 if (len != 16)
 242                         return -EINVAL;
 243 
 244                 nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
 245                                             nx_ctx->ap->sglen);
 246 
 247                 if (len != 16)
 248                         return -EINVAL;
 249 
 250                 /* inlen should be negative, indicating to phyp that it's a
 251                  * pointer to an sg list */
 252                 nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
 253                                         sizeof(struct nx_sg);
 254                 nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
 255                                         sizeof(struct nx_sg);
 256 
 257                 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 258                 NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
 259 
 260                 result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
 261 
 262                 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 263                                    req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 264                 if (rc)
 265                         return rc;
 266 
 267                 atomic_inc(&(nx_ctx->stats->aes_ops));
 268                 atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 269 
 270         } else {
 271                 unsigned int processed = 0, to_process;
 272 
 273                 processed += iauth_len;
 274 
 275                 /* page_limit: number of sg entries that fit on one page */
 276                 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 277                                 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 278                 max_sg_len = min_t(u64, max_sg_len,
 279                                 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 280 
 281                 do {
 282                         to_process = min_t(u32, assoclen - processed,
 283                                            nx_ctx->ap->databytelen);
 284 
 285                         nx_insg = nx_walk_and_build(nx_ctx->in_sg,
 286                                                     nx_ctx->ap->sglen,
 287                                                     req->src, processed,
 288                                                     &to_process);
 289 
 290                         if ((to_process + processed) < assoclen) {
 291                                 NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
 292                                         NX_FDM_INTERMEDIATE;
 293                         } else {
 294                                 NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
 295                                         ~NX_FDM_INTERMEDIATE;
 296                         }
 297 
 298 
 299                         nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
 300                                                 sizeof(struct nx_sg);
 301 
 302                         result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 303 
 304                         rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
 305                                    req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 306                         if (rc)
 307                                 return rc;
 308 
 309                         memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
 310                                 nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
 311                                 AES_BLOCK_SIZE);
 312 
 313                         NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
 314 
 315                         atomic_inc(&(nx_ctx->stats->aes_ops));
 316                         atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 317 
 318                         processed += to_process;
 319                 } while (processed < assoclen);
 320 
 321                 result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 322         }
 323 
 324         memcpy(out, result, AES_BLOCK_SIZE);
 325 
 326         return rc;
 327 }
 328 
 329 static int ccm_nx_decrypt(struct aead_request   *req,
 330                           struct blkcipher_desc *desc,
 331                           unsigned int assoclen)
 332 {
 333         struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 334         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 335         unsigned int nbytes = req->cryptlen;
 336         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
 337         struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
 338         unsigned long irq_flags;
 339         unsigned int processed = 0, to_process;
 340         int rc = -1;
 341 
 342         spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 343 
 344         nbytes -= authsize;
 345 
 346         /* copy out the auth tag to compare with later */
 347         scatterwalk_map_and_copy(priv->oauth_tag,
 348                                  req->src, nbytes + req->assoclen, authsize,
 349                                  SCATTERWALK_FROM_SG);
 350 
 351         rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
 352                           csbcpb->cpb.aes_ccm.in_pat_or_b0);
 353         if (rc)
 354                 goto out;
 355 
 356         do {
 357 
 358                 /* to_process: the AES_BLOCK_SIZE data chunk to process in this
 359                  * update. This value is bound by sg list limits.
 360                  */
 361                 to_process = nbytes - processed;
 362 
 363                 if ((to_process + processed) < nbytes)
 364                         NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 365                 else
 366                         NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 367 
 368                 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 369 
 370                 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
 371                                        &to_process, processed + req->assoclen,
 372                                        csbcpb->cpb.aes_ccm.iv_or_ctr);
 373                 if (rc)
 374                         goto out;
 375 
 376                 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 377                            req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 378                 if (rc)
 379                         goto out;
 380 
 381                 /* for partial completion, copy following for next
 382                  * entry into loop...
 383                  */
 384                 memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
 385                 memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
 386                         csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
 387                 memcpy(csbcpb->cpb.aes_ccm.in_s0,
 388                         csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
 389 
 390                 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 391 
 392                 /* update stats */
 393                 atomic_inc(&(nx_ctx->stats->aes_ops));
 394                 atomic64_add(csbcpb->csb.processed_byte_count,
 395                              &(nx_ctx->stats->aes_bytes));
 396 
 397                 processed += to_process;
 398         } while (processed < nbytes);
 399 
 400         rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
 401                     authsize) ? -EBADMSG : 0;
 402 out:
 403         spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 404         return rc;
 405 }
 406 
 407 static int ccm_nx_encrypt(struct aead_request   *req,
 408                           struct blkcipher_desc *desc,
 409                           unsigned int assoclen)
 410 {
 411         struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 412         struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 413         unsigned int nbytes = req->cryptlen;
 414         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
 415         unsigned long irq_flags;
 416         unsigned int processed = 0, to_process;
 417         int rc = -1;
 418 
 419         spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 420 
 421         rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
 422                           csbcpb->cpb.aes_ccm.in_pat_or_b0);
 423         if (rc)
 424                 goto out;
 425 
 426         do {
 427                 /* to process: the AES_BLOCK_SIZE data chunk to process in this
 428                  * update. This value is bound by sg list limits.
 429                  */
 430                 to_process = nbytes - processed;
 431 
 432                 if ((to_process + processed) < nbytes)
 433                         NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 434                 else
 435                         NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 436 
 437                 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 438 
 439                 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
 440                                        &to_process, processed + req->assoclen,
 441                                        csbcpb->cpb.aes_ccm.iv_or_ctr);
 442                 if (rc)
 443                         goto out;
 444 
 445                 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 446                                    req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 447                 if (rc)
 448                         goto out;
 449 
 450                 /* for partial completion, copy following for next
 451                  * entry into loop...
 452                  */
 453                 memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
 454                 memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
 455                         csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
 456                 memcpy(csbcpb->cpb.aes_ccm.in_s0,
 457                         csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
 458 
 459                 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 460 
 461                 /* update stats */
 462                 atomic_inc(&(nx_ctx->stats->aes_ops));
 463                 atomic64_add(csbcpb->csb.processed_byte_count,
 464                              &(nx_ctx->stats->aes_bytes));
 465 
 466                 processed += to_process;
 467 
 468         } while (processed < nbytes);
 469 
 470         /* copy out the auth tag */
 471         scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
 472                                  req->dst, nbytes + req->assoclen, authsize,
 473                                  SCATTERWALK_TO_SG);
 474 
 475 out:
 476         spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 477         return rc;
 478 }
 479 
 480 static int ccm4309_aes_nx_encrypt(struct aead_request *req)
 481 {
 482         struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 483         struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 484         struct blkcipher_desc desc;
 485         u8 *iv = rctx->iv;
 486 
 487         iv[0] = 3;
 488         memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
 489         memcpy(iv + 4, req->iv, 8);
 490 
 491         desc.info = iv;
 492 
 493         return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
 494 }
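/* Editor's note (not part of the original driver): the rfc4309 counter
 * block assembled above is
 *
 *     iv[0]      = 3         flags: 4-byte length field (L = 4)
 *     iv[1..3]   = nonce     the 3 salt bytes taken from the key
 *     iv[4..11]  = req->iv   the 8-byte per-request IV from the caller
 *     iv[12..15]             counter, zeroed later by generate_pat()
 *
 * Per the kernel's rfc4309 convention the last 8 bytes of the request's
 * associated data region carry that per-request IV and are not
 * authenticated, hence the "req->assoclen - 8" passed down.
 */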
 495 
 496 static int ccm_aes_nx_encrypt(struct aead_request *req)
 497 {
 498         struct blkcipher_desc desc;
 499         int rc;
 500 
 501         desc.info = req->iv;
 502 
 503         rc = crypto_ccm_check_iv(desc.info);
 504         if (rc)
 505                 return rc;
 506 
 507         return ccm_nx_encrypt(req, &desc, req->assoclen);
 508 }
 509 
 510 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
 511 {
 512         struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 513         struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 514         struct blkcipher_desc desc;
 515         u8 *iv = rctx->iv;
 516 
 517         iv[0] = 3;
 518         memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
 519         memcpy(iv + 4, req->iv, 8);
 520 
 521         desc.info = iv;
 522 
 523         return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
 524 }
 525 
 526 static int ccm_aes_nx_decrypt(struct aead_request *req)
 527 {
 528         struct blkcipher_desc desc;
 529         int rc;
 530 
 531         desc.info = req->iv;
 532 
 533         rc = crypto_ccm_check_iv(desc.info);
 534         if (rc)
 535                 return rc;
 536 
 537         return ccm_nx_decrypt(req, &desc, req->assoclen);
 538 }
 539 
 540 /* tell the block cipher walk routines that this is a stream cipher by
 541  * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 542  * during encrypt/decrypt doesn't solve this problem, because it calls
 543  * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 544  * but instead uses the tfm's cra_blocksize. */
 545 struct aead_alg nx_ccm_aes_alg = {
 546         .base = {
 547                 .cra_name        = "ccm(aes)",
 548                 .cra_driver_name = "ccm-aes-nx",
 549                 .cra_priority    = 300,
 550                 .cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
 551                 .cra_blocksize   = 1,
 552                 .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 553                 .cra_module      = THIS_MODULE,
 554         },
 555         .init        = nx_crypto_ctx_aes_ccm_init,
 556         .exit        = nx_crypto_ctx_aead_exit,
 557         .ivsize      = AES_BLOCK_SIZE,
 558         .maxauthsize = AES_BLOCK_SIZE,
 559         .setkey      = ccm_aes_nx_set_key,
 560         .setauthsize = ccm_aes_nx_setauthsize,
 561         .encrypt     = ccm_aes_nx_encrypt,
 562         .decrypt     = ccm_aes_nx_decrypt,
 563 };
 564 
 565 struct aead_alg nx_ccm4309_aes_alg = {
 566         .base = {
 567                 .cra_name        = "rfc4309(ccm(aes))",
 568                 .cra_driver_name = "rfc4309-ccm-aes-nx",
 569                 .cra_priority    = 300,
 570                 .cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
 571                 .cra_blocksize   = 1,
 572                 .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 573                 .cra_module      = THIS_MODULE,
 574         },
 575         .init        = nx_crypto_ctx_aes_ccm_init,
 576         .exit        = nx_crypto_ctx_aead_exit,
 577         .ivsize      = 8,
 578         .maxauthsize = AES_BLOCK_SIZE,
 579         .setkey      = ccm4309_aes_nx_set_key,
 580         .setauthsize = ccm4309_aes_nx_setauthsize,
 581         .encrypt     = ccm4309_aes_nx_encrypt,
 582         .decrypt     = ccm4309_aes_nx_decrypt,
 583 };
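/* Editor's illustration, not part of the driver: a minimal sketch of how a
 * kernel caller might exercise the "ccm(aes)" algorithm registered above
 * through the generic AEAD API.  Buffer names and sizes are made up; the
 * source/destination scatterlist must hold assoclen bytes of AAD followed
 * by the data, with room for the authentication tag on output.  The sketch
 * additionally assumes <crypto/aead.h> and <linux/scatterlist.h>.
 */
#if 0	/* illustrative sketch only */
static int ccm_aes_nx_example(void)
{
	static const u8 key[AES_KEYSIZE_128];
	u8 iv[16] = { 3 };			/* iv[0] = L' = 3, nonce/ctr zero */
	u8 buf[16 + 32 + 8] = { };		/* AAD + plaintext + tag */
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	int rc;

	tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!rc)
		rc = crypto_aead_setauthsize(tfm, 8);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		rc = rc ? rc : -ENOMEM;

	if (!rc) {
		sg_init_one(&sg, buf, sizeof(buf));
		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					  crypto_req_done, &wait);
		aead_request_set_ad(req, 16);		/* 16 bytes of AAD */
		aead_request_set_crypt(req, &sg, &sg, 32, iv);
		rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	}

	aead_request_free(req);
	crypto_free_aead(tfm);
	return rc;
}
#endif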
